diff --git a/apis/cluster/v1beta1/zz_generated.deepcopy.go b/apis/cluster/v1beta1/zz_generated.deepcopy.go
index f983670b4..4a52ac1e5 100644
--- a/apis/cluster/v1beta1/zz_generated.deepcopy.go
+++ b/apis/cluster/v1beta1/zz_generated.deepcopy.go
@@ -11,7 +11,7 @@ Licensed under the MIT license.
 package v1beta1
 
 import (
-	v1 "k8s.io/api/core/v1"
+	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
diff --git a/apis/placement/v1beta1/clusterresourceplacement_types.go b/apis/placement/v1beta1/clusterresourceplacement_types.go
index ab640cf83..4e614c223 100644
--- a/apis/placement/v1beta1/clusterresourceplacement_types.go
+++ b/apis/placement/v1beta1/clusterresourceplacement_types.go
@@ -39,10 +39,10 @@ const (
 // +kubebuilder:resource:scope="Cluster",shortName=crp,categories={fleet,fleet-placement}
 // +kubebuilder:subresource:status
 // +kubebuilder:printcolumn:JSONPath=`.metadata.generation`,name="Gen",type=string
-// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Scheduled")].status`,name="Scheduled",type=string
-// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Scheduled")].observedGeneration`,name="ScheduledGen",type=string
-// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Applied")].status`,name="Applied",type=string
-// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Applied")].observedGeneration`,name="AppliedGen",type=string
+// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="ClusterResourcePlacementScheduled")].status`,name="Scheduled",type=string
+// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="ClusterResourcePlacementScheduled")].observedGeneration`,name="ScheduledGen",type=string
+// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="ClusterResourcePlacementApplied")].status`,name="Applied",type=string
+// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="ClusterResourcePlacementApplied")].observedGeneration`,name="AppliedGen",type=string
 // +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
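
Note: clients that read these conditions programmatically need the same renamed type strings as the new JSONPath columns. A minimal illustrative sketch (not part of this change; the function and file name are hypothetical):

	// conditioncheck.go: client-side read of the renamed condition type.
	package conditioncheck

	import (
		"k8s.io/apimachinery/pkg/api/meta"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

		placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
	)

	// isScheduled reports whether the latest generation of the CRP has been
	// scheduled, using the same condition type the new print columns select on.
	func isScheduled(crp *placementv1beta1.ClusterResourcePlacement) bool {
		cond := meta.FindStatusCondition(crp.Status.Conditions, "ClusterResourcePlacementScheduled")
		return cond != nil && cond.Status == metav1.ConditionTrue && cond.ObservedGeneration == crp.Generation
	}
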
diff --git a/apis/placement/v1beta1/zz_generated.deepcopy.go b/apis/placement/v1beta1/zz_generated.deepcopy.go
index acfc9cb73..c55bb5975 100644
--- a/apis/placement/v1beta1/zz_generated.deepcopy.go
+++ b/apis/placement/v1beta1/zz_generated.deepcopy.go
@@ -11,7 +11,7 @@ Licensed under the MIT license.
 package v1beta1
 
 import (
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/intstr"
 )
diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go
index c584671c5..8ad756ac9 100644
--- a/apis/v1alpha1/zz_generated.deepcopy.go
+++ b/apis/v1alpha1/zz_generated.deepcopy.go
@@ -12,7 +12,7 @@
 package v1alpha1
 
 import (
 	corev1 "k8s.io/api/core/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml
index 9cbcd010b..b1440430a 100644
--- a/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml
+++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml
@@ -23,16 +23,16 @@ spec:
     - jsonPath: .metadata.generation
       name: Gen
       type: string
-    - jsonPath: .status.conditions[?(@.type=="Scheduled")].status
+    - jsonPath: .status.conditions[?(@.type=="ClusterResourcePlacementScheduled")].status
       name: Scheduled
       type: string
-    - jsonPath: .status.conditions[?(@.type=="Scheduled")].observedGeneration
+    - jsonPath: .status.conditions[?(@.type=="ClusterResourcePlacementScheduled")].observedGeneration
      name: ScheduledGen
      type: string
-    - jsonPath: .status.conditions[?(@.type=="Applied")].status
+    - jsonPath: .status.conditions[?(@.type=="ClusterResourcePlacementApplied")].status
      name: Applied
      type: string
-    - jsonPath: .status.conditions[?(@.type=="Applied")].observedGeneration
+    - jsonPath: .status.conditions[?(@.type=="ClusterResourcePlacementApplied")].observedGeneration
      name: AppliedGen
      type: string
    - jsonPath: .metadata.creationTimestamp
diff --git a/pkg/utils/validator/clusterresourceplacement.go b/pkg/utils/validator/clusterresourceplacement.go
index f646c92c2..42d46e745 100644
--- a/pkg/utils/validator/clusterresourceplacement.go
+++ b/pkg/utils/validator/clusterresourceplacement.go
@@ -13,6 +13,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	apiErrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/validation"
 
 	placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
 	fleetv1alpha1 "go.goms.io/fleet/apis/v1alpha1"
@@ -68,6 +69,10 @@ func ValidateClusterResourcePlacementAlpha(clusterResourcePlacement *fleetv1alph
 func ValidateClusterResourcePlacement(clusterResourcePlacement *placementv1beta1.ClusterResourcePlacement) error {
 	allErr := make([]error, 0)
 
+	if len(clusterResourcePlacement.Name) > validation.DNS1035LabelMaxLength {
+		allErr = append(allErr, fmt.Errorf("the name field cannot have length exceeding %d", validation.DNS1035LabelMaxLength))
+	}
+
 	for _, selector := range clusterResourcePlacement.Spec.ResourceSelectors {
 		//TODO: make sure the selector's gvk is valid
 		if selector.LabelSelector != nil {
@@ -80,13 +85,18 @@ func ValidateClusterResourcePlacement(clusterResourcePlacement *placementv1beta1
 		}
 	}
 
+	if clusterResourcePlacement.Spec.Policy != nil {
+		if err := validatePlacementPolicy(clusterResourcePlacement.Spec.Policy); err != nil {
+			allErr = append(allErr, fmt.Errorf("the placement policy field is invalid: %w", err))
+		}
+	}
+
 	if clusterResourcePlacement.Spec.Policy != nil && clusterResourcePlacement.Spec.Policy.Affinity != nil && clusterResourcePlacement.Spec.Policy.Affinity.ClusterAffinity != nil {
 		if err := validateClusterAffinity(clusterResourcePlacement.Spec.Policy.Affinity.ClusterAffinity); err != nil {
 			allErr = append(allErr, fmt.Errorf("the clusterAffinity field is invalid: %w", err))
 		}
 	}
-
 	if err := validateRolloutStrategy(clusterResourcePlacement.Spec.Strategy); err != nil {
 		allErr = append(allErr, fmt.Errorf("the rollout Strategy field is invalid: %w", err))
 	}
@@ -94,6 +104,56 @@ func ValidateClusterResourcePlacement(clusterResourcePlacement *placementv1beta1
 	return apiErrors.NewAggregate(allErr)
 }
 
+func validatePlacementPolicy(policy *placementv1beta1.PlacementPolicy) error {
+	allErr := make([]error, 0)
+	switch policy.PlacementType {
+	case placementv1beta1.PickFixedPlacementType:
+		if err := validatePolicyForPickFixedPlacementType(policy); err != nil {
+			allErr = append(allErr, err)
+		}
+	case placementv1beta1.PickAllPlacementType:
+		if err := validatePolicyForPickAllPlacementType(policy); err != nil {
+			allErr = append(allErr, err)
+		}
+	case placementv1beta1.PickNPlacementType:
+		if err := validatePolicyForPickNPolicyType(policy); err != nil {
+			allErr = append(allErr, err)
+		}
+	}
+
+	return apiErrors.NewAggregate(allErr)
+}
+
+func validatePolicyForPickFixedPlacementType(policy *placementv1beta1.PlacementPolicy) error {
+	allErr := make([]error, 0)
+	if len(policy.ClusterNames) == 0 {
+		allErr = append(allErr, fmt.Errorf("cluster names cannot be empty for policy type %s", placementv1beta1.PickFixedPlacementType))
+	}
+	if policy.NumberOfClusters != nil {
+		allErr = append(allErr, fmt.Errorf("number of clusters must be nil for policy type %s, only valid for PickN placement policy type", placementv1beta1.PickFixedPlacementType))
+	}
+	if policy.Affinity != nil {
+		allErr = append(allErr, fmt.Errorf("affinity must be nil for policy type %s, only valid for PickAll/PickN placement policy types", placementv1beta1.PickFixedPlacementType))
+	}
+	if len(policy.TopologySpreadConstraints) > 0 {
+		allErr = append(allErr, fmt.Errorf("topology spread constraints need to be empty for policy type %s, only valid for PickN policy type", placementv1beta1.PickFixedPlacementType))
+	}
+
+	return apiErrors.NewAggregate(allErr)
+}
+
+func validatePolicyForPickAllPlacementType(_ *placementv1beta1.PlacementPolicy) error {
+	// TODO(Arvindthiru): implement this.
+	allErr := make([]error, 0)
+	return apiErrors.NewAggregate(allErr)
+}
+
+func validatePolicyForPickNPolicyType(_ *placementv1beta1.PlacementPolicy) error {
+	// TODO(Arvindthiru): implement this.
+	allErr := make([]error, 0)
+	return apiErrors.NewAggregate(allErr)
+}
+
 func validateClusterAffinity(_ *placementv1beta1.ClusterAffinity) error {
 	// TODO: implement this
 	return nil
@@ -101,7 +161,8 @@ func validateClusterAffinity(_ *placementv1beta1.ClusterAffinity) error {
 
 func validateRolloutStrategy(rolloutStrategy placementv1beta1.RolloutStrategy) error {
 	allErr := make([]error, 0)
-	if rolloutStrategy.Type != placementv1beta1.RollingUpdateRolloutStrategyType {
+
+	if rolloutStrategy.Type != "" && rolloutStrategy.Type != placementv1beta1.RollingUpdateRolloutStrategyType {
 		allErr = append(allErr, fmt.Errorf("unsupported rollout strategy type `%s`", rolloutStrategy.Type))
 	}
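
Note: for reviewers who want to exercise the new checks locally, a minimal sketch (assuming the go.goms.io/fleet module is importable; nothing here is part of the change set) that drives ValidateClusterResourcePlacement with a PickFixed policy violating two of the new rules:

	package main

	import (
		"fmt"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

		placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
		"go.goms.io/fleet/pkg/utils/validator"
	)

	func main() {
		n := int32(1)
		crp := &placementv1beta1.ClusterResourcePlacement{
			ObjectMeta: metav1.ObjectMeta{Name: "demo-crp"},
			Spec: placementv1beta1.ClusterResourcePlacementSpec{
				Policy: &placementv1beta1.PlacementPolicy{
					PlacementType:    placementv1beta1.PickFixedPlacementType,
					NumberOfClusters: &n, // invalid for PickFixed
					// ClusterNames left empty: also invalid for PickFixed.
				},
			},
		}
		// Expected to print an aggregate carrying both PickFixed violations.
		fmt.Println(validator.ValidateClusterResourcePlacement(crp))
	}
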
diff --git a/pkg/utils/validator/clusterresourceplacement_test.go b/pkg/utils/validator/clusterresourceplacement_test.go
index 2e2ce326c..f5da2095e 100644
--- a/pkg/utils/validator/clusterresourceplacement_test.go
+++ b/pkg/utils/validator/clusterresourceplacement_test.go
@@ -16,28 +16,6 @@ import (
 	"go.goms.io/fleet/pkg/utils/informer"
 )
 
-func Test_validateRolloutStrategy(t *testing.T) {
-	tests := map[string]struct {
-		rolloutStrategy placementv1beta1.RolloutStrategy
-		wantErr         bool
-	}{
-		// TODO: Add test cases.
-		"invalid RolloutStrategyType should fail": {
-			rolloutStrategy: placementv1beta1.RolloutStrategy{
-				Type: "random type",
-			},
-			wantErr: true,
-		},
-	}
-	for name, tt := range tests {
-		t.Run(name, func(t *testing.T) {
-			if err := validateRolloutStrategy(tt.rolloutStrategy); (err != nil) != tt.wantErr {
-				t.Errorf("validateRolloutStrategy() error = %v, wantErr %v", err, tt.wantErr)
-			}
-		})
-	}
-}
-
 func Test_validateClusterResourcePlacementAlpha(t *testing.T) {
 	tests := map[string]struct {
 		crp     *fleetv1alpha1.ClusterResourcePlacement
@@ -196,6 +174,7 @@ func Test_validateClusterResourcePlacementAlpha(t *testing.T) {
 
 func Test_validateClusterResourcePlacement(t *testing.T) {
 	unavailablePeriodSeconds := -10
+	var numberOfClusters int32 = 1
 	tests := map[string]struct {
 		crp              *placementv1beta1.ClusterResourcePlacement
 		resourceInformer informer.Manager
 		wantErr          bool
@@ -222,7 +201,27 @@ func Test_validateClusterResourcePlacement(t *testing.T) {
 			},
 			wantErr: false,
 		},
-
+		"CRP with invalid name": {
+			crp: &placementv1beta1.ClusterResourcePlacement{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-crp-with-very-long-name-field-exceeding-DNS1035LabelMaxLength",
+				},
+				Spec: placementv1beta1.ClusterResourcePlacementSpec{
+					ResourceSelectors: []placementv1beta1.ClusterResourceSelector{
+						{
+							Group:   "rbac.authorization.k8s.io",
+							Version: "v1",
+							Kind:    "ClusterRole",
+							Name:    "test-cluster-role",
+						},
+					},
+					Strategy: placementv1beta1.RolloutStrategy{
+						Type: placementv1beta1.RollingUpdateRolloutStrategyType,
+					},
+				},
+			},
+			wantErr: true,
+		},
 		"invalid Resource Selector with name & label selector": {
 			crp: &placementv1beta1.ClusterResourcePlacement{
 				ObjectMeta: metav1.ObjectMeta{
@@ -263,6 +262,27 @@ func Test_validateClusterResourcePlacement(t *testing.T) {
 					},
 				},
 			},
+			wantErr: false,
+		},
+		"invalid rollout strategy": {
+			crp: &placementv1beta1.ClusterResourcePlacement{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-crp",
+				},
+				Spec: placementv1beta1.ClusterResourcePlacementSpec{
+					ResourceSelectors: []placementv1beta1.ClusterResourceSelector{
+						{
+							Group:   "rbac.authorization.k8s.io",
+							Version: "v1",
+							Kind:    "ClusterRole",
+							Name:    "test-cluster-role",
+						},
+					},
+					Strategy: placementv1beta1.RolloutStrategy{
+						Type: "random type",
+					},
+				},
+			},
 			wantErr: true,
 		},
 		"invalid rollout strategy - UnavailablePeriodSeconds": {
@@ -397,8 +417,122 @@ func Test_validateClusterResourcePlacement(t *testing.T) {
 			},
 			wantErr: true,
 		},
+		"invalid placement policy - PickFixed with empty cluster names": {
+			crp: &placementv1beta1.ClusterResourcePlacement{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-crp",
+				},
+				Spec: placementv1beta1.ClusterResourcePlacementSpec{
+					ResourceSelectors: []placementv1beta1.ClusterResourceSelector{
+						{
+							Group:   "rbac.authorization.k8s.io",
+							Version: "v1",
+							Kind:    "ClusterRole",
+							Name:    "test-cluster-role",
+						},
+					},
+					Policy: &placementv1beta1.PlacementPolicy{
+						PlacementType: placementv1beta1.PickFixedPlacementType,
+					},
+					Strategy: placementv1beta1.RolloutStrategy{
+						Type: placementv1beta1.RollingUpdateRolloutStrategyType,
+						RollingUpdate: &placementv1beta1.RollingUpdateConfig{
+							MaxUnavailable: &intstr.IntOrString{
+								Type:   0,
+								IntVal: 10,
+							},
+						},
+					},
+				},
+			},
+			wantErr: true,
+		},
+		"invalid placement policy - PickFixed with non nil number of clusters": {
+			crp: &placementv1beta1.ClusterResourcePlacement{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-crp",
+				},
+				Spec: placementv1beta1.ClusterResourcePlacementSpec{
+					ResourceSelectors: []placementv1beta1.ClusterResourceSelector{
+						{
+							Group:   "rbac.authorization.k8s.io",
+							Version: "v1",
+							Kind:    "ClusterRole",
+							Name:    "test-cluster-role",
+						},
+					},
+					Policy: &placementv1beta1.PlacementPolicy{
+						PlacementType:    placementv1beta1.PickFixedPlacementType,
+						ClusterNames:     []string{"test-cluster"},
+						NumberOfClusters: &numberOfClusters,
+					},
+				},
+			},
+			wantErr: true,
+		},
+		"invalid placement policy - PickFixed with non nil affinity": {
+			crp: &placementv1beta1.ClusterResourcePlacement{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-crp",
+				},
+				Spec: placementv1beta1.ClusterResourcePlacementSpec{
+					ResourceSelectors: []placementv1beta1.ClusterResourceSelector{
+						{
+							Group:   "rbac.authorization.k8s.io",
+							Version: "v1",
+							Kind:    "ClusterRole",
+							Name:    "test-cluster-role",
+						},
+					},
+					Policy: &placementv1beta1.PlacementPolicy{
+						PlacementType: placementv1beta1.PickFixedPlacementType,
+						ClusterNames:  []string{"test-cluster"},
+						Affinity: &placementv1beta1.Affinity{
+							ClusterAffinity: &placementv1beta1.ClusterAffinity{
+								RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{
+									ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{
+										{
+											LabelSelector: metav1.LabelSelector{
+												MatchLabels: map[string]string{"test-key": "test-value"},
+											},
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+			wantErr: true,
+		},
+		"invalid placement policy - PickFixed with non empty topology constraints": {
+			crp: &placementv1beta1.ClusterResourcePlacement{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-crp",
+				},
+				Spec: placementv1beta1.ClusterResourcePlacementSpec{
+					ResourceSelectors: []placementv1beta1.ClusterResourceSelector{
+						{
+							Group:   "rbac.authorization.k8s.io",
+							Version: "v1",
+							Kind:    "ClusterRole",
+							Name:    "test-cluster-role",
+						},
+					},
+					Policy: &placementv1beta1.PlacementPolicy{
+						PlacementType: placementv1beta1.PickFixedPlacementType,
+						ClusterNames:  []string{"test-cluster"},
+						TopologySpreadConstraints: []placementv1beta1.TopologySpreadConstraint{
+							{
+								TopologyKey: "test-key",
+							},
+						},
+					},
+				},
+			},
+			wantErr: true,
+		},
 	}
-
 	for testName, testCase := range tests {
 		t.Run(testName, func(t *testing.T) {
 			ResourceInformer = testCase.resourceInformer
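
Note: the standalone Test_validateRolloutStrategy was folded into the table above because validateRolloutStrategy now treats an empty Type as valid (only a non-empty type other than RollingUpdate is rejected). A two-line sketch of the new acceptance rule, callable from within the validator package ("Recreate" is just an illustrative unknown value):

	_ = validateRolloutStrategy(placementv1beta1.RolloutStrategy{})                 // nil: empty type passes
	_ = validateRolloutStrategy(placementv1beta1.RolloutStrategy{Type: "Recreate"}) // error: unsupported rollout strategy type `Recreate`
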
diff --git a/pkg/webhook/clusterresourceplacement/v1alpha1_clusterresourceplacement_validating_webhook.go b/pkg/webhook/clusterresourceplacement/v1alpha1_clusterresourceplacement_validating_webhook.go
index a567c00a2..7116f0709 100644
--- a/pkg/webhook/clusterresourceplacement/v1alpha1_clusterresourceplacement_validating_webhook.go
+++ b/pkg/webhook/clusterresourceplacement/v1alpha1_clusterresourceplacement_validating_webhook.go
@@ -37,6 +37,7 @@ func AddV1Alpha1(mgr manager.Manager, _ []string) error {
 func (v *v1alpha1ClusterResourcePlacementValidator) Handle(_ context.Context, req admission.Request) admission.Response {
 	var crp fleetv1alpha1.ClusterResourcePlacement
 	if req.Operation == admissionv1.Create || req.Operation == admissionv1.Update {
+		klog.V(2).InfoS("handling CRP", "operation", req.Operation, "namespacedName", types.NamespacedName{Name: req.Name})
 		if err := v.decoder.Decode(req, &crp); err != nil {
 			klog.ErrorS(err, "failed to decode v1alpha1 CRP object for create/update operation", "userName", req.UserInfo.Username, "groups", req.UserInfo.Groups)
 			return admission.Errored(http.StatusBadRequest, err)
diff --git a/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_validating_webhook.go b/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_validating_webhook.go
index bb5650cd8..6c158e891 100644
--- a/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_validating_webhook.go
+++ b/pkg/webhook/clusterresourceplacement/v1beta1_clusterresourceplacement_validating_webhook.go
@@ -37,6 +37,7 @@ func Add(mgr manager.Manager, _ []string) error {
 func (v *clusterResourcePlacementValidator) Handle(_ context.Context, req admission.Request) admission.Response {
 	var crp placementv1beta1.ClusterResourcePlacement
 	if req.Operation == admissionv1.Create || req.Operation == admissionv1.Update {
+		klog.V(2).InfoS("handling CRP", "operation", req.Operation, "namespacedName", types.NamespacedName{Name: req.Name})
 		if err := v.decoder.Decode(req, &crp); err != nil {
 			klog.ErrorS(err, "failed to decode v1beta1 CRP object for create/update operation", "userName", req.UserInfo.Username, "groups", req.UserInfo.Groups)
 			return admission.Errored(http.StatusBadRequest, err)
diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go
index 7bf15fc21..75fb52eb7 100644
--- a/test/e2e/utils_test.go
+++ b/test/e2e/utils_test.go
@@ -161,12 +161,12 @@ func cleanupCRP(name string) {
 		if errors.IsNotFound(err) {
 			return nil
 		}
-		Expect(err).Should(Succeed(), "Failed to get CRP %s", name)
+		g.Expect(err).Should(Succeed(), "Failed to get CRP %s", name)
 
 		// Delete the CRP (again, if applicable).
 		//
 		// This helps the AfterAll node to run successfully even if the steps above fail early.
-		Expect(hubClient.Delete(ctx, crp)).To(Succeed(), "Failed to delete CRP %s", name)
+		g.Expect(hubClient.Delete(ctx, crp)).To(Succeed(), "Failed to delete CRP %s", name)
 
 		crp.Finalizers = []string{}
 		return hubClient.Update(ctx, crp)
diff --git a/test/e2e/webhook_test.go b/test/e2e/webhook_test.go
index 9ddd10633..0d29a0777 100644
--- a/test/e2e/webhook_test.go
+++ b/test/e2e/webhook_test.go
@@ -82,7 +82,28 @@ var _ = Describe("webhook tests for CRP UPDATE operations", Ordered, func() {
 			}
 			var statusErr *k8sErrors.StatusError
 			g.Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update CRP call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{})))
-			Expect(string(statusErr.Status().Reason)).Should(Equal(fmt.Sprintf("the labelSelector and name fields are mutually exclusive in selector %+v", selector[0])))
+			Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("the labelSelector and name fields are mutually exclusive"))
 			return nil
 		}, testutils.PollTimeout, testutils.PollInterval).Should(Succeed())
 	})
+
+	It("should deny update on CRP with invalid placement policy", func() {
+		Eventually(func(g Gomega) error {
+			var numOfClusters int32 = 1
+			var crp placementv1beta1.ClusterResourcePlacement
+			g.Expect(hubClient.Get(ctx, types.NamespacedName{Name: crpName}, &crp)).Should(Succeed())
+			crp.Spec.Policy = &placementv1beta1.PlacementPolicy{
+				PlacementType:    placementv1beta1.PickFixedPlacementType,
+				NumberOfClusters: &numOfClusters,
+			}
+			err := hubClient.Update(ctx, &crp)
+			if k8sErrors.IsConflict(err) {
+				return err
+			}
+			var statusErr *k8sErrors.StatusError
+			g.Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Update CRP call produced error %s. Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{})))
+			Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("cluster names cannot be empty for policy type"))
+			Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("number of clusters must be nil for policy type PickFixed"))
+			return nil
+		}, testutils.PollTimeout, testutils.PollInterval).Should(Succeed())
+	})
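
Note: switching from Equal on Status().Reason to MatchRegexp on ErrStatus.Message is what lets one webhook denial carry several aggregated violations; utilerrors.NewAggregate joins them into a single bracketed message, so substring matching stays stable as new checks are added. A small standalone sketch (not part of the change set) demonstrating the aggregate format:

	package main

	import (
		"errors"
		"fmt"

		utilerrors "k8s.io/apimachinery/pkg/util/errors"
	)

	func main() {
		agg := utilerrors.NewAggregate([]error{
			errors.New("cluster names cannot be empty for policy type PickFixed"),
			errors.New("number of clusters must be nil for policy type PickFixed, only valid for PickN placement policy type"),
		})
		// Prints both messages as one bracketed, comma-separated string, which is
		// why the e2e assertions above match substrings rather than the full text.
		fmt.Println(agg)
	}
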
diff --git a/test/scheduler/pickall_integration_test.go b/test/scheduler/pickall_integration_test.go
index e7b87e383..71aecab97 100644
--- a/test/scheduler/pickall_integration_test.go
+++ b/test/scheduler/pickall_integration_test.go
@@ -13,6 +13,7 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
 )
 
@@ -162,3 +163,426 @@ var _ = Describe("scheduling CRPs with no scheduling policy specified", func() {
 		})
 	})
 })
+
+var _ = Describe("scheduling CRPs of the PickAll placement type", func() {
+	Context("pick all valid clusters", Ordered, func() {
+		crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
+		policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1)
+
+		BeforeAll(func() {
+			// Ensure that no bindings have been created so far.
+			noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName)
+			Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+
+			// Create a CRP of the PickAll placement type, along with its associated policy snapshot.
+			createPickAllCRPWithPolicySnapshot(crpName, nil, policySnapshotName)
+		})
+
+		It("should add scheduler cleanup finalizer to the CRP", func() {
+			finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName)
+			Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP")
+		})
+
+		It("should create scheduled bindings for all healthy clusters", func() {
+			scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(healthyClusters, zeroScoreByCluster, crpName, policySnapshotName)
+			Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings")
+			Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings")
+		})
+
+		It("should not create any binding for unhealthy clusters", func() {
+			noBindingsCreatedActual := noBindingsCreatedForClustersActual(unhealthyClusters, crpName)
+			Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+			Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+		})
+
+		It("should report status correctly", func() {
+			statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(healthyClusters, unhealthyClusters, policySnapshotName)
+			Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status")
+			Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status")
+		})
+
+		AfterAll(func() {
+			// Delete the CRP.
+			ensureCRPAndAllRelatedResourcesDeletion(crpName)
+		})
+	})
+
+	Context("pick clusters with specific affinities (single term, multiple selectors)", Ordered, func() {
+		crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
+		policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1)
+
+		wantTargetClusters := []string{
+			memberCluster1EastProd,
+			memberCluster2EastProd,
+			memberCluster6WestProd,
+		}
+		wantIgnoredClusters := []string{
+			memberCluster3EastCanary,
+			memberCluster4CentralProd,
+			memberCluster5CentralProd,
+			memberCluster7WestCanary,
+			memberCluster8UnhealthyEastProd,
+			memberCluster9LeftCentralProd,
+		}
+
+		BeforeAll(func() {
+			// Ensure that no bindings have been created so far.
+			noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName)
+			Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+
+			// Create a CRP of the PickAll placement type, along with its associated policy snapshot.
+			affinity := &placementv1beta1.Affinity{
+				ClusterAffinity: &placementv1beta1.ClusterAffinity{
+					RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{
+						ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{
+							{
+								LabelSelector: metav1.LabelSelector{
+									MatchLabels: map[string]string{
+										envLabel: "prod",
+									},
+									MatchExpressions: []metav1.LabelSelectorRequirement{
+										{
+											Key:      regionLabel,
+											Operator: metav1.LabelSelectorOpIn,
+											Values:   []string{"east", "west"},
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			}
+			createPickAllCRPWithPolicySnapshot(crpName, affinity, policySnapshotName)
+		})
+
+		It("should add scheduler cleanup finalizer to the CRP", func() {
+			finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName)
+			Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP")
+		})
+
+		It("should create scheduled bindings for all matching clusters", func() {
+			scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpName, policySnapshotName)
+			Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings")
+			Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings")
+		})
+
+		It("should not create any binding for non-matching clusters", func() {
+			noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpName)
+			Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+			Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+		})
+
+		It("should report status correctly", func() {
+			statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotName)
+			Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status")
+			Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status")
+		})
+
+		AfterAll(func() {
+			// Delete the CRP.
+			ensureCRPAndAllRelatedResourcesDeletion(crpName)
+		})
+	})
+
+	Context("pick clusters with specific affinities (multiple terms, single selector)", Ordered, func() {
+		crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
+		policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1)
+
+		wantTargetClusters := []string{
+			memberCluster3EastCanary,
+			memberCluster6WestProd,
+			memberCluster7WestCanary,
+		}
+		wantIgnoredClusters := []string{
+			memberCluster1EastProd,
+			memberCluster2EastProd,
+			memberCluster4CentralProd,
+			memberCluster5CentralProd,
+			memberCluster8UnhealthyEastProd,
+			memberCluster9LeftCentralProd,
+		}
+
+		BeforeAll(func() {
+			// Ensure that no bindings have been created so far.
+			noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName)
+			Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+
+			// Create a CRP of the PickAll placement type, along with its associated policy snapshot.
+			affinity := &placementv1beta1.Affinity{
+				ClusterAffinity: &placementv1beta1.ClusterAffinity{
+					RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{
+						ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{
+							{
+								LabelSelector: metav1.LabelSelector{
+									MatchLabels: map[string]string{
+										envLabel: "canary",
+									},
+								},
+							},
+							{
+								LabelSelector: metav1.LabelSelector{
+									MatchExpressions: []metav1.LabelSelectorRequirement{
+										{
+											Key:      regionLabel,
+											Operator: metav1.LabelSelectorOpNotIn,
+											Values: []string{
+												"east",
+												"central",
+											},
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			}
+			createPickAllCRPWithPolicySnapshot(crpName, affinity, policySnapshotName)
+		})
+
+		It("should add scheduler cleanup finalizer to the CRP", func() {
+			finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName)
+			Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP")
+		})
+
+		It("should create scheduled bindings for all matching clusters", func() {
+			scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters, zeroScoreByCluster, crpName, policySnapshotName)
+			Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings")
+			Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings")
+		})
+
+		It("should not create any binding for non-matching clusters", func() {
+			noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpName)
+			Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+			Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+		})
+
+		It("should report status correctly", func() {
+			statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters, wantIgnoredClusters, policySnapshotName)
+			Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status")
+			Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status")
+		})
+
+		AfterAll(func() {
+			// Delete the CRP.
+			ensureCRPAndAllRelatedResourcesDeletion(crpName)
+		})
+	})
+
+	Context("affinities updated", Ordered, func() {
+		crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
+		policySnapshotName1 := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1)
+		policySnapshotName2 := fmt.Sprintf(policySnapshotNameTemplate, crpName, 2)
+
+		wantTargetClusters1 := []string{
+			memberCluster3EastCanary,
+			memberCluster7WestCanary,
+		}
+		wantTargetClusters2 := []string{
+			memberCluster3EastCanary,
+			memberCluster6WestProd,
+			memberCluster7WestCanary,
+		}
+		wantIgnoredClusters2 := []string{
+			memberCluster1EastProd,
+			memberCluster2EastProd,
+			memberCluster4CentralProd,
+			memberCluster5CentralProd,
+			memberCluster8UnhealthyEastProd,
+			memberCluster9LeftCentralProd,
+		}
+		boundClusters := []string{
+			memberCluster3EastCanary,
+		}
+		scheduledClusters := []string{
+			memberCluster6WestProd,
+			memberCluster7WestCanary,
+		}
+
+		BeforeAll(func() {
+			// Ensure that no bindings have been created so far.
+			noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName)
+			Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+
+			// Create a CRP of the PickAll placement type, along with its associated policy snapshot.
+			affinity := &placementv1beta1.Affinity{
+				ClusterAffinity: &placementv1beta1.ClusterAffinity{
+					RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{
+						ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{
+							{
+								LabelSelector: metav1.LabelSelector{
+									MatchLabels: map[string]string{
+										envLabel: "canary",
+									},
+									MatchExpressions: []metav1.LabelSelectorRequirement{
+										{
+											Key:      regionLabel,
+											Operator: metav1.LabelSelectorOpIn,
+											Values: []string{
+												"east",
+												"west",
+											},
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			}
+			createPickAllCRPWithPolicySnapshot(crpName, affinity, policySnapshotName1)
+
+			// Verify that bindings have been created as expected.
+			scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(wantTargetClusters1, zeroScoreByCluster, crpName, policySnapshotName1)
+			Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings")
+			Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings")
+
+			// Bind some bindings.
+			markBindingsAsBoundForClusters(crpName, boundClusters)
+
+			// Update the CRP with a new affinity.
+			affinity = &placementv1beta1.Affinity{
+				ClusterAffinity: &placementv1beta1.ClusterAffinity{
+					RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{
+						ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{
+							{
+								LabelSelector: metav1.LabelSelector{
+									MatchLabels: map[string]string{
+										envLabel: "canary",
+									},
+								},
+							},
+							{
+								LabelSelector: metav1.LabelSelector{
+									MatchExpressions: []metav1.LabelSelectorRequirement{
+										{
+											Key:      regionLabel,
+											Operator: metav1.LabelSelectorOpNotIn,
+											Values: []string{
+												"east",
+												"central",
+											},
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			}
+			updatePickAllCRPWithNewAffinity(crpName, affinity, policySnapshotName1, policySnapshotName2)
+		})
+
+		It("should add scheduler cleanup finalizer to the CRP", func() {
+			finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName)
+			Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP")
+		})
+
+		It("should create/update scheduled bindings for newly matched clusters", func() {
+			scheduledBindingsCreatedActual := scheduledBindingsCreatedOrUpdatedForClustersActual(scheduledClusters, zeroScoreByCluster, crpName, policySnapshotName2)
+			Eventually(scheduledBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create the expected set of bindings")
+			Consistently(scheduledBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to create the expected set of bindings")
+		})
+
+		It("should update bound bindings for newly matched clusters", func() {
+			boundBindingsUpdatedActual := boundBindingsCreatedOrUpdatedForClustersActual(boundClusters, zeroScoreByCluster, crpName, policySnapshotName2)
+			Eventually(boundBindingsUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update the expected set of bindings")
+			Consistently(boundBindingsUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update the expected set of bindings")
+		})
+
+		It("should not create any binding for non-matching clusters", func() {
+			noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters2, crpName)
+			Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+			Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+		})
+
+		It("should report status correctly", func() {
+			statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual(wantTargetClusters2, wantIgnoredClusters2, policySnapshotName2)
+			Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status")
+			Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status")
+		})
+
+		AfterAll(func() {
+			// Delete the CRP.
+			ensureCRPAndAllRelatedResourcesDeletion(crpName)
+		})
+	})
+
+	Context("no matching clusters", Ordered, func() {
+		crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
+		policySnapshotName := fmt.Sprintf(policySnapshotNameTemplate, crpName, 1)
+
+		wantIgnoredClusters := []string{
+			memberCluster1EastProd,
+			memberCluster2EastProd,
+			memberCluster3EastCanary,
+			memberCluster4CentralProd,
+			memberCluster5CentralProd,
+			memberCluster6WestProd,
+			memberCluster7WestCanary,
+			memberCluster8UnhealthyEastProd,
+			memberCluster9LeftCentralProd,
+		}
+
+		BeforeAll(func() {
+			// Ensure that no bindings have been created so far.
+			noBindingsCreatedActual := noBindingsCreatedForCRPActual(crpName)
+			Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+
+			affinity := &placementv1beta1.Affinity{
+				ClusterAffinity: &placementv1beta1.ClusterAffinity{
+					RequiredDuringSchedulingIgnoredDuringExecution: &placementv1beta1.ClusterSelector{
+						ClusterSelectorTerms: []placementv1beta1.ClusterSelectorTerm{
+							{
+								LabelSelector: metav1.LabelSelector{
+									MatchLabels: map[string]string{
+										envLabel: "wonderland",
+									},
+								},
+							},
+							{
+								LabelSelector: metav1.LabelSelector{
+									MatchExpressions: []metav1.LabelSelectorRequirement{
+										{
+											Key:      regionLabel,
+											Operator: metav1.LabelSelectorOpNotIn,
+											Values: []string{
+												"east",
+												"central",
+												"west",
+											},
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			}
+			createPickAllCRPWithPolicySnapshot(crpName, affinity, policySnapshotName)
+		})
+
+		It("should add scheduler cleanup finalizer to the CRP", func() {
+			finalizerAddedActual := crpSchedulerFinalizerAddedActual(crpName)
+			Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add scheduler cleanup finalizer to CRP")
+		})
+
+		It("should not create any binding for non-matching clusters", func() {
+			noBindingsCreatedActual := noBindingsCreatedForClustersActual(wantIgnoredClusters, crpName)
+			Eventually(noBindingsCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+			Consistently(noBindingsCreatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Some bindings have been created unexpectedly")
+		})
+
+		It("should report status correctly", func() {
+			statusUpdatedActual := pickAllPolicySnapshotStatusUpdatedActual([]string{}, wantIgnoredClusters, policySnapshotName)
+			Eventually(statusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update status")
+			Consistently(statusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Failed to update status")
+		})
+
+		AfterAll(func() {
+			// Delete the CRP.
+			ensureCRPAndAllRelatedResourcesDeletion(crpName)
+		})
+	})
+})
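
Note: the affinity contexts above rely on standard Kubernetes label-selector semantics: requirements inside one ClusterSelectorTerm are ANDed, while multiple terms are ORed. That is why memberCluster6WestProd lands in wantTargetClusters for the multiple-terms case (it matches the region NotIn {east, central} term even without env=canary). A sketch of that rule, not scheduler code, with the function name hypothetical:

	package schedulersketch

	import (
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/apimachinery/pkg/labels"

		placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
	)

	// matchesAnyTerm reports whether a cluster's labels satisfy at least one
	// term (OR across terms); each term's selector internally ANDs its
	// MatchLabels and MatchExpressions.
	func matchesAnyTerm(clusterLabels map[string]string, terms []placementv1beta1.ClusterSelectorTerm) bool {
		for _, term := range terms {
			sel, err := metav1.LabelSelectorAsSelector(&term.LabelSelector)
			if err != nil {
				continue // an invalid selector never matches in this sketch
			}
			if sel.Matches(labels.Set(clusterLabels)) {
				return true
			}
		}
		return false
	}
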
diff --git a/test/scheduler/utils_test.go b/test/scheduler/utils_test.go
index d57e959dc..250e62f2a 100644
--- a/test/scheduler/utils_test.go
+++ b/test/scheduler/utils_test.go
@@ -464,3 +464,82 @@ func ensureProvisionalClusterDeletion(clusterName string) {
 		return err
 	}, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to delete member cluster")
 }
+
+func createPickAllCRPWithPolicySnapshot(crpName string, affinity *placementv1beta1.Affinity, policySnapshotName string) {
+	policy := &placementv1beta1.PlacementPolicy{
+		PlacementType: placementv1beta1.PickAllPlacementType,
+		Affinity:      affinity,
+	}
+
+	// Create a CRP of the PickAll placement type.
+	crp := placementv1beta1.ClusterResourcePlacement{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       crpName,
+			Finalizers: []string{customDeletionBlockerFinalizer},
+		},
+		Spec: placementv1beta1.ClusterResourcePlacementSpec{
+			ResourceSelectors: defaultResourceSelectors,
+			Policy:            policy,
+		},
+	}
+	Expect(hubClient.Create(ctx, &crp)).Should(Succeed(), "Failed to create CRP")
+
+	crpGeneration := crp.Generation
+
+	// Create the associated policy snapshot.
+	policySnapshot := &placementv1beta1.ClusterSchedulingPolicySnapshot{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: policySnapshotName,
+			Labels: map[string]string{
+				placementv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true),
+				placementv1beta1.CRPTrackingLabel:      crpName,
+			},
+			Annotations: map[string]string{
+				placementv1beta1.CRPGenerationAnnotation: strconv.FormatInt(crpGeneration, 10),
+			},
+		},
+		Spec: placementv1beta1.SchedulingPolicySnapshotSpec{
+			Policy:     policy,
+			PolicyHash: []byte(policyHash),
+		},
+	}
+	Expect(hubClient.Create(ctx, policySnapshot)).Should(Succeed(), "Failed to create policy snapshot")
+}
+
+func updatePickAllCRPWithNewAffinity(crpName string, affinity *placementv1beta1.Affinity, oldPolicySnapshotName, newPolicySnapshotName string) {
+	// Update the CRP.
+	crp := &placementv1beta1.ClusterResourcePlacement{}
+	Expect(hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp)).To(Succeed(), "Failed to get CRP")
+
+	policy := crp.Spec.Policy.DeepCopy()
+	policy.Affinity = affinity
+	crp.Spec.Policy = policy
+	Expect(hubClient.Update(ctx, crp)).To(Succeed(), "Failed to update CRP")
+
+	crpGeneration := crp.Generation
+
+	// Mark the old policy snapshot as inactive.
+	policySnapshot := &placementv1beta1.ClusterSchedulingPolicySnapshot{}
+	Expect(hubClient.Get(ctx, types.NamespacedName{Name: oldPolicySnapshotName}, policySnapshot)).To(Succeed(), "Failed to get policy snapshot")
+	policySnapshot.Labels[placementv1beta1.IsLatestSnapshotLabel] = strconv.FormatBool(false)
+	Expect(hubClient.Update(ctx, policySnapshot)).To(Succeed(), "Failed to update policy snapshot")
+
+	// Create a new policy snapshot.
+	policySnapshot = &placementv1beta1.ClusterSchedulingPolicySnapshot{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: newPolicySnapshotName,
+			Labels: map[string]string{
+				placementv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true),
+				placementv1beta1.CRPTrackingLabel:      crpName,
+			},
+			Annotations: map[string]string{
+				placementv1beta1.CRPGenerationAnnotation: strconv.FormatInt(crpGeneration, 10),
+			},
+		},
+		Spec: placementv1beta1.SchedulingPolicySnapshotSpec{
+			Policy:     policy,
+			PolicyHash: []byte(policyHash),
+		},
+	}
+	Expect(hubClient.Create(ctx, policySnapshot)).To(Succeed(), "Failed to create policy snapshot")
+}
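
Note: the two helpers encode the CRP/policy-snapshot contract these tests depend on: at most one snapshot per CRP carries IsLatestSnapshotLabel set to "true", CRPTrackingLabel ties each snapshot back to its owning CRP, and CRPGenerationAnnotation records which CRP generation the snapshot captures. A test-helper style sketch of fetching the active snapshot (it assumes a generated ClusterSchedulingPolicySnapshotList type and reuses the suite's hubClient/ctx globals; the controller-runtime client package is imported as "client"):

	func activePolicySnapshot(crpName string) (*placementv1beta1.ClusterSchedulingPolicySnapshot, error) {
		var list placementv1beta1.ClusterSchedulingPolicySnapshotList
		if err := hubClient.List(ctx, &list, client.MatchingLabels{
			placementv1beta1.CRPTrackingLabel:      crpName,
			placementv1beta1.IsLatestSnapshotLabel: "true",
		}); err != nil {
			return nil, err
		}
		if len(list.Items) != 1 {
			return nil, fmt.Errorf("want exactly one active snapshot, got %d", len(list.Items))
		}
		return &list.Items[0], nil
	}
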