From 658d8d7c699c5000aac33d28e07eca860b6f16a7 Mon Sep 17 00:00:00 2001
From: Ryan Zhang
Date: Thu, 12 Oct 2023 09:05:35 -0700
Subject: [PATCH 1/6] rebase

---
 apis/placement/v1beta1/commons.go             |  39 ++-
 go.mod                                        |  18 +-
 go.sum                                        |   9 +
 pkg/authtoken/providers/azure/azure_msi.go    |   2 +-
 .../placement_status.go                       |  30 +-
 .../placement_status_test.go                  | 225 +++++++++++++-
 pkg/controllers/workgenerator/controller.go   | 286 +++++++++++++-----
 .../controller_integration_test.go            | 282 ++++++++++++++++-
 .../workgenerator/controller_test.go          |  10 +-
 .../manifests/resourceQuota.yaml              |  11 +
 .../manifests/test-envelop-configmap.yaml     |  51 ++++
 .../manifests/test-envelop-configmap2.yaml    |  39 +++
 .../workgenerator/manifests/webhook.yaml      |  29 ++
 pkg/controllers/workgenerator/suite_test.go   |  29 +-
 pkg/utils/common.go                           |   6 +
 test/integration/cluster_placement_test.go    |   3 +-
 16 files changed, 950 insertions(+), 119 deletions(-)
 create mode 100644 pkg/controllers/workgenerator/manifests/resourceQuota.yaml
 create mode 100644 pkg/controllers/workgenerator/manifests/test-envelop-configmap.yaml
 create mode 100644 pkg/controllers/workgenerator/manifests/test-envelop-configmap2.yaml
 create mode 100644 pkg/controllers/workgenerator/manifests/webhook.yaml

diff --git a/apis/placement/v1beta1/commons.go b/apis/placement/v1beta1/commons.go
index 5a1484011..e74debac4 100644
--- a/apis/placement/v1beta1/commons.go
+++ b/apis/placement/v1beta1/commons.go
@@ -30,20 +30,29 @@ const (
 	WorkFinalizer = fleetPrefix + "work-cleanup"

 	// CRPTrackingLabel is the label that points to the cluster resource policy that creates a resource binding.
-	CRPTrackingLabel = fleetPrefix + "parentCRP"
+	CRPTrackingLabel = fleetPrefix + "parent-CRP"
+
+	// ResourceSnapshotTrackingLabel is the label that points to the cluster resource snapshot that this work is generated from.
+	ResourceSnapshotTrackingLabel = fleetPrefix + "parent-resource-snapshot"

 	// IsLatestSnapshotLabel tells if the snapshot is the latest one.
-	IsLatestSnapshotLabel = fleetPrefix + "isLatestSnapshot"
+	IsLatestSnapshotLabel = fleetPrefix + "is-latest-snapshot"

 	// FleetResourceLabelKey is the label that indicates the resource is a fleet resource.
-	FleetResourceLabelKey = fleetPrefix + "isFleetResource"
+	FleetResourceLabelKey = fleetPrefix + "is-fleet-resource"

-	// FirstWorkNameFmt is the format of the name of the first work.
+	// FirstWorkNameFmt is the format of the name of the work generated with the first resource snapshot.
+	// The name of the first work is {crpName}-work.
 	FirstWorkNameFmt = "%s-work"

-	// WorkNameWithSubindexFmt is the format of the name of a work with subindex.
+	// WorkNameWithSubindexFmt is the format of the name of a work generated with a resource snapshot that has a subindex.
+	// The name of such a work is {crpName}-{subindex}.
 	WorkNameWithSubindexFmt = "%s-%d"

+	// WorkNameWithConfigEnvelopeFmt is the format of the name of a work generated from a config envelope.
+	// The format is {workPrefix}-configmap-{uuid}.
+	WorkNameWithConfigEnvelopeFmt = "%s-configmap-%s"
+
 	// ParentResourceSnapshotIndexLabel is the label applied to work that contains the index of the resource snapshot that generates the work.
 	ParentResourceSnapshotIndexLabel = fleetPrefix + "parent-resource-snapshot-index"

@@ -52,13 +61,23 @@ const (
 	// CRPGenerationAnnotation is the annotation that indicates the generation of the CRP from
 	// which an object is derived or last updated.
-	CRPGenerationAnnotation = fleetPrefix + "CRPGeneration"
+	CRPGenerationAnnotation = fleetPrefix + "CRP-generation"
+
+	// EnvelopeConfigMapAnnotation is the annotation that indicates the configmap is an envelope configmap that contains resources
+	// we need to apply to the member cluster instead of the configMap itself.
+	EnvelopeConfigMapAnnotation = fleetPrefix + "envelope-configmap"
+
+	// EnvelopeTypeLabel is the label that marks the work object as generated from an envelope object.
+	// The value of the label is the type of the envelope object.
+	EnvelopeTypeLabel = fleetPrefix + "envelope-work"
+
+	// EnvelopeNamespaceLabel is the label that contains the namespace of the envelope object that the work is generated from.
+	EnvelopeNamespaceLabel = fleetPrefix + "envelope-namespace"
+
+	// EnvelopeNameLabel is the label that contains the name of the envelope object that the work is generated from.
+	EnvelopeNameLabel = fleetPrefix + "envelope-name"

 	// PreviousBindingStateAnnotation is the annotation that records the previous state of a binding.
 	// This is used to remember if an "unscheduled" binding was moved from a "bound" state or a "scheduled" state.
 	PreviousBindingStateAnnotation = fleetPrefix + "PreviousBindingState"
-
-	// EnvelopeConfigMapAnnotation is the annotation that indicates the configmap is an envelope configmap that contains resources
-	// we need to apply to the member cluster instead of the configMap itself.
-	EnvelopeConfigMapAnnotation = fleetPrefix + "EnvelopeConfigMap"
 )
diff --git a/go.mod b/go.mod
index a7038db63..b1d4813f9 100644
--- a/go.mod
+++ b/go.mod
@@ -3,21 +3,21 @@ module go.goms.io/fleet
 go 1.20

 require (
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.0
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.13.0
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0
 	github.com/crossplane/crossplane-runtime v0.19.2
 	github.com/go-logr/logr v1.2.4
 	github.com/google/go-cmp v0.5.9
 	github.com/onsi/ginkgo/v2 v2.9.5
 	github.com/onsi/gomega v1.27.7
 	github.com/openkruise/kruise v1.2.0
-	github.com/prometheus/client_golang v1.15.1
+	github.com/prometheus/client_golang v1.16.0
 	github.com/spf13/cobra v1.7.0
 	github.com/spf13/pflag v1.0.5
-	github.com/stretchr/testify v1.8.1
+	github.com/stretchr/testify v1.8.4
 	go.uber.org/atomic v1.11.0
 	go.uber.org/zap v1.24.0
-	golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e
+	golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1
 	golang.org/x/sync v0.3.0
 	golang.org/x/time v0.3.0
 	k8s.io/api v0.26.1
@@ -33,8 +33,8 @@ require (
 )

 require (
-	github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.1 // indirect
-	github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
+	github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/go-openapi/swag v0.22.3 // indirect
 	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang-jwt/jwt v3.2.1+incompatible // indirect
+	github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/google/gnostic v0.6.9 // indirect
@@ -70,7 +70,7 @@
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.10.0 // indirect + github.com/prometheus/procfs v0.11.1 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.14.0 // indirect golang.org/x/net v0.17.0 // indirect diff --git a/go.sum b/go.sum index 14b6e279a..c86aa1f08 100644 --- a/go.sum +++ b/go.sum @@ -2,15 +2,19 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.0 h1:8wVJL0HUP5yDFXvotdewORTw7Yu88JbreWN/mobSvsQ= github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.0/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.13.0 h1:bLRntPH25SkY1uZ/YZW+dmxNky9r1fAHvDFrzluo+4Q= github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.13.0/go.mod h1:TmXReXZ9yPp5D5TBRMTAtyz+UyOl15Py4hL5E5p6igQ= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I= github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.1 h1:sLZ/Y+P/5RRtsXWylBjB5lkgixYfm0MQPiwrSX//JSo= github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.1/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/k8s-work-api v0.4.3 h1:fxwO/QZftM3CW9FNl/JTHRQmfbQPa83VwOxR0HadECk= github.com/Azure/k8s-work-api v0.4.3/go.mod h1:FOGJkJ+uxjWlvUgmqUlRcmr4Q2ijocrUO/aLJv827y8= github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0 h1:WVsrXCnHlDDX8ls+tootqRE87/hL9S/g4ewig9RsD/c= github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -70,6 +74,7 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -160,6 +165,7 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= @@ -167,6 +173,7 @@ github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdO github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.10.0 h1:UkG7GPYkO4UZyLnyXjaWYcgOSONqwdBqFUT95ugmt6I= github.com/prometheus/procfs v0.10.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -187,6 +194,7 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= @@ -209,6 +217,7 @@ golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= diff --git a/pkg/authtoken/providers/azure/azure_msi.go b/pkg/authtoken/providers/azure/azure_msi.go index f1df46c02..3f6c7d183 100644 --- a/pkg/authtoken/providers/azure/azure_msi.go +++ b/pkg/authtoken/providers/azure/azure_msi.go @@ -46,7 +46,7 @@ func (a *AuthTokenProvider) FetchToken(ctx context.Context) (interfaces.AuthToke if err != nil { return token, fmt.Errorf("failed to create managed identity cred: %w", err) } - var azToken *azcore.AccessToken + var azToken azcore.AccessToken err = retry.OnError(retry.DefaultBackoff, func(err error) bool { return 
ctx.Err() == nil
diff --git a/pkg/controllers/clusterresourceplacement/placement_status.go b/pkg/controllers/clusterresourceplacement/placement_status.go
index 2af14de6b..d1eb7f4b8 100644
--- a/pkg/controllers/clusterresourceplacement/placement_status.go
+++ b/pkg/controllers/clusterresourceplacement/placement_status.go
@@ -288,15 +288,30 @@ func buildFailedResourcePlacements(work *fleetv1beta1.Work) (isPending bool, res
 		return false, nil
 	}

+	// check if the work is generated by an enveloped object
+	envelopeType, isEnveloped := work.GetLabels()[fleetv1beta1.EnvelopeTypeLabel]
+	var envelopObjName, envelopObjNamespace string
+	if isEnveloped {
+		// If the work is generated by an enveloped object, it must contain these labels.
+		envelopObjName = work.GetLabels()[fleetv1beta1.EnvelopeNameLabel]
+		envelopObjNamespace = work.GetLabels()[fleetv1beta1.EnvelopeNamespaceLabel]
+	}
 	res = make([]fleetv1beta1.FailedResourcePlacement, 0, len(work.Status.ManifestConditions))
 	for _, manifestCondition := range work.Status.ManifestConditions {
 		appliedCond = meta.FindStatusCondition(manifestCondition.Conditions, fleetv1beta1.WorkConditionTypeApplied)
 		// collect if there is an explicit fail
 		if appliedCond != nil && appliedCond.Status != metav1.ConditionTrue {
-			klog.V(2).InfoS("Find a failed to apply manifest",
-				"manifestName", manifestCondition.Identifier.Name, "group", manifestCondition.Identifier.Group,
-				"version", manifestCondition.Identifier.Version, "kind", manifestCondition.Identifier.Kind)
-
+			if isEnveloped {
+				klog.V(2).InfoS("Found a failed-to-apply enveloped manifest",
+					"manifestName", manifestCondition.Identifier.Name,
+					"group", manifestCondition.Identifier.Group,
+					"version", manifestCondition.Identifier.Version, "kind", manifestCondition.Identifier.Kind,
+					"envelopeType", envelopeType, "envelopObjName", envelopObjName, "envelopObjNamespace", envelopObjNamespace)
+			} else {
+				klog.V(2).InfoS("Found a failed-to-apply manifest",
+					"manifestName", manifestCondition.Identifier.Name, "group", manifestCondition.Identifier.Group,
+					"version", manifestCondition.Identifier.Version, "kind", manifestCondition.Identifier.Kind)
+			}
 			failedManifest := fleetv1beta1.FailedResourcePlacement{
 				ResourceIdentifier: fleetv1beta1.ResourceIdentifier{
 					Group:   manifestCondition.Identifier.Group,
@@ -307,6 +322,13 @@ func buildFailedResourcePlacements(work *fleetv1beta1.Work) (isPending bool, res
 				},
 				Condition: *appliedCond,
 			}
+			if isEnveloped {
+				failedManifest.ResourceIdentifier.Envelope = &fleetv1beta1.EnvelopeIdentifier{
+					Name:      envelopObjName,
+					Namespace: envelopObjNamespace,
+					Type:      fleetv1beta1.EnvelopeType(envelopeType),
+				}
+			}
 			res = append(res, failedManifest)
 		}
 	}
diff --git a/pkg/controllers/clusterresourceplacement/placement_status_test.go b/pkg/controllers/clusterresourceplacement/placement_status_test.go
index 09b63ff6d..ab7ede38b 100644
--- a/pkg/controllers/clusterresourceplacement/placement_status_test.go
+++ b/pkg/controllers/clusterresourceplacement/placement_status_test.go
@@ -22,6 +22,14 @@ import (
 	fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
 )

+var statusCmpOptions = []cmp.Option{
+	// ignore the message as we may change the message in the future
+	cmpopts.IgnoreFields(metav1.Condition{}, "Message", "LastTransitionTime"),
+	cmpopts.SortSlices(func(c1, c2 metav1.Condition) bool {
+		return c1.Type < c2.Type
+	}),
+}
+
 func TestSetPlacementStatus(t *testing.T) {
 	crpGeneration := int64(25)
 	selectedResources := []fleetv1beta1.ResourceIdentifier{
@@ -954,16 +962,219 @@ func TestSetPlacementStatus(t *testing.T) {
 			if err := r.setPlacementStatus(context.Background(), crp, selectedResources, tc.latestPolicySnapshot, tc.latestResourceSnapshot); err != nil {
 				t.Fatalf("setPlacementStatus() failed: %v", err)
 			}
-			statusCmpOptions := []cmp.Option{
-				// ignore the message as we may change the message in the future
-				cmpopts.IgnoreFields(metav1.Condition{}, "Message", "LastTransitionTime"),
-				cmpopts.SortSlices(func(c1, c2 metav1.Condition) bool {
-					return c1.Type < c2.Type
-				}),
-			}
+
 			if diff := cmp.Diff(tc.wantStatus, &crp.Status, statusCmpOptions...); diff != "" {
 				t.Errorf("setPlacementStatus() status mismatch (-want, +got):\n%s", diff)
 			}
 		})
 	}
 }
+
+func TestBuildFailedResourcePlacements(t *testing.T) {
+	tests := map[string]struct {
+		work          *fleetv1beta1.Work
+		wantIsPending bool
+		wantRes       []fleetv1beta1.FailedResourcePlacement
+	}{
+		"pending if not applied": {
+			work:          &fleetv1beta1.Work{},
+			wantIsPending: true,
+			wantRes:       nil,
+		},
+		"No resource if applied successfully": {
+			work: &fleetv1beta1.Work{
+				ObjectMeta: metav1.ObjectMeta{
+					Generation: 1,
+				},
+				Status: fleetv1beta1.WorkStatus{
+					Conditions: []metav1.Condition{
+						{
+							Type:               fleetv1beta1.WorkConditionTypeApplied,
+							ObservedGeneration: 1,
+							Status:             metav1.ConditionTrue,
+						},
+					},
+				},
+			},
+			wantIsPending: false,
+			wantRes:       nil,
+		},
+		"pending if applied not on the latest generation": {
+			work: &fleetv1beta1.Work{
+				ObjectMeta: metav1.ObjectMeta{
+					Generation: 2,
+				},
+				Status: fleetv1beta1.WorkStatus{
+					Conditions: []metav1.Condition{
+						{
+							Type:               string(fleetv1beta1.WorkConditionTypeApplied),
+							ObservedGeneration: 1,
+							Status:             metav1.ConditionTrue,
+						},
+					},
+				},
+			},
+			wantIsPending: true,
+			wantRes:       nil,
+		},
+		"not pending if applied failed with multiple objects": {
+			work: &fleetv1beta1.Work{
+				ObjectMeta: metav1.ObjectMeta{
+					Generation: 1,
+				},
+				Status: fleetv1beta1.WorkStatus{
+					Conditions: []metav1.Condition{
+						{
+							Type:               fleetv1beta1.WorkConditionTypeApplied,
+							ObservedGeneration: 1,
+							Status:             metav1.ConditionFalse,
+						},
+					},
+					ManifestConditions: []fleetv1beta1.ManifestCondition{
+						{
+							Identifier: fleetv1beta1.WorkResourceIdentifier{
+								Ordinal:   1,
+								Group:     corev1.GroupName,
+								Version:   "v1",
+								Kind:      "secret",
+								Name:      "secretName",
+								Namespace: "app",
+							},
+							Conditions: []metav1.Condition{
+								{
+									Type:               fleetv1beta1.WorkConditionTypeApplied,
+									ObservedGeneration: 1,
+									Status:             metav1.ConditionFalse,
+								},
+							},
+						},
+						{
+							Identifier: fleetv1beta1.WorkResourceIdentifier{
+								Ordinal:   1,
+								Group:     corev1.GroupName,
+								Version:   "v1",
+								Kind:      "pod",
+								Name:      "secretPod",
+								Namespace: "app",
+							},
+							Conditions: []metav1.Condition{
+								{
+									Type:               fleetv1beta1.WorkConditionTypeApplied,
+									ObservedGeneration: 2,
+									Status:             metav1.ConditionFalse,
+								},
+							},
+						},
+					},
+				},
+			},
+			wantIsPending: false,
+			wantRes: []fleetv1beta1.FailedResourcePlacement{
+				{
+					ResourceIdentifier: fleetv1beta1.ResourceIdentifier{
+						Group:     corev1.GroupName,
+						Version:   "v1",
+						Kind:      "secret",
+						Name:      "secretName",
+						Namespace: "app",
+					},
+					Condition: metav1.Condition{
+						Type:               fleetv1beta1.WorkConditionTypeApplied,
+						ObservedGeneration: 1,
+						Status:             metav1.ConditionFalse,
+					},
+				},
+				{
+					ResourceIdentifier: fleetv1beta1.ResourceIdentifier{
+						Group:     corev1.GroupName,
+						Version:   "v1",
+						Kind:      "pod",
+						Name:      "secretPod",
+						Namespace: "app",
+					},
+					Condition: metav1.Condition{
+						Type:               fleetv1beta1.WorkConditionTypeApplied,
+						ObservedGeneration: 2,
+						Status:             metav1.ConditionFalse,
+					},
+				},
+			},
+		},
+		"not pending if applied failed with an envelope object": 
{ + work: &fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + Labels: map[string]string{ + fleetv1beta1.ParentBindingLabel: "bindingName", + fleetv1beta1.CRPTrackingLabel: "testCRPName", + fleetv1beta1.EnvelopeTypeLabel: string(fleetv1beta1.ConfigMapEnvelopeType), + fleetv1beta1.EnvelopeNameLabel: "envelop-configmap", + fleetv1beta1.EnvelopeNamespaceLabel: "app", + }, + }, + Status: fleetv1beta1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + ObservedGeneration: 1, + Status: metav1.ConditionFalse, + }, + }, + ManifestConditions: []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Group: corev1.GroupName, + Version: "v1", + Kind: "secret", + Name: "secretName", + Namespace: "app", + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + ObservedGeneration: 1, + Status: metav1.ConditionFalse, + }, + }, + }, + }, + }, + }, + wantIsPending: false, + wantRes: []fleetv1beta1.FailedResourcePlacement{ + { + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: corev1.GroupName, + Version: "v1", + Kind: "secret", + Name: "secretName", + Namespace: "app", + Envelope: &fleetv1beta1.EnvelopeIdentifier{ + Name: "envelop-configmap", + Namespace: "app", + Type: fleetv1beta1.ConfigMapEnvelopeType, + }, + }, + Condition: metav1.Condition{ + + Type: fleetv1beta1.WorkConditionTypeApplied, + ObservedGeneration: 1, + Status: metav1.ConditionFalse, + }, + }, + }, + }, + } + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + gotIsPending, gotRes := buildFailedResourcePlacements(tt.work) + if tt.wantIsPending != gotIsPending { + t.Errorf("buildFailedResourcePlacements `%s` mismatch, want: %t, got : %t", name, tt.wantIsPending, gotIsPending) + } + if diff := cmp.Diff(tt.wantRes, gotRes, statusCmpOptions...); diff != "" { + t.Errorf("buildFailedResourcePlacements `%s` status mismatch (-want, +got):\n%s", name, diff) + } + }) + } +} diff --git a/pkg/controllers/workgenerator/controller.go b/pkg/controllers/workgenerator/controller.go index 71a44dec3..63d5ed762 100644 --- a/pkg/controllers/workgenerator/controller.go +++ b/pkg/controllers/workgenerator/controller.go @@ -13,10 +13,17 @@ import ( "strconv" "time" + "go.uber.org/atomic" + "golang.org/x/sync/errgroup" + v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" @@ -224,52 +231,98 @@ func (r *Reconciler) listAllWorksAssociated(ctx context.Context, resourceBinding // syncAllWork generates all the work for the resourceSnapshot and apply them to the corresponding target cluster. // it returns if we actually made any changes on the hub cluster. 
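+// Note: with envelope support, one resource snapshot can fan out into several works: one work that
+// carries all the plain (non-enveloped) manifests, plus one work per envelope configMap. For example,
+// a snapshot holding [configMap, envelope configMap, namespace] yields {crpName}-work for the plain
+// manifests and a {workPrefix}-configmap-{uuid} work for the resources extracted from the envelope.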
-func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1beta1.ClusterResourceBinding, works map[string]*fleetv1beta1.Work) (bool, error) {
-	updateAny := false
+func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1beta1.ClusterResourceBinding, existingWorks map[string]*fleetv1beta1.Work) (bool, error) {
+	updateAny := atomic.NewBool(false)
 	resourceBindingRef := klog.KObj(resourceBinding)

 	// Gather all the resource resourceSnapshots
 	resourceSnapshots, err := r.fetchAllResourceSnapshots(ctx, resourceBinding)
 	if err != nil {
-		// TODO(RZ): handle errResourceNotFullyCreated error
+		// TODO(RZ): handle errResourceNotFullyCreated error so we don't need to wait for all the snapshots to be created
 		return false, err
 	}

-	// create/update the corresponding work for each snapshot
-	activeWork := make(map[string]bool, len(resourceSnapshots))
-	for _, snapshot := range resourceSnapshots {
-		// TODO(RZ): issue those requests in parallel to speed up the process
-		updated := false
-		workName, err := getWorkNameFromSnapshotName(snapshot)
+	// issue all the create/update requests for the corresponding works for each snapshot in parallel
+	activeWork := make(map[string]*fleetv1beta1.Work, len(resourceSnapshots))
+	errs, cctx := errgroup.WithContext(ctx)
+	// generate work objects for each resource snapshot
+	for i := range resourceSnapshots {
+		snapshot := resourceSnapshots[i]
+		var newWork []*fleetv1beta1.Work
+		workNamePrefix, err := getWorkNamePrefixFromSnapshotName(snapshot)
 		if err != nil {
 			klog.ErrorS(err, "Encountered a mal-formatted resource snapshot", "resourceSnapshot", klog.KObj(snapshot))
 			return false, err
 		}
-		activeWork[workName] = true
-		if updated, err = r.upsertWork(ctx, works[workName], workName, snapshot, resourceBinding); err != nil {
-			return false, err
+		var simpleManifests []fleetv1beta1.Manifest
+		for _, selectedResource := range snapshot.Spec.SelectedResources {
+			// we need to treat a configMap that carries the envelopeConfigMapAnnotation specially,
+			// so we check the GVK and the annotations of each selected resource
+			var uResource unstructured.Unstructured
+			if err := uResource.UnmarshalJSON(selectedResource.Raw); err != nil {
+				klog.ErrorS(err, "work has invalid content", "snapshot", klog.KObj(snapshot), "selectedResource", selectedResource.Raw)
+				return false, controller.NewUnexpectedBehaviorError(err)
+			}
+			if uResource.GetObjectKind().GroupVersionKind() == utils.ConfigMapGVK &&
+				len(uResource.GetAnnotations()[fleetv1beta1.EnvelopeConfigMapAnnotation]) != 0 {
+				// get a work object for the enveloped configMap
+				work, err := r.getConfigMapEnvelopWorkObj(ctx, workNamePrefix, resourceBinding, snapshot, &uResource)
+				if err != nil {
+					return false, err
+				}
+				newWork = append(newWork, work)
+			} else {
+				simpleManifests = append(simpleManifests, fleetv1beta1.Manifest(selectedResource))
+			}
 		}
-		if updated {
-			updateAny = true
+		if len(simpleManifests) != 0 {
+			// generate a work object for the manifests if there are still any non-enveloped resources
+			newWork = append(newWork, generateSnapshotWorkObj(workNamePrefix, resourceBinding, snapshot, simpleManifests))
+		} else {
+			klog.V(2).InfoS("Skip generating work for the snapshot since there is no non-enveloped resource in the snapshot", "snapshot", klog.KObj(snapshot))
+		}
+		// issue the create/update requests for the works generated from this snapshot in parallel
+		for i := range newWork {
+			work := newWork[i]
+			activeWork[work.Name] = work
+			errs.Go(func() error {
+				updated, err := r.upsertWork(cctx, work, existingWorks[work.Name], snapshot)
+				if err != nil {
+					return err
+				}
+				if updated {
+					updateAny.Store(true)
+				}
+				return nil
+			})
+		}
 	}

 	// delete the works that are not associated with any resource snapshot
-	for _, work := range works {
-		if activeWork[work.Name] {
+	for i := range existingWorks {
+		work := existingWorks[i]
+		if _, exist := activeWork[work.Name]; exist {
 			continue
 		}
-		klog.V(2).InfoS("Delete the work that is not associated with any resource snapshot", "work", klog.KObj(work))
-		if err := r.Client.Delete(ctx, work); err != nil {
-			if !apierrors.IsNotFound(err) {
-				klog.ErrorS(err, "Failed to delete the no longer needed work", "work", klog.KObj(work))
-				return false, controller.NewAPIServerError(false, err)
+		errs.Go(func() error {
+			if err := r.Client.Delete(ctx, work); err != nil {
+				if !apierrors.IsNotFound(err) {
+					klog.ErrorS(err, "Failed to delete the no longer needed work", "work", klog.KObj(work))
+					return controller.NewAPIServerError(false, err)
+				}
 			}
-		}
-		updateAny = true
+			klog.V(2).InfoS("Deleted the work that is not associated with any resource snapshot", "work", klog.KObj(work))
+			updateAny.Store(true)
+			return nil
+		})
 	}
-	klog.V(2).InfoS("Successfully synced all the work associated with the resourceBinding", "updateAny", updateAny, "resourceBinding", resourceBindingRef)
-	return updateAny, nil
+
+	// wait for all the create/update/delete requests to finish
+	if updateErr := errs.Wait(); updateErr != nil {
+		return false, updateErr
+	}
+	klog.V(2).InfoS("Successfully synced all the work associated with the resourceBinding", "updateAny", updateAny.Load(), "resourceBinding", resourceBindingRef)
+	return updateAny.Load(), nil
 }

 // fetchAllResourceSnapshots gathers all the resource snapshots for the resource binding.
@@ -328,25 +381,47 @@ func (r *Reconciler) fetchAllResourceSnapshots(ctx context.Context, resourceBind
 	return resourceSnapshots, nil
 }

-// upsertWork creates or updates the work for the corresponding resource snapshot.
-// it returns if any change is made to the work and the possible error code.
-func (r *Reconciler) upsertWork(ctx context.Context, work *fleetv1beta1.Work, workName string, resourceSnapshot *fleetv1beta1.ClusterResourceSnapshot,
-	resourceBinding *fleetv1beta1.ClusterResourceBinding) (bool, error) {
-	needCreate := false
-	var workObj klog.ObjectRef
-	resourceBindingObj := klog.KObj(resourceBinding)
-	resourceSnapshotObj := klog.KObj(resourceSnapshot)
-	// we already checked the label in fetchAllResourceSnapShots function so no need to check again
-	resourceIndex, _ := labels.ExtractResourceIndexFromClusterResourceSnapshot(resourceSnapshot)
-	if work == nil {
-		needCreate = true
-		work = &fleetv1beta1.Work{
+// getConfigMapEnvelopWorkObj first tries to locate an existing work object for the given envelope object of type configMap.
+// We create a new one if such a work object doesn't exist yet, to avoid repeatedly deleting and recreating the same work object.
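+// The lookup goes through the parent binding/CRP and envelope type/name/namespace labels rather than
+// through the work name, because the work name ends in a random uuid suffix that cannot be recomputed.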
+// TODO: take the override policy into consideration in the future
+func (r *Reconciler) getConfigMapEnvelopWorkObj(ctx context.Context, workNamePrefix string, resourceBinding *fleetv1beta1.ClusterResourceBinding,
+	resourceSnapshot *fleetv1beta1.ClusterResourceSnapshot, envelopeObj *unstructured.Unstructured) (*fleetv1beta1.Work, error) {
+	// we group all the resources in one configMap into one work
+	manifest, err := extractResFromConfigMap(envelopeObj)
+	if err != nil {
+		klog.ErrorS(err, "configMap has invalid content", "snapshot", klog.KObj(resourceSnapshot),
+			"resourceBinding", klog.KObj(resourceBinding), "configMapWrapper", klog.KObj(envelopeObj))
+		return nil, controller.NewUserError(err)
+	}
+	klog.V(2).InfoS("Successfully extracted the enveloped resources from the configMap", "numOfResources", len(manifest), "configMapWrapper", klog.KObj(envelopeObj))
+	// Try to see if we already have a work representing the same enveloped object for this CRP in the same cluster
+	envelopWorkLabelMatcher := client.MatchingLabels{
+		fleetv1beta1.ParentBindingLabel:     resourceBinding.Name,
+		fleetv1beta1.CRPTrackingLabel:       resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel],
+		fleetv1beta1.EnvelopeTypeLabel:      string(fleetv1beta1.ConfigMapEnvelopeType),
+		fleetv1beta1.EnvelopeNameLabel:      envelopeObj.GetName(),
+		fleetv1beta1.EnvelopeNamespaceLabel: envelopeObj.GetNamespace(),
+	}
+	workList := &fleetv1beta1.WorkList{}
+	if err := r.Client.List(ctx, workList, envelopWorkLabelMatcher); err != nil {
+		return nil, controller.NewAPIServerError(true, err)
+	}
+	// we need to create a new work object
+	if len(workList.Items) == 0 {
+		// we limit the CRP name length to 63 (DNS1123LabelMaxLength) characters,
+		// so we have plenty of characters left to fit into the 253 (DNS1123SubdomainMaxLength) characters allowed for a CR name
+		workName := fmt.Sprintf(fleetv1beta1.WorkNameWithConfigEnvelopeFmt, workNamePrefix, uuid.NewUUID())
+		return &fleetv1beta1.Work{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      workName,
 				Namespace: fmt.Sprintf(utils.NamespaceNameFormat, resourceBinding.Spec.TargetCluster),
 				Labels: map[string]string{
-					fleetv1beta1.ParentBindingLabel: resourceBinding.Name,
-					fleetv1beta1.CRPTrackingLabel:   resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel],
+					fleetv1beta1.ParentBindingLabel:               resourceBinding.Name,
+					fleetv1beta1.CRPTrackingLabel:                 resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel],
+					fleetv1beta1.ParentResourceSnapshotIndexLabel: resourceSnapshot.Labels[fleetv1beta1.ResourceIndexLabel],
+					fleetv1beta1.EnvelopeTypeLabel:                string(fleetv1beta1.ConfigMapEnvelopeType),
+					fleetv1beta1.EnvelopeNameLabel:                envelopeObj.GetName(),
+					fleetv1beta1.EnvelopeNamespaceLabel:           envelopeObj.GetNamespace(),
 				},
 				OwnerReferences: []metav1.OwnerReference{
 					{
@@ -358,53 +433,95 @@ func (r *Reconciler) upsertWork(ctx context.Context, work *fleetv1beta1.Work, wo
 					},
 				},
 			},
-		}
-	} else {
-		// check if we need to update the work
-		workObj = klog.KObj(work)
-		workResourceIndex, err := labels.ExtractResourceSnapshotIndexFromWork(work)
-		if err != nil {
-			klog.ErrorS(err, "work has invalid parent resource index", "work", workObj)
-			return false, controller.NewUnexpectedBehaviorError(err)
-		}
-		if workResourceIndex == resourceIndex {
-			// no need to do anything since the resource snapshot is immutable.
- klog.V(2).InfoS("Work is already associated with the desired resourceSnapshot", "work", workObj, "resourceSnapshot", resourceSnapshotObj) - return false, nil - } + Spec: fleetv1beta1.WorkSpec{ + Workload: fleetv1beta1.WorkloadTemplate{ + Manifests: manifest, + }, + }, + }, nil } - workObj = klog.KObj(work) - // the work is pointing to a different resource snapshot, need to reset the manifest list - // reset the manifest list regardless and make sure the work is pointing to the right resource snapshot + if len(workList.Items) > 1 { + // return error here won't get us out of this + klog.ErrorS(controller.NewUnexpectedBehaviorError(fmt.Errorf("find %d work representing configMap", len(workList.Items))), + "snapshot", klog.KObj(resourceSnapshot), "resourceBinding", klog.KObj(resourceBinding), "configMapWrapper", klog.KObj(envelopeObj)) + } + // we just pick the first one if there are more than one. + work := workList.Items[0] work.Labels[fleetv1beta1.ParentResourceSnapshotIndexLabel] = resourceSnapshot.Labels[fleetv1beta1.ResourceIndexLabel] - work.Spec.Workload.Manifests = make([]fleetv1beta1.Manifest, 0) - for _, selectedResource := range resourceSnapshot.Spec.SelectedResources { - work.Spec.Workload.Manifests = append(work.Spec.Workload.Manifests, fleetv1beta1.Manifest(selectedResource)) + work.Spec.Workload.Manifests = manifest + return &work, nil +} + +// generateSnapshotWorkObj generates the work object for the corresponding snapshot +// TODO: take into consider the override policy in the future +func generateSnapshotWorkObj(workName string, resourceBinding *fleetv1beta1.ClusterResourceBinding, resourceSnapshot *fleetv1beta1.ClusterResourceSnapshot, manifest []fleetv1beta1.Manifest) *fleetv1beta1.Work { + work := &fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: workName, + Namespace: fmt.Sprintf(utils.NamespaceNameFormat, resourceBinding.Spec.TargetCluster), + Labels: map[string]string{ + fleetv1beta1.ParentBindingLabel: resourceBinding.Name, + fleetv1beta1.CRPTrackingLabel: resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel], + fleetv1beta1.ParentResourceSnapshotIndexLabel: resourceSnapshot.Labels[fleetv1beta1.ResourceIndexLabel], + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: fleetv1beta1.GroupVersion.String(), + Kind: resourceBinding.Kind, + Name: resourceBinding.Name, + UID: resourceBinding.UID, + BlockOwnerDeletion: pointer.Bool(true), // make sure that the k8s will call work delete when the binding is deleted + }, + }, + }, } + work.Spec.Workload.Manifests = append(work.Spec.Workload.Manifests, manifest...) + return work +} - // upsert the work - if needCreate { - if err := r.Client.Create(ctx, work); err != nil { - klog.ErrorS(err, "Failed to create the work associated with the resourceSnapshot", "resourceBinding", resourceBindingObj, - "resourceSnapshot", resourceSnapshotObj, "work", workObj) - return true, controller.NewCreateIgnoreAlreadyExistError(err) +// upsertWork creates or updates the new work for the corresponding resource snapshot. +// it returns if any change is made to the existing work and the possible error code. 
+func (r *Reconciler) upsertWork(ctx context.Context, newWork, existingWork *fleetv1beta1.Work, resourceSnapshot *fleetv1beta1.ClusterResourceSnapshot) (bool, error) {
+	workObj := klog.KObj(newWork)
+	resourceSnapshotObj := klog.KObj(resourceSnapshot)
+	if existingWork == nil {
+		if err := r.Client.Create(ctx, newWork); err != nil {
+			klog.ErrorS(err, "Failed to create the work associated with the resourceSnapshot", "resourceSnapshot", resourceSnapshotObj, "work", workObj)
+			return false, controller.NewCreateIgnoreAlreadyExistError(err)
 		}
-	} else if err := r.Client.Update(ctx, work); err != nil {
-		klog.ErrorS(err, "Failed to update the work associated with the resourceSnapshot", "resourceBinding", resourceBindingObj,
+		klog.V(2).InfoS("Successfully created the work associated with the resourceSnapshot", "resourceSnapshot", resourceSnapshotObj, "work", workObj)
+		return true, nil
+	}
+	// check if we need to update the existing work object
+	workResourceIndex, err := labels.ExtractResourceSnapshotIndexFromWork(existingWork)
+	if err != nil {
+		klog.ErrorS(err, "work has invalid parent resource index", "work", workObj)
+		return false, controller.NewUnexpectedBehaviorError(err)
+	}
+	// we already checked the label in the fetchAllResourceSnapshots function so no need to check again
+	resourceIndex, _ := labels.ExtractResourceIndexFromClusterResourceSnapshot(resourceSnapshot)
+	if workResourceIndex == resourceIndex {
+		// no need to do anything if the work is generated from the same resource snapshot group since the resource snapshot is immutable.
+		klog.V(2).InfoS("Work is already associated with the desired resourceSnapshot", "resourceIndex", resourceIndex, "work", workObj, "resourceSnapshot", resourceSnapshotObj)
+		return false, nil
+	}
+	// need to update the existing work; only two things can change: the parent resource snapshot index label and the manifests
+	existingWork.Labels[fleetv1beta1.ParentResourceSnapshotIndexLabel] = resourceSnapshot.Labels[fleetv1beta1.ResourceIndexLabel]
+	existingWork.Spec.Workload.Manifests = newWork.Spec.Workload.Manifests
+	if err := r.Client.Update(ctx, existingWork); err != nil {
+		klog.ErrorS(err, "Failed to update the work associated with the resourceSnapshot", "resourceSnapshot", resourceSnapshotObj,
 			"work", workObj)
 		return true, controller.NewUpdateIgnoreConflictError(err)
 	}
-
-	klog.V(2).InfoS("Successfully upsert the work associated with the resourceSnapshot", "isCreate", needCreate,
-		"resourceBinding", resourceBindingObj, "resourceSnapshot", resourceSnapshotObj, "work", workObj)
+	klog.V(2).InfoS("Successfully updated the work associated with the resourceSnapshot", "resourceSnapshot", resourceSnapshotObj, "work", workObj)
 	return true, nil
 }

-// getWorkNameFromSnapshotName extract the CRP and sub-index name from the corresponding resource snapshot.
-// The corresponding work name is the CRP name + sub-index if there is a sub-index. Otherwise, it is the CRP name +"-work".
+// getWorkNamePrefixFromSnapshotName extracts the CRP and sub-index name from the corresponding resource snapshot.
+// The corresponding work name prefix is the CRP name + sub-index if there is a sub-index. Otherwise, it is the CRP name + "-work".
 // For example, if the resource snapshot name is "crp-1-0", the corresponding work name is "crp-0".
 // If the resource snapshot name is "crp-1", the corresponding work name is "crp-work".
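+// Put differently, a resource snapshot is named {crpName}-{resourceIndex}[-{subindex}]: the resource
+// index is dropped from the work name (works are updated in place across snapshot indexes) while the
+// subindex, when present, is kept.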
-func getWorkNameFromSnapshotName(resourceSnapshot *fleetv1beta1.ClusterResourceSnapshot) (string, error) {
+func getWorkNamePrefixFromSnapshotName(resourceSnapshot *fleetv1beta1.ClusterResourceSnapshot) (string, error) {
 	// The validation webhook should make sure the label and annotation are valid on all resource snapshot.
 	// We are just being defensive here.
 	crpName, exist := resourceSnapshot.Labels[fleetv1beta1.CRPTrackingLabel]
@@ -449,6 +566,25 @@ func buildAllWorkAppliedCondition(works map[string]*fleetv1beta1.Work, binding *
 	}
 }

+func extractResFromConfigMap(uConfigMap *unstructured.Unstructured) ([]fleetv1beta1.Manifest, error) {
+	manifests := make([]fleetv1beta1.Manifest, 0)
+	var configMap v1.ConfigMap
+	err := runtime.DefaultUnstructuredConverter.FromUnstructured(uConfigMap.Object, &configMap)
+	if err != nil {
+		return nil, err
+	}
+	for _, value := range configMap.Data {
+		content, err := yaml.ToJSON([]byte(value))
+		if err != nil {
+			return nil, err
+		}
+		manifests = append(manifests, fleetv1beta1.Manifest{
+			RawExtension: runtime.RawExtension{Raw: content},
+		})
+	}
+	return manifests, nil
+}
+
 // SetupWithManager sets up the controller with the Manager.
 // It watches binding events and also update/delete events for work.
 func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
@@ -507,7 +643,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
 					// we only need to handle the case the applied condition is flipped between true and NOT true between the
 					// new and old work objects. Otherwise, it won't affect the binding applied condition
 					if condition.IsConditionStatusTrue(oldAppliedStatus, oldWork.GetGeneration()) == condition.IsConditionStatusTrue(newAppliedStatus, newWork.GetGeneration()) {
-						klog.V(2).InfoS("The work applied condition didn't flip between true and false", "oldWork", klog.KObj(oldWork), "newWork", klog.KObj(newWork))
+						klog.V(2).InfoS("The work applied condition didn't flip between true and false, no need to reconcile", "oldWork", klog.KObj(oldWork), "newWork", klog.KObj(newWork))
 						return
 					}
 					klog.V(2).InfoS("Received a work update event", "work", klog.KObj(newWork), "parentBindingName", parentBindingName)
diff --git a/pkg/controllers/workgenerator/controller_integration_test.go b/pkg/controllers/workgenerator/controller_integration_test.go
index da326859e..74b458560 100644
--- a/pkg/controllers/workgenerator/controller_integration_test.go
+++ b/pkg/controllers/workgenerator/controller_integration_test.go
@@ -22,6 +22,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/pointer"
+	"sigs.k8s.io/controller-runtime/pkg/client"

 	fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
 	"go.goms.io/fleet/pkg/utils"
@@ -36,12 +37,13 @@ var (
 	ignoreConditionOption = cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime", "Message")
 )

+const (
+	timeout  = time.Second * 6
+	duration = time.Second * 20
+	interval = time.Millisecond * 250
+)
+
 var _ = Describe("Test Work Generator Controller", func() {
-	const (
-		timeout  = time.Second * 6
-		duration = time.Second * 20
-		interval = time.Millisecond * 250
-	)

 	Context("Test Bound ClusterResourceBinding", func() {
 		var binding *fleetv1beta1.ClusterResourceBinding
@@ -375,7 +377,257 @@ var _ = Describe("Test Work Generator Controller", func() {
 				diff = cmp.Diff(wantMC, binding.Status, ignoreConditionOption)
 				return diff
 			}, timeout, interval).Should(BeEmpty(), fmt.Sprintf("binding(%s) mismatch (-want +got):\n%s", binding.Name, diff))
 		})
 	})

+	Context("Test 
Bound ClusterResourceBinding with a single resource snapshot with envelop objects", func() { + var masterSnapshot *fleetv1beta1.ClusterResourceSnapshot + + BeforeEach(func() { + masterSnapshot = generateResourceSnapshot(1, 1, 0, [][]byte{ + testConfigMap, testEnvelopConfigMap, testClonesetCRD, testNameSpace, + }) + Expect(k8sClient.Create(ctx, masterSnapshot)).Should(Succeed()) + By(fmt.Sprintf("master resource snapshot %s created", masterSnapshot.Name)) + binding = generateClusterResourceBinding(fleetv1beta1.BindingStateBound, masterSnapshot.Name, memberClusterName) + Expect(k8sClient.Create(ctx, binding)).Should(Succeed()) + By(fmt.Sprintf("resource binding %s created", binding.Name)) + }) + AfterEach(func() { + By("Deleting master clusterResourceSnapshot") + Expect(k8sClient.Delete(ctx, masterSnapshot)).Should(SatisfyAny(Succeed(), utils.NotFoundMatcher{})) + }) + + It("Should create enveloped work object in the target namespace with master resource snapshot only", func() { + // check the work that contains none enveloped object is created by now + work := fleetv1beta1.Work{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(fleetv1beta1.FirstWorkNameFmt, testCRPName), Namespace: namespaceName}, &work) + }, timeout, interval).Should(Succeed(), "Failed to get the expected work in hub cluster") + By(fmt.Sprintf("work %s is created in %s", work.Name, work.Namespace)) + //inspect the work + wantWork := fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.FirstWorkNameFmt, testCRPName), + Namespace: namespaceName, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: fleetv1beta1.GroupVersion.String(), + Kind: "ClusterResourceBinding", + Name: binding.Name, + UID: binding.UID, + BlockOwnerDeletion: pointer.Bool(true), + }, + }, + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testCRPName, + fleetv1beta1.ParentBindingLabel: binding.Name, + fleetv1beta1.ParentResourceSnapshotIndexLabel: "1", + }, + }, + Spec: fleetv1beta1.WorkSpec{ + Workload: fleetv1beta1.WorkloadTemplate{ + Manifests: []fleetv1beta1.Manifest{ + {RawExtension: runtime.RawExtension{Raw: testConfigMap}}, + {RawExtension: runtime.RawExtension{Raw: testClonesetCRD}}, + {RawExtension: runtime.RawExtension{Raw: testNameSpace}}, + }, + }, + }, + } + diff := cmp.Diff(wantWork, work, ignoreWorkOption, ignoreTypeMeta) + Expect(diff).Should(BeEmpty(), fmt.Sprintf("work(%s) mismatch (-want +got):\n%s", work.Name, diff)) + var workList fleetv1beta1.WorkList + fetchEnvelopedWork(&workList, binding) + work = workList.Items[0] + By(fmt.Sprintf("envelope work %s is created in %s", work.Name, work.Namespace)) + //inspect the envelope work + wantWork = fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: work.Name, + Namespace: namespaceName, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: fleetv1beta1.GroupVersion.String(), + Kind: "ClusterResourceBinding", + Name: binding.Name, + UID: binding.UID, + BlockOwnerDeletion: pointer.Bool(true), + }, + }, + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testCRPName, + fleetv1beta1.ParentBindingLabel: binding.Name, + fleetv1beta1.ParentResourceSnapshotIndexLabel: "1", + fleetv1beta1.EnvelopeTypeLabel: string(fleetv1beta1.ConfigMapEnvelopeType), + fleetv1beta1.EnvelopeNameLabel: "envelop-configmap", + fleetv1beta1.EnvelopeNamespaceLabel: "app", + }, + }, + Spec: fleetv1beta1.WorkSpec{ + Workload: fleetv1beta1.WorkloadTemplate{ + Manifests: []fleetv1beta1.Manifest{ + 
{RawExtension: runtime.RawExtension{Raw: testEnvelopeResourceQuota}}, + {RawExtension: runtime.RawExtension{Raw: testEnvelopeWebhook}}, + }, + }, + }, + } + diff = cmp.Diff(wantWork, work, ignoreWorkOption, ignoreTypeMeta) + Expect(diff).Should(BeEmpty(), fmt.Sprintf("envelop work(%s) mismatch (-want +got):\n%s", work.Name, diff)) + }) + + It("Should modify the enveloped work object with the same name", func() { + // make sure the enveloped work is created + var workList fleetv1beta1.WorkList + fetchEnvelopedWork(&workList, binding) + // create a second snapshot with a modified enveloped object + masterSnapshot = generateResourceSnapshot(2, 1, 0, [][]byte{ + testEnvelopConfigMap2, testClonesetCRD, testNameSpace, + }) + Expect(k8sClient.Create(ctx, masterSnapshot)).Should(Succeed()) + By(fmt.Sprintf("another master resource snapshot %s created", masterSnapshot.Name)) + // update binding + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding)).Should(Succeed()) + binding.Spec.ResourceSnapshotName = masterSnapshot.Name + Expect(k8sClient.Update(ctx, binding)).Should(Succeed()) + By(fmt.Sprintf("resource binding %s updated", binding.Name)) + // check the binding status till the bound condition is true for the second generation + Eventually(func() bool { + if err := k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding); err != nil { + return false + } + if binding.GetGeneration() <= 1 { + return false + } + // only check the bound status as the applied status reason changes depends on where the reconcile logic is + return condition.IsConditionStatusTrue( + meta.FindStatusCondition(binding.Status.Conditions, string(fleetv1beta1.ResourceBindingBound)), binding.GetGeneration()) + }, timeout, interval).Should(BeTrue(), fmt.Sprintf("binding(%s) condition should be true", binding.Name)) + By(fmt.Sprintf("resource binding %s is reconciled", binding.Name)) + // check the work that contains none enveloped object is updated + work := fleetv1beta1.Work{} + Eventually(func() error { + return k8sClient.Get(ctx, types.NamespacedName{Name: fmt.Sprintf(fleetv1beta1.FirstWorkNameFmt, testCRPName), Namespace: namespaceName}, &work) + }, timeout, interval).Should(Succeed(), "Failed to get the expected work in hub cluster") + By(fmt.Sprintf("work %s is created in %s", work.Name, work.Namespace)) + //inspect the work + wantWork := fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.FirstWorkNameFmt, testCRPName), + Namespace: namespaceName, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: fleetv1beta1.GroupVersion.String(), + Kind: "ClusterResourceBinding", + Name: binding.Name, + UID: binding.UID, + BlockOwnerDeletion: pointer.Bool(true), + }, + }, + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testCRPName, + fleetv1beta1.ParentBindingLabel: binding.Name, + fleetv1beta1.ParentResourceSnapshotIndexLabel: "2", + }, + }, + Spec: fleetv1beta1.WorkSpec{ + Workload: fleetv1beta1.WorkloadTemplate{ + Manifests: []fleetv1beta1.Manifest{ + {RawExtension: runtime.RawExtension{Raw: testClonesetCRD}}, + {RawExtension: runtime.RawExtension{Raw: testNameSpace}}, + }, + }, + }, + } + diff := cmp.Diff(wantWork, work, ignoreWorkOption, ignoreTypeMeta) + Expect(diff).Should(BeEmpty(), fmt.Sprintf("work(%s) mismatch (-want +got):\n%s", work.Name, diff)) + // check the enveloped work is updated + fetchEnvelopedWork(&workList, binding) + work = workList.Items[0] + By(fmt.Sprintf("envelope work %s is updated in %s", work.Name, 
work.Namespace)) + //inspect the envelope work + wantWork = fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: work.Name, + Namespace: namespaceName, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: fleetv1beta1.GroupVersion.String(), + Kind: "ClusterResourceBinding", + Name: binding.Name, + UID: binding.UID, + BlockOwnerDeletion: pointer.Bool(true), + }, + }, + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testCRPName, + fleetv1beta1.ParentBindingLabel: binding.Name, + fleetv1beta1.ParentResourceSnapshotIndexLabel: "2", + fleetv1beta1.EnvelopeTypeLabel: string(fleetv1beta1.ConfigMapEnvelopeType), + fleetv1beta1.EnvelopeNameLabel: "envelop-configmap", + fleetv1beta1.EnvelopeNamespaceLabel: "app", + }, + }, + Spec: fleetv1beta1.WorkSpec{ + Workload: fleetv1beta1.WorkloadTemplate{ + Manifests: []fleetv1beta1.Manifest{ + {RawExtension: runtime.RawExtension{Raw: testEnvelopeWebhook}}, + }, + }, + }, + } + diff = cmp.Diff(wantWork, work, ignoreWorkOption, ignoreTypeMeta) + Expect(diff).Should(BeEmpty(), fmt.Sprintf("envelop work(%s) mismatch (-want +got):\n%s", work.Name, diff)) + }) + + It("Should delete the enveloped work object in the target namespace after it's removed from snapshot", func() { + // make sure the enveloped work is created + var workList fleetv1beta1.WorkList + fetchEnvelopedWork(&workList, binding) + By("create a second snapshot without an enveloped object") + // create a second snapshot without an enveloped object + masterSnapshot = generateResourceSnapshot(2, 1, 0, [][]byte{ + testClonesetCRD, testNameSpace, + }) + Expect(k8sClient.Create(ctx, masterSnapshot)).Should(Succeed()) + By(fmt.Sprintf("another master resource snapshot %s created", masterSnapshot.Name)) + // update binding + Expect(k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding)).Should(Succeed()) + binding.Spec.ResourceSnapshotName = masterSnapshot.Name + Expect(k8sClient.Update(ctx, binding)).Should(Succeed()) + By(fmt.Sprintf("resource binding %s updated", binding.Name)) + // check the binding status till the bound condition is true for the second binding generation + Eventually(func() bool { + if err := k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding); err != nil { + return false + } + if binding.GetGeneration() <= 1 { + return false + } + // only check the bound status as the applied status reason changes depends on where the reconcile logic is + return condition.IsConditionStatusTrue( + meta.FindStatusCondition(binding.Status.Conditions, string(fleetv1beta1.ResourceBindingBound)), binding.GetGeneration()) + }, timeout, interval).Should(BeTrue(), fmt.Sprintf("binding(%s) condition should be true", binding.Name)) + By(fmt.Sprintf("resource binding %s is reconciled", binding.Name)) + // check the enveloped work is deleted + Eventually(func() error { + envelopWorkLabelMatcher := client.MatchingLabels{ + fleetv1beta1.ParentBindingLabel: binding.Name, + fleetv1beta1.CRPTrackingLabel: testCRPName, + fleetv1beta1.EnvelopeTypeLabel: string(fleetv1beta1.ConfigMapEnvelopeType), + fleetv1beta1.EnvelopeNameLabel: "envelop-configmap", + fleetv1beta1.EnvelopeNamespaceLabel: "app", + } + if err := k8sClient.List(ctx, &workList, envelopWorkLabelMatcher); err != nil { + return err + } + if len(workList.Items) != 0 { + return fmt.Errorf("expect to not get any enveloped work but got %d", len(workList.Items)) + } + return nil + }, timeout, interval).Should(Succeed(), "Failed to delete the expected enveloped work in hub cluster") }) }) @@ -710,6 
+962,26 @@ var _ = Describe("Test Work Generator Controller", func() { }) }) +func fetchEnvelopedWork(workList *fleetv1beta1.WorkList, binding *fleetv1beta1.ClusterResourceBinding) { + // try to locate the work that contains enveloped object + Eventually(func() error { + envelopWorkLabelMatcher := client.MatchingLabels{ + fleetv1beta1.ParentBindingLabel: binding.Name, + fleetv1beta1.CRPTrackingLabel: testCRPName, + fleetv1beta1.EnvelopeTypeLabel: string(fleetv1beta1.ConfigMapEnvelopeType), + fleetv1beta1.EnvelopeNameLabel: "envelop-configmap", + fleetv1beta1.EnvelopeNamespaceLabel: "app", + } + if err := k8sClient.List(ctx, workList, envelopWorkLabelMatcher); err != nil { + return err + } + if len(workList.Items) != 1 { + return fmt.Errorf("expect to get one enveloped work but got %d", len(workList.Items)) + } + return nil + }, timeout, interval).Should(Succeed(), "Failed to get the expected enveloped work in hub cluster") +} + func generateClusterResourceBinding(state fleetv1beta1.BindingState, resourceSnapshotName, targetCluster string) *fleetv1beta1.ClusterResourceBinding { return &fleetv1beta1.ClusterResourceBinding{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/controllers/workgenerator/controller_test.go b/pkg/controllers/workgenerator/controller_test.go index ffd39f360..b59ccb9a7 100644 --- a/pkg/controllers/workgenerator/controller_test.go +++ b/pkg/controllers/workgenerator/controller_test.go @@ -17,7 +17,7 @@ import ( "go.goms.io/fleet/pkg/utils/controller" ) -func Test_getWorkNameFromSnapshotName(t *testing.T) { +func TestGetWorkNamePrefixFromSnapshotName(t *testing.T) { tests := map[string]struct { resourceSnapshot *fleetv1beta1.ClusterResourceSnapshot wantErr error @@ -110,19 +110,19 @@ func Test_getWorkNameFromSnapshotName(t *testing.T) { } for name, tt := range tests { t.Run(name, func(t *testing.T) { - workName, err := getWorkNameFromSnapshotName(tt.resourceSnapshot) + workName, err := getWorkNamePrefixFromSnapshotName(tt.resourceSnapshot) if !errors.Is(err, tt.wantErr) { - t.Errorf("failed getWorkNameFromSnapshotName test `%s` error = %v, wantErr %v", name, err, tt.wantErr) + t.Errorf("failed getWorkNamePrefixFromSnapshotName test `%s` error = %v, wantErr %v", name, err, tt.wantErr) return } if workName != tt.wantedName { - t.Errorf("getWorkNameFromSnapshotName test `%s` workName = `%v`, wantedName `%v`", name, workName, tt.wantedName) + t.Errorf("getWorkNamePrefixFromSnapshotName test `%s` workName = `%v`, wantedName `%v`", name, workName, tt.wantedName) } }) } } -func Test_buildAllWorkAppliedCondition(t *testing.T) { +func TestBuildAllWorkAppliedCondition(t *testing.T) { tests := map[string]struct { works map[string]*fleetv1beta1.Work generation int64 diff --git a/pkg/controllers/workgenerator/manifests/resourceQuota.yaml b/pkg/controllers/workgenerator/manifests/resourceQuota.yaml new file mode 100644 index 000000000..2db32cb36 --- /dev/null +++ b/pkg/controllers/workgenerator/manifests/resourceQuota.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ResourceQuota +metadata: + name: mem-cpu-demo + namespace: app +spec: + hard: + requests.cpu: "1" + requests.memory: 1Gi + limits.cpu: "2" + limits.memory: 2Gi diff --git a/pkg/controllers/workgenerator/manifests/test-envelop-configmap.yaml b/pkg/controllers/workgenerator/manifests/test-envelop-configmap.yaml new file mode 100644 index 000000000..3f232dfa4 --- /dev/null +++ b/pkg/controllers/workgenerator/manifests/test-envelop-configmap.yaml @@ -0,0 +1,51 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: envelop-configmap 
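+  # the annotation below marks this configMap as an envelope: only the resources under its
+  # data field are applied to the member cluster, never the configMap itself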
+ namespace: app + annotations: + kubernetes-fleet.io/envelope-configmap: "true" +data: + resourceQuota.yaml: | + apiVersion: v1 + kind: ResourceQuota + metadata: + name: mem-cpu-demo + namespace: app + spec: + hard: + requests.cpu: "1" + requests.memory: 1Gi + limits.cpu: "2" + limits.memory: 2Gi + webhook.yaml: | + apiVersion: admissionregistration.k8s.io/v1 + kind: MutatingWebhookConfiguration + metadata: + creationTimestamp: null + labels: + azure-workload-identity.io/system: "true" + name: azure-wi-webhook-mutating-webhook-configuration + webhooks: + - admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: azure-wi-webhook-webhook-service + namespace: app + path: /mutate-v1-pod + failurePolicy: Fail + matchPolicy: Equivalent + name: mutation.azure-workload-identity.io + rules: + - apiGroups: + - "" + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - pods + sideEffects: None \ No newline at end of file diff --git a/pkg/controllers/workgenerator/manifests/test-envelop-configmap2.yaml b/pkg/controllers/workgenerator/manifests/test-envelop-configmap2.yaml new file mode 100644 index 000000000..489969d12 --- /dev/null +++ b/pkg/controllers/workgenerator/manifests/test-envelop-configmap2.yaml @@ -0,0 +1,39 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: envelop-configmap + namespace: app + annotations: + kubernetes-fleet.io/envelope-configmap: "true" +data: + webhook.yaml: | + apiVersion: admissionregistration.k8s.io/v1 + kind: MutatingWebhookConfiguration + metadata: + creationTimestamp: null + labels: + azure-workload-identity.io/system: "true" + name: azure-wi-webhook-mutating-webhook-configuration + webhooks: + - admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: azure-wi-webhook-webhook-service + namespace: app + path: /mutate-v1-pod + failurePolicy: Fail + matchPolicy: Equivalent + name: mutation.azure-workload-identity.io + rules: + - apiGroups: + - "" + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - pods + sideEffects: None \ No newline at end of file diff --git a/pkg/controllers/workgenerator/manifests/webhook.yaml b/pkg/controllers/workgenerator/manifests/webhook.yaml new file mode 100644 index 000000000..e360fa859 --- /dev/null +++ b/pkg/controllers/workgenerator/manifests/webhook.yaml @@ -0,0 +1,29 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + labels: + azure-workload-identity.io/system: "true" + name: azure-wi-webhook-mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: azure-wi-webhook-webhook-service + namespace: app + path: /mutate-v1-pod + failurePolicy: Fail + matchPolicy: Equivalent + name: mutation.azure-workload-identity.io + rules: + - apiGroups: + - "" + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - pods + sideEffects: None diff --git a/pkg/controllers/workgenerator/suite_test.go b/pkg/controllers/workgenerator/suite_test.go index eeb095796..474e967fb 100644 --- a/pkg/controllers/workgenerator/suite_test.go +++ b/pkg/controllers/workgenerator/suite_test.go @@ -39,7 +39,10 @@ var ( cancel context.CancelFunc // pre loaded test manifests - testClonesetCRD, testNameSpace, testCloneset, testConfigMap, testPdb []byte + testClonesetCRD, testNameSpace, testCloneset, testConfigMap, testEnvelopConfigMap, testEnvelopConfigMap2, testPdb []byte + + // the content of the enveloped resources + testEnvelopeWebhook, 
testEnvelopeResourceQuota []byte ) func TestAPIs(t *testing.T) { @@ -136,9 +139,33 @@ func readTestManifests() { testConfigMap, err = yaml.ToJSON(rawByte) Expect(err).Should(Succeed()) + By("Read testEnvelopConfigMap resource") + rawByte, err = os.ReadFile("manifests/test-envelop-configmap.yaml") + Expect(err).Should(Succeed()) + testEnvelopConfigMap, err = yaml.ToJSON(rawByte) + Expect(err).Should(Succeed()) + + By("Read testEnvelopConfigMap2 resource") + rawByte, err = os.ReadFile("manifests/test-envelop-configmap2.yaml") + Expect(err).Should(Succeed()) + testEnvelopConfigMap2, err = yaml.ToJSON(rawByte) + Expect(err).Should(Succeed()) + By("Read PodDisruptionBudget") rawByte, err = os.ReadFile("manifests/test_pdb.yaml") Expect(err).Should(Succeed()) testPdb, err = yaml.ToJSON(rawByte) Expect(err).Should(Succeed()) + + By("Read EnvelopeWebhook") + rawByte, err = os.ReadFile("manifests/webhook.yaml") + Expect(err).Should(Succeed()) + testEnvelopeWebhook, err = yaml.ToJSON(rawByte) + Expect(err).Should(Succeed()) + + By("Read ResourceQuota") + rawByte, err = os.ReadFile("manifests/resourceQuota.yaml") + Expect(err).Should(Succeed()) + testEnvelopeResourceQuota, err = yaml.ToJSON(rawByte) + Expect(err).Should(Succeed()) } diff --git a/pkg/utils/common.go b/pkg/utils/common.go index 5c34c9456..fe25bf42e 100644 --- a/pkg/utils/common.go +++ b/pkg/utils/common.go @@ -158,6 +158,12 @@ var ( Version: corev1.SchemeGroupVersion.Version, Resource: "services", } + + ConfigMapGVK = schema.GroupVersionKind{ + Group: corev1.GroupName, + Version: corev1.SchemeGroupVersion.Version, + Kind: "ConfigMap", + } ) // RandSecureInt returns a uniform random value in [1, max] or panic. diff --git a/test/integration/cluster_placement_test.go b/test/integration/cluster_placement_test.go index 0a4c3bfae..206a7934d 100644 --- a/test/integration/cluster_placement_test.go +++ b/test/integration/cluster_placement_test.go @@ -10,7 +10,6 @@ import ( "reflect" "time" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" . "github.com/onsi/ginkgo/v2" @@ -1628,7 +1627,7 @@ var _ = Describe("Test Cluster Resource Placement Controller", func() { Namespace: ns.Name, }, Spec: kruisev1alpha1.CloneSetSpec{ - Replicas: to.Int32Ptr(20), + Replicas: pointer.Int32(20), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"app.kubernetes.io/name": "test-clone-set"}, }, From dc20bebc23037034aae79eea91b516ab9f9c15c3 Mon Sep 17 00:00:00 2001 From: Ryan Zhang Date: Mon, 9 Oct 2023 14:36:44 -0700 Subject: [PATCH 2/6] address comments --- apis/placement/v1beta1/commons.go | 2 +- pkg/controllers/workgenerator/controller.go | 5 +++-- .../manifests/{resourceQuota.yaml => resourcequota.yaml} | 0 .../workgenerator/manifests/test-envelop-configmap.yaml | 2 +- .../workgenerator/manifests/test-envelop-configmap2.yaml | 2 +- pkg/controllers/workgenerator/suite_test.go | 2 +- 6 files changed, 7 insertions(+), 6 deletions(-) rename pkg/controllers/workgenerator/manifests/{resourceQuota.yaml => resourcequota.yaml} (100%) diff --git a/apis/placement/v1beta1/commons.go b/apis/placement/v1beta1/commons.go index e74debac4..f5e682928 100644 --- a/apis/placement/v1beta1/commons.go +++ b/apis/placement/v1beta1/commons.go @@ -79,5 +79,5 @@ const ( // PreviousBindingStateAnnotation is the annotation that records the previous state of a binding. // This is used to remember if an "unscheduled" binding was moved from a "bound" state or a "scheduled" state. 
-	PreviousBindingStateAnnotation = fleetPrefix + "PreviousBindingState"
+	PreviousBindingStateAnnotation = fleetPrefix + "previous-binding-state"
 )
diff --git a/pkg/controllers/workgenerator/controller.go b/pkg/controllers/workgenerator/controller.go
index 63d5ed762..bd7539209 100644
--- a/pkg/controllers/workgenerator/controller.go
+++ b/pkg/controllers/workgenerator/controller.go
@@ -279,7 +279,7 @@ func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1be
 		// generate a work object for the manifests if there are still any non enveloped resources
 		newWork = append(newWork, generateSnapshotWorkObj(workNamePrefix, resourceBinding, snapshot, simpleManifests))
 	} else {
-		klog.V(2).InfoS("Skip generating work for the snapshot since there is no none-enveloped resource in the snapshot", "snapshot", klog.KObj(snapshot))
+		klog.V(2).InfoS("The snapshot contains only enveloped resources", "snapshot", klog.KObj(snapshot))
 	}
 	// issue all the create/update requests for the corresponding works for each snapshot in parallel
 	for i := range newWork {
@@ -393,7 +393,8 @@ func (r *Reconciler) getConfigMapEnvelopWorkObj(ctx context.Context, workNamePre
 			"resourceBinding", klog.KObj(resourceBinding), "configMapWrapper", klog.KObj(envelopeObj))
 		return nil, controller.NewUserError(err)
 	}
-	klog.V(2).InfoS("Successfully extract the enveloped resources from the configMap", "numOfResources", len(manifest), "configMapWrapper", klog.KObj(envelopeObj))
+	klog.V(2).InfoS("Successfully extracted the enveloped resources from the configMap", "numOfResources", len(manifest),
+		"snapshot", klog.KObj(resourceSnapshot), "resourceBinding", klog.KObj(resourceBinding), "configMapWrapper", klog.KObj(envelopeObj))
 	// Try to see if we already have a work represent the same enveloped object for this CRP in the same cluster
 	envelopWorkLabelMatcher := client.MatchingLabels{
 		fleetv1beta1.ParentBindingLabel:     resourceBinding.Name,
diff --git a/pkg/controllers/workgenerator/manifests/resourceQuota.yaml b/pkg/controllers/workgenerator/manifests/resourcequota.yaml
similarity index 100%
rename from pkg/controllers/workgenerator/manifests/resourceQuota.yaml
rename to pkg/controllers/workgenerator/manifests/resourcequota.yaml
diff --git a/pkg/controllers/workgenerator/manifests/test-envelop-configmap.yaml b/pkg/controllers/workgenerator/manifests/test-envelop-configmap.yaml
index 3f232dfa4..451947aae 100644
--- a/pkg/controllers/workgenerator/manifests/test-envelop-configmap.yaml
+++ b/pkg/controllers/workgenerator/manifests/test-envelop-configmap.yaml
@@ -48,4 +48,4 @@ data:
     - UPDATE
     resources:
     - pods
-    sideEffects: None
\ No newline at end of file
+    sideEffects: None
diff --git a/pkg/controllers/workgenerator/manifests/test-envelop-configmap2.yaml b/pkg/controllers/workgenerator/manifests/test-envelop-configmap2.yaml
index 489969d12..a580d6971 100644
--- a/pkg/controllers/workgenerator/manifests/test-envelop-configmap2.yaml
+++ b/pkg/controllers/workgenerator/manifests/test-envelop-configmap2.yaml
@@ -36,4 +36,4 @@ data:
     - UPDATE
     resources:
     - pods
-    sideEffects: None
\ No newline at end of file
+    sideEffects: None
diff --git a/pkg/controllers/workgenerator/suite_test.go b/pkg/controllers/workgenerator/suite_test.go
index 474e967fb..a02d89a8c 100644
--- a/pkg/controllers/workgenerator/suite_test.go
+++ b/pkg/controllers/workgenerator/suite_test.go
@@ -164,7 +164,7 @@ func readTestManifests() {
 	Expect(err).Should(Succeed())
 
 	By("Read ResourceQuota")
-	rawByte, err = os.ReadFile("manifests/resourceQuota.yaml")
+	rawByte, err = 
os.ReadFile("manifests/resourcequota.yaml")
 	Expect(err).Should(Succeed())
 	testEnvelopeResourceQuota, err = yaml.ToJSON(rawByte)
 	Expect(err).Should(Succeed())

From fa8e1b5a04c148198d0596b61a2faa7cd2fa1d65 Mon Sep 17 00:00:00 2001
From: Ryan Zhang
Date: Mon, 9 Oct 2023 23:03:59 -0700
Subject: [PATCH 3/6] fix UT

---
 pkg/controllers/workgenerator/controller.go   | 17 ++++++++++++++---
 .../controller_integration_test.go            |  2 +-
 2 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/pkg/controllers/workgenerator/controller.go b/pkg/controllers/workgenerator/controller.go
index bd7539209..a2a598b5f 100644
--- a/pkg/controllers/workgenerator/controller.go
+++ b/pkg/controllers/workgenerator/controller.go
@@ -10,7 +10,9 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"sort"
 	"strconv"
+	"strings"
 	"time"
 
 	"go.uber.org/atomic"
@@ -396,6 +398,7 @@ func (r *Reconciler) getConfigMapEnvelopWorkObj(ctx context.Context, workNamePre
 	klog.V(2).InfoS("Successfully extracted the enveloped resources from the configMap", "numOfResources", len(manifest),
 		"snapshot", klog.KObj(resourceSnapshot), "resourceBinding", klog.KObj(resourceBinding), "configMapWrapper", klog.KObj(envelopeObj))
 	// Try to see if we already have a work represent the same enveloped object for this CRP in the same cluster
+	// The ParentResourceSnapshotIndexLabel can change between snapshots, so we have to exclude that label in the match
 	envelopWorkLabelMatcher := client.MatchingLabels{
 		fleetv1beta1.ParentBindingLabel: resourceBinding.Name,
 		fleetv1beta1.CRPTrackingLabel:   resourceBinding.Labels[fleetv1beta1.CRPTrackingLabel],
@@ -574,15 +577,23 @@ func extractResFromConfigMap(uConfigMap *unstructured.Unstructured) ([]fleetv1be
 	if err != nil {
 		return nil, err
 	}
+	// the list order is not stable since map traversal order is random
 	for _, value := range configMap.Data {
-		content, err := yaml.ToJSON([]byte(value))
-		if err != nil {
-			return nil, err
+		content, jsonErr := yaml.ToJSON([]byte(value))
+		if jsonErr != nil {
+			return nil, jsonErr
 		}
 		manifests = append(manifests, fleetv1beta1.Manifest{
 			RawExtension: runtime.RawExtension{Raw: content},
 		})
 	}
+	// sort the manifests so that their order is deterministic
+	sort.Slice(manifests, func(i, j int) bool {
+		obj1 := manifests[i].Raw
+		obj2 := manifests[j].Raw
+		// order by the JSON-formatted string
+		return strings.Compare(string(obj1), string(obj2)) > 0
+	})
 	return manifests, nil
 }
 
diff --git a/pkg/controllers/workgenerator/controller_integration_test.go b/pkg/controllers/workgenerator/controller_integration_test.go
index 74b458560..8e1dd8c90 100644
--- a/pkg/controllers/workgenerator/controller_integration_test.go
+++ b/pkg/controllers/workgenerator/controller_integration_test.go
@@ -380,7 +380,7 @@ var _ = Describe("Test Work Generator Controller", func() {
 		})
 	})
 
-	FContext("Test Bound ClusterResourceBinding with a single resource snapshot with envelop objects", func() {
+	Context("Test Bound ClusterResourceBinding with a single resource snapshot with envelop objects", func() {
 		var masterSnapshot *fleetv1beta1.ClusterResourceSnapshot
 
 		BeforeEach(func() {

From 55b2d45a6a062b1200c1280ab28e74dbdd752809 Mon Sep 17 00:00:00 2001
From: Ryan Zhang
Date: Thu, 12 Oct 2023 12:57:49 -0700
Subject: [PATCH 4/6] fix concurrent race

---
 go.sum                                      | 42 +++++----------------
 pkg/controllers/workgenerator/controller.go |  8 ++--
 2 files changed, 15 insertions(+), 35 deletions(-)

diff --git a/go.sum b/go.sum
index c86aa1f08..1872fd304 100644
--- a/go.sum
+++ b/go.sum
@@ -1,19 +1,14 @@
 cloud.google.com/go v0.26.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.0 h1:8wVJL0HUP5yDFXvotdewORTw7Yu88JbreWN/mobSvsQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.0/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1 h1:/iHxaJhsFr0+xVFfbMr5vxz848jyiWuIEDhYq3y5odY= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.13.0 h1:bLRntPH25SkY1uZ/YZW+dmxNky9r1fAHvDFrzluo+4Q= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.13.0/go.mod h1:TmXReXZ9yPp5D5TBRMTAtyz+UyOl15Py4hL5E5p6igQ= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= -github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I= -github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.1 h1:sLZ/Y+P/5RRtsXWylBjB5lkgixYfm0MQPiwrSX//JSo= -github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.1/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/k8s-work-api v0.4.3 h1:fxwO/QZftM3CW9FNl/JTHRQmfbQPa83VwOxR0HadECk= github.com/Azure/k8s-work-api v0.4.3/go.mod h1:FOGJkJ+uxjWlvUgmqUlRcmr4Q2ijocrUO/aLJv827y8= -github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0 h1:WVsrXCnHlDDX8ls+tootqRE87/hL9S/g4ewig9RsD/c= -github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -38,8 +33,7 @@ github.com/crossplane/crossplane-runtime v0.19.2/go.mod h1:OJQ1NxtQK2ZTRmvtnQPoy github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= -github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= @@ -72,8 +66,7 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.3.2 
h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= -github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -110,7 +103,6 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 h1:n6vlPhxsA+BW/XsS5+uqi7GyzaLa5MH7qlSLBZtRdiA= github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -145,8 +137,6 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= -github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q= @@ -155,7 +145,6 @@ github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU= github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4= github.com/openkruise/kruise v1.2.0 h1:EPhaFPe1LyBT2Z7ny5nhKymm6LewNV7nAfS/dDh/1BU= github.com/openkruise/kruise v1.2.0/go.mod h1:R0Nr5GmyxPMncBYvRIJXmFeji9j3AS1iGX35srpxOb4= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -163,16 +152,14 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
-github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= -github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.10.0 h1:UkG7GPYkO4UZyLnyXjaWYcgOSONqwdBqFUT95ugmt6I= -github.com/prometheus/procfs v0.10.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= @@ -192,8 +179,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= @@ -215,8 +202,7 @@ go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b h1:Qwe1rC8PSniVfAFPFJeyUkB+zcysC3RgJBAGk7eqBEU= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw= golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -235,12 +221,9 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= @@ -262,13 +245,11 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -280,7 +261,6 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= @@ -339,7 +319,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 
h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
 gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -347,7 +326,6 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
 gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/pkg/controllers/workgenerator/controller.go b/pkg/controllers/workgenerator/controller.go
index a2a598b5f..c198b38d3 100644
--- a/pkg/controllers/workgenerator/controller.go
+++ b/pkg/controllers/workgenerator/controller.go
@@ -272,6 +272,7 @@ func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1be
 			if err != nil {
 				return false, err
 			}
+			activeWork[work.Name] = work
 			newWork = append(newWork, work)
 		} else {
 			simpleManifests = append(simpleManifests, fleetv1beta1.Manifest(selectedResource))
@@ -279,16 +280,17 @@ func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1be
 	}
 	if len(simpleManifests) != 0 {
 		// generate a work object for the manifests if there are still any non enveloped resources
-		newWork = append(newWork, generateSnapshotWorkObj(workNamePrefix, resourceBinding, snapshot, simpleManifests))
+		work := generateSnapshotWorkObj(workNamePrefix, resourceBinding, snapshot, simpleManifests)
+		activeWork[work.Name] = work
+		newWork = append(newWork, work)
 	} else {
 		klog.V(2).InfoS("The snapshot contains only enveloped resources", "snapshot", klog.KObj(snapshot))
 	}
 	// issue all the create/update requests for the corresponding works for each snapshot in parallel
 	for i := range newWork {
 		work := newWork[i]
-		activeWork[work.Name] = work
 		errs.Go(func() error {
-			updated, err := r.upsertWork(cctx, work, existingWorks[work.Name], snapshot)
+			updated, err := r.upsertWork(cctx, work, existingWorks[work.Name].DeepCopy(), snapshot)
 			if err != nil {
 				return err
 			}

From 3a1b4a964d9f85a93f19c426f91d076604345432 Mon Sep 17 00:00:00 2001
From: Ryan Zhang
Date: Fri, 13 Oct 2023 15:03:18 -0700
Subject: [PATCH 5/6] fix UT and keep the previous behavior that we create an
 empty work

---
 .../rollout/controller_integration_test.go  | 25 ++++++++++---------
 pkg/controllers/workgenerator/controller.go | 14 ++++++-----
 2 files changed, 21 insertions(+), 18 deletions(-)

diff --git a/pkg/controllers/rollout/controller_integration_test.go b/pkg/controllers/rollout/controller_integration_test.go
index b57cb989e..0943bb553 100644
--- a/pkg/controllers/rollout/controller_integration_test.go
+++ b/pkg/controllers/rollout/controller_integration_test.go
@@ -161,7 +161,7 @@ var _ = Describe("Test the rollout Controller", func() {
 		// simulate that some of the bindings are applied
 		firstApplied := 3
 		for i 
:= 0; i < firstApplied; i++ { - markBindingApplied(bindings[i]) + markBindingApplied(bindings[i].GetName()) } // simulate another scheduling decision, pick some cluster to unselect from the bottom of the list var newTarget int32 = 9 @@ -182,7 +182,7 @@ var _ = Describe("Test the rollout Controller", func() { } // simulate that some of the bindings are applied for i := firstApplied; i < int(newTarget); i++ { - markBindingApplied(bindings[i]) + markBindingApplied(bindings[i].GetName()) } newScheduled := int(newTarget) - stillScheduled for i := 0; i < newScheduled; i++ { @@ -194,7 +194,7 @@ var _ = Describe("Test the rollout Controller", func() { } // simulate that some of the bindings are applied for i := int(newTarget); i < int(targetCluster); i++ { - markBindingApplied(bindings[i]) + markBindingApplied(bindings[i].GetName()) } // check that the second round of bindings are scheduled Eventually(func() bool { @@ -211,7 +211,7 @@ var _ = Describe("Test the rollout Controller", func() { }, timeout, interval).Should(BeTrue(), "rollout controller should roll all the bindings to Bound state") // simulate that the new bindings are applied for i := 0; i < len(secondRoundBindings); i++ { - markBindingApplied(secondRoundBindings[i]) + markBindingApplied(secondRoundBindings[i].GetName()) } // check that the unselected bindings are deleted Eventually(func() bool { @@ -258,7 +258,7 @@ var _ = Describe("Test the rollout Controller", func() { // simulate that some of the bindings are applied firstApplied := 3 for i := 0; i < firstApplied; i++ { - markBindingApplied(bindings[i]) + markBindingApplied(bindings[i].GetName()) } // simulate another scheduling decision, pick some cluster to unselect from the bottom of the list var newTarget int32 = 9 @@ -280,7 +280,7 @@ var _ = Describe("Test the rollout Controller", func() { } // simulate that some of the bindings are applied for i := firstApplied; i < int(newTarget); i++ { - markBindingApplied(bindings[i]) + markBindingApplied(bindings[i].GetName()) } // create the newly scheduled bindings newScheduled := int(newTarget) - stillScheduled @@ -293,7 +293,7 @@ var _ = Describe("Test the rollout Controller", func() { } // simulate that some of the bindings are applied for i := int(newTarget); i < int(targetCluster); i++ { - markBindingApplied(bindings[i]) + markBindingApplied(bindings[i].GetName()) } // mark the master snapshot as not latest masterSnapshot.SetLabels(map[string]string{ @@ -319,7 +319,7 @@ var _ = Describe("Test the rollout Controller", func() { }, timeout, interval).Should(BeTrue(), "rollout controller should roll all the bindings to Bound state") // simulate that the new bindings are applied for i := 0; i < len(secondRoundBindings); i++ { - markBindingApplied(secondRoundBindings[i]) + markBindingApplied(secondRoundBindings[i].GetName()) } // check that the unselected bindings are deleted Eventually(func() bool { @@ -342,7 +342,7 @@ var _ = Describe("Test the rollout Controller", func() { } if binding.Spec.ResourceSnapshotName == newMasterSnapshot.Name { // simulate the work generator to make the newly updated bindings to be applied - markBindingApplied(binding) + markBindingApplied(binding.GetName()) } else { misMatch = true } @@ -458,16 +458,17 @@ var _ = Describe("Test the rollout Controller", func() { }) -func markBindingApplied(binding *fleetv1beta1.ClusterResourceBinding) { +func markBindingApplied(bindingName string) { + binding := fleetv1beta1.ClusterResourceBinding{} // get the binding again to avoid conflict - Expect(k8sClient.Get(ctx, 
types.NamespacedName{Name: binding.GetName()}, binding)).Should(Succeed())
+	Expect(k8sClient.Get(ctx, types.NamespacedName{Name: bindingName}, &binding)).Should(Succeed())
 	binding.SetConditions(metav1.Condition{
 		Status:             metav1.ConditionTrue,
 		Type:               string(fleetv1beta1.ResourceBindingApplied),
 		Reason:             "applied",
 		ObservedGeneration: binding.Generation,
 	})
-	Expect(k8sClient.Status().Update(ctx, binding)).Should(Succeed())
+	Expect(k8sClient.Status().Update(ctx, &binding)).Should(Succeed())
 	By(fmt.Sprintf("resource binding `%s` is marked as applied", binding.Name))
 }
 
diff --git a/pkg/controllers/workgenerator/controller.go b/pkg/controllers/workgenerator/controller.go
index c198b38d3..f263bd512 100644
--- a/pkg/controllers/workgenerator/controller.go
+++ b/pkg/controllers/workgenerator/controller.go
@@ -278,14 +278,16 @@ func (r *Reconciler) syncAllWork(ctx context.Context, resourceBinding *fleetv1be
 			simpleManifests = append(simpleManifests, fleetv1beta1.Manifest(selectedResource))
 		}
 	}
-	if len(simpleManifests) != 0 {
-		// generate a work object for the manifests if there are still any non enveloped resources
-		work := generateSnapshotWorkObj(workNamePrefix, resourceBinding, snapshot, simpleManifests)
-		activeWork[work.Name] = work
-		newWork = append(newWork, work)
-	} else {
+	if len(simpleManifests) == 0 {
 		klog.V(2).InfoS("The snapshot contains only enveloped resources", "snapshot", klog.KObj(snapshot))
 	}
+	// generate a work object for the manifests even if there is nothing to place
+	// to allow CRP to collect the status of the placement
+	// TODO (RZ): revisit to see if we need this hack
+	work := generateSnapshotWorkObj(workNamePrefix, resourceBinding, snapshot, simpleManifests)
+	activeWork[work.Name] = work
+	newWork = append(newWork, work)
+
 	// issue all the create/update requests for the corresponding works for each snapshot in parallel
 	for i := range newWork {
 		work := newWork[i]

From 78d56fcf07bbe01accc6ab4bb5f8196064f989a7 Mon Sep 17 00:00:00 2001
From: Ryan Zhang
Date: Mon, 16 Oct 2023 01:25:21 -0700
Subject: [PATCH 6/6] address comment

---
 .../clusterresourceplacement/placement_status_test.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pkg/controllers/clusterresourceplacement/placement_status_test.go b/pkg/controllers/clusterresourceplacement/placement_status_test.go
index ab7ede38b..61af8d093 100644
--- a/pkg/controllers/clusterresourceplacement/placement_status_test.go
+++ b/pkg/controllers/clusterresourceplacement/placement_status_test.go
@@ -1007,7 +1007,7 @@ func TestBuildFailedResourcePlacements(t *testing.T) {
 			Status: fleetv1beta1.WorkStatus{
 				Conditions: []metav1.Condition{
 					{
-						Type:               string(fleetv1beta1.WorkConditionTypeApplied),
+						Type:               fleetv1beta1.WorkConditionTypeApplied,
 						ObservedGeneration: 1,
 						Status:             metav1.ConditionTrue,
 					},
@@ -1017,7 +1017,7 @@ func TestBuildFailedResourcePlacements(t *testing.T) {
 		wantIsPending: true,
 		wantRes:       nil,
 	},
-	"pending if applied failed with multiple object": {
+	"report failure if applied failed with multiple objects": {
 		work: &fleetv1beta1.Work{
 			ObjectMeta: metav1.ObjectMeta{
 				Generation: 1,
@@ -1100,7 +1100,7 @@ func TestBuildFailedResourcePlacements(t *testing.T) {
 			},
 		},
 	},
-	"pending if applied failed with an envelop object": {
+	"report failure if applied failed with an envelope object": {
 		work: &fleetv1beta1.Work{
 			ObjectMeta: metav1.ObjectMeta{
 				Generation: 1,
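
For illustration only (not part of the patch series): a minimal standalone Go sketch of why PATCH 3 sorts the manifests extracted from an envelope ConfigMap. Go randomizes map iteration order, so collecting configMap.Data values naively yields a different manifest order on every reconcile, which would make the generated work object look changed when nothing did. The data keys and values below are hypothetical stand-ins.

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// stand-in for configMap.Data of an envelope ConfigMap
	data := map[string]string{
		"resourcequota.yaml": `{"kind":"ResourceQuota"}`,
		"webhook.yaml":       `{"kind":"MutatingWebhookConfiguration"}`,
	}

	// map traversal order is random, so this slice's order can differ run to run
	manifests := make([]string, 0, len(data))
	for _, v := range data {
		manifests = append(manifests, v)
	}

	// sorting by the serialized content restores a deterministic order; this mirrors
	// the descending string comparison used in extractResFromConfigMap
	sort.Slice(manifests, func(i, j int) bool {
		return strings.Compare(manifests[i], manifests[j]) > 0
	})
	fmt.Println(manifests)
}

Whether the comparator sorts ascending or descending is immaterial; what matters is that it imposes a total order on the serialized content, so the manifest list is identical across reconciles.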