From 00c484588dd9f1857dcb30448bd9f1aeb70548d5 Mon Sep 17 00:00:00 2001
From: Steve Kuznetsov
Date: Fri, 25 Aug 2023 10:28:07 -0600
Subject: [PATCH] *: label k8s objects we own

Signed-off-by: Steve Kuznetsov
---
 go.mod                                        |   2 +-
 pkg/controller/operators/catalog/operator.go  | 119 +++++++++++++
 .../operators/internal/alongside/alongside.go |   8 +-
 .../internal/alongside/alongside_test.go      |   4 +-
 pkg/controller/operators/labeller/labels.go   | 160 ++++++++++++++++++
 pkg/controller/operators/olm/operator.go      |  62 +++++++
 6 files changed, 348 insertions(+), 7 deletions(-)
 create mode 100644 pkg/controller/operators/labeller/labels.go

diff --git a/go.mod b/go.mod
index e1c43d8bcd..07552c3ba6 100644
--- a/go.mod
+++ b/go.mod
@@ -7,6 +7,7 @@ require (
     github.com/coreos/go-semver v0.3.0
     github.com/davecgh/go-spew v1.1.1
     github.com/distribution/distribution v2.7.1+incompatible
+    github.com/evanphx/json-patch v5.6.0+incompatible
     github.com/fsnotify/fsnotify v1.6.0
     github.com/ghodss/yaml v1.0.0
     github.com/go-air/gini v1.0.4
@@ -95,7 +96,6 @@ require (
     github.com/docker/go-metrics v0.0.1 // indirect
     github.com/docker/go-units v0.5.0 // indirect
     github.com/emicklei/go-restful/v3 v3.10.2 // indirect
-    github.com/evanphx/json-patch v5.6.0+incompatible // indirect
     github.com/evanphx/json-patch/v5 v5.6.0 // indirect
     github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
     github.com/fatih/color v1.13.0 // indirect
diff --git a/pkg/controller/operators/catalog/operator.go b/pkg/controller/operators/catalog/operator.go
index 9f091dc997..db46d25ef7 100644
--- a/pkg/controller/operators/catalog/operator.go
+++ b/pkg/controller/operators/catalog/operator.go
@@ -11,10 +11,13 @@ import (
     "sync"
     "time"
 
+    "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/internal/alongside"
+    "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/labeller"
     "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/validatingroundtripper"
     errorwrap "github.com/pkg/errors"
     "github.com/sirupsen/logrus"
     "google.golang.org/grpc/connectivity"
+    batchv1 "k8s.io/api/batch/v1"
     corev1 "k8s.io/api/core/v1"
     rbacv1 "k8s.io/api/rbac/v1"
     "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
@@ -32,6 +35,9 @@ import (
     utilruntime "k8s.io/apimachinery/pkg/util/runtime"
     "k8s.io/apimachinery/pkg/util/validation/field"
     "k8s.io/apimachinery/pkg/util/yaml"
+    batchv1applyconfigurations "k8s.io/client-go/applyconfigurations/batch/v1"
+    corev1applyconfigurations "k8s.io/client-go/applyconfigurations/core/v1"
+    rbacv1applyconfigurations "k8s.io/client-go/applyconfigurations/rbac/v1"
     "k8s.io/client-go/dynamic"
     "k8s.io/client-go/informers"
     "k8s.io/client-go/metadata"
@@ -103,6 +109,7 @@ type Operator struct {
     client            versioned.Interface
     dynamicClient     dynamic.Interface
     lister            operatorlister.OperatorLister
+    k8sLabelQueueSets map[schema.GroupVersionResource]workqueue.RateLimitingInterface
     catsrcQueueSet    *queueinformer.ResourceQueueSet
     subQueueSet       *queueinformer.ResourceQueueSet
     ipQueueSet        *queueinformer.ResourceQueueSet
@@ -191,6 +198,7 @@ func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clo
         lister:            lister,
         namespace:         operatorNamespace,
         recorder:          eventRecorder,
+        k8sLabelQueueSets: map[schema.GroupVersionResource]workqueue.RateLimitingInterface{},
         catsrcQueueSet:    queueinformer.NewEmptyResourceQueueSet(),
         subQueueSet:       queueinformer.NewEmptyResourceQueueSet(),
         ipQueueSet:        queueinformer.NewEmptyResourceQueueSet(),
@@ -363,21 +371,84 @@ func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clo
     op.lister.RbacV1().RegisterRoleLister(metav1.NamespaceAll, roleInformer.Lister())
     sharedIndexInformers = append(sharedIndexInformers, roleInformer.Informer())
 
+    labelObjects := func(gvr schema.GroupVersionResource, informer cache.SharedIndexInformer, sync queueinformer.LegacySyncHandler) error {
+        op.k8sLabelQueueSets[gvr] = workqueue.NewRateLimitingQueueWithConfig(workqueue.DefaultControllerRateLimiter(), workqueue.RateLimitingQueueConfig{
+            Name: gvr.String(),
+        })
+        queueInformer, err := queueinformer.NewQueueInformer(
+            ctx,
+            queueinformer.WithLogger(op.logger),
+            queueinformer.WithInformer(informer),
+            queueinformer.WithSyncer(sync.ToSyncer()),
+        )
+        if err != nil {
+            return err
+        }
+
+        if err := op.RegisterQueueInformer(queueInformer); err != nil {
+            return err
+        }
+
+        return nil
+    }
+
+    if err := labelObjects(rbacv1.SchemeGroupVersion.WithResource("roles"), roleInformer.Informer(), labeller.ObjectLabeler[*rbacv1.Role, *rbacv1applyconfigurations.RoleApplyConfiguration](
+        ctx, op.logger, labeller.HasOLMOwnerRef,
+        rbacv1applyconfigurations.Role,
+        func(namespace string, ctx context.Context, cfg *rbacv1applyconfigurations.RoleApplyConfiguration, opts metav1.ApplyOptions) (*rbacv1.Role, error) {
+            return op.opClient.KubernetesInterface().RbacV1().Roles(namespace).Apply(ctx, cfg, opts)
+        },
+    )); err != nil {
+        return nil, err
+    }
+
     // Wire RoleBindings
     roleBindingInformer := k8sInformerFactory.Rbac().V1().RoleBindings()
     op.lister.RbacV1().RegisterRoleBindingLister(metav1.NamespaceAll, roleBindingInformer.Lister())
     sharedIndexInformers = append(sharedIndexInformers, roleBindingInformer.Informer())
 
+    if err := labelObjects(rbacv1.SchemeGroupVersion.WithResource("rolebindings"), roleBindingInformer.Informer(), labeller.ObjectLabeler[*rbacv1.RoleBinding, *rbacv1applyconfigurations.RoleBindingApplyConfiguration](
+        ctx, op.logger, labeller.HasOLMOwnerRef,
+        rbacv1applyconfigurations.RoleBinding,
+        func(namespace string, ctx context.Context, cfg *rbacv1applyconfigurations.RoleBindingApplyConfiguration, opts metav1.ApplyOptions) (*rbacv1.RoleBinding, error) {
+            return op.opClient.KubernetesInterface().RbacV1().RoleBindings(namespace).Apply(ctx, cfg, opts)
+        },
+    )); err != nil {
+        return nil, err
+    }
+
     // Wire ServiceAccounts
     serviceAccountInformer := k8sInformerFactory.Core().V1().ServiceAccounts()
     op.lister.CoreV1().RegisterServiceAccountLister(metav1.NamespaceAll, serviceAccountInformer.Lister())
     sharedIndexInformers = append(sharedIndexInformers, serviceAccountInformer.Informer())
 
+    if err := labelObjects(corev1.SchemeGroupVersion.WithResource("serviceaccounts"), serviceAccountInformer.Informer(), labeller.ObjectLabeler[*corev1.ServiceAccount, *corev1applyconfigurations.ServiceAccountApplyConfiguration](
+        ctx, op.logger, func(object metav1.Object) bool {
+            return labeller.HasOLMOwnerRef(object) || labeller.HasOLMLabel(object)
+        },
+        corev1applyconfigurations.ServiceAccount,
+        func(namespace string, ctx context.Context, cfg *corev1applyconfigurations.ServiceAccountApplyConfiguration, opts metav1.ApplyOptions) (*corev1.ServiceAccount, error) {
+            return op.opClient.KubernetesInterface().CoreV1().ServiceAccounts(namespace).Apply(ctx, cfg, opts)
+        },
+    )); err != nil {
+        return nil, err
+    }
+
     // Wire Services
     serviceInformer := k8sInformerFactory.Core().V1().Services()
     op.lister.CoreV1().RegisterServiceLister(metav1.NamespaceAll, serviceInformer.Lister())
     sharedIndexInformers = append(sharedIndexInformers, serviceInformer.Informer())
 
+    if err := labelObjects(corev1.SchemeGroupVersion.WithResource("services"), serviceInformer.Informer(), labeller.ObjectLabeler[*corev1.Service, *corev1applyconfigurations.ServiceApplyConfiguration](
+        ctx, op.logger, labeller.HasOLMOwnerRef,
+        corev1applyconfigurations.Service,
+        func(namespace string, ctx context.Context, cfg *corev1applyconfigurations.ServiceApplyConfiguration, opts metav1.ApplyOptions) (*corev1.Service, error) {
+            return op.opClient.KubernetesInterface().CoreV1().Services(namespace).Apply(ctx, cfg, opts)
+        },
+    )); err != nil {
+        return nil, err
+    }
+
     // Wire Pods for CatalogSource
     catsrcReq, err := labels.NewRequirement(reconciler.CatalogSourceLabelKey, selection.Exists, nil)
     if err != nil {
@@ -392,6 +463,19 @@ func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clo
     op.lister.CoreV1().RegisterPodLister(metav1.NamespaceAll, csPodInformer.Lister())
     sharedIndexInformers = append(sharedIndexInformers, csPodInformer.Informer())
 
+    if err := labelObjects(corev1.SchemeGroupVersion.WithResource("pods"), csPodInformer.Informer(), labeller.ObjectLabeler[*corev1.Pod, *corev1applyconfigurations.PodApplyConfiguration](
+        ctx, op.logger, func(object metav1.Object) bool {
+            _, ok := object.GetLabels()[reconciler.CatalogSourceLabelKey]
+            return ok
+        },
+        corev1applyconfigurations.Pod,
+        func(namespace string, ctx context.Context, cfg *corev1applyconfigurations.PodApplyConfiguration, opts metav1.ApplyOptions) (*corev1.Pod, error) {
+            return op.opClient.KubernetesInterface().CoreV1().Pods(namespace).Apply(ctx, cfg, opts)
+        },
+    )); err != nil {
+        return nil, err
+    }
+
     // Wire Pods for BundleUnpack job
     buReq, err := labels.NewRequirement(bundle.BundleUnpackPodLabel, selection.Exists, nil)
     if err != nil {
@@ -416,6 +500,27 @@ func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clo
     jobInformer := k8sInformerFactory.Batch().V1().Jobs()
     sharedIndexInformers = append(sharedIndexInformers, jobInformer.Informer())
 
+    if err := labelObjects(batchv1.SchemeGroupVersion.WithResource("jobs"), jobInformer.Informer(), labeller.ObjectLabeler[*batchv1.Job, *batchv1applyconfigurations.JobApplyConfiguration](
+        ctx, op.logger, func(object metav1.Object) bool {
+            for _, ownerRef := range object.GetOwnerReferences() {
+                if ownerRef.APIVersion == corev1.SchemeGroupVersion.String() && ownerRef.Kind == "ConfigMap" {
+                    cm, err := configMapInformer.Lister().ConfigMaps(object.GetNamespace()).Get(ownerRef.Name)
+                    if err != nil {
+                        return false
+                    }
+                    return labeller.HasOLMOwnerRef(cm)
+                }
+            }
+            return false
+        },
+        batchv1applyconfigurations.Job,
+        func(namespace string, ctx context.Context, cfg *batchv1applyconfigurations.JobApplyConfiguration, opts metav1.ApplyOptions) (*batchv1.Job, error) {
+            return op.opClient.KubernetesInterface().BatchV1().Jobs(namespace).Apply(ctx, cfg, opts)
+        },
+    )); err != nil {
+        return nil, err
+    }
+
     // Generate and register QueueInformers for k8s resources
     k8sSyncer := queueinformer.LegacySyncHandler(op.syncObject).ToSyncerWithDelete(op.handleDeletion)
     for _, informer := range sharedIndexInformers {
@@ -480,6 +585,20 @@ func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clo
         return nil, err
     }
 
+    if err := labelObjects(apiextensionsv1.SchemeGroupVersion.WithResource("customresourcedefinitions"), crdInformer, labeller.ObjectPatchLabeler(
+        ctx, op.logger, func(object metav1.Object) bool {
+            for key := range object.GetAnnotations() {
+                if strings.HasPrefix(key, alongside.AnnotationPrefix) {
+                    return true
+                }
+            }
+            return false
+        },
+        op.opClient.ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Patch,
+    )); err != nil {
+        return nil, err
+    }
+
     // Namespace sync for resolving subscriptions
     namespaceInformer := informers.NewSharedInformerFactory(op.opClient.KubernetesInterface(), resyncPeriod()).Core().V1().Namespaces()
     op.lister.CoreV1().RegisterNamespaceLister(namespaceInformer.Lister())
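The wiring above leans on the generic labeller.ObjectLabeler, whose ApplyConfig constraint accepts any generated apply configuration that offers WithLabels. A minimal sketch of that pattern follows, using a hypothetical stand-in type rather than a real generated configuration; "olm.managed"/"true" are assumed values for the install package constants:

package main

import "fmt"

// ApplyConfig mirrors the constraint in labels.go: any apply configuration
// whose WithLabels returns the configuration itself.
type ApplyConfig[T any] interface {
    WithLabels(map[string]string) T
}

// roleLikeApplyConfig is a hypothetical stand-in for a generated
// *FooApplyConfiguration type from k8s.io/client-go/applyconfigurations.
type roleLikeApplyConfig struct {
    labels map[string]string
}

func (c *roleLikeApplyConfig) WithLabels(l map[string]string) *roleLikeApplyConfig {
    c.labels = l
    return c
}

// addManagedLabel compiles for any A satisfying ApplyConfig[A], which is how a
// single ObjectLabeler body serves Roles, Pods, Jobs, Deployments, and so on.
func addManagedLabel[A ApplyConfig[A]](cfg A) A {
    return cfg.WithLabels(map[string]string{"olm.managed": "true"})
}

func main() {
    cfg := addManagedLabel(&roleLikeApplyConfig{})
    fmt.Println(cfg.labels) // map[olm.managed:true]
}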
diff --git a/pkg/controller/operators/internal/alongside/alongside.go b/pkg/controller/operators/internal/alongside/alongside.go
index 5004650b27..7517cf2777 100644
--- a/pkg/controller/operators/internal/alongside/alongside.go
+++ b/pkg/controller/operators/internal/alongside/alongside.go
@@ -10,7 +10,7 @@ import (
 )
 
 const (
-    prefix = "operatorframework.io/installed-alongside-"
+    AnnotationPrefix = "operatorframework.io/installed-alongside-"
 )
 
 // NamespacedName is a reference to an object by namespace and name.
@@ -33,7 +33,7 @@ type Annotator struct{}
 func (a Annotator) FromObject(o Annotatable) []NamespacedName {
     var result []NamespacedName
     for k, v := range o.GetAnnotations() {
-        if !strings.HasPrefix(k, prefix) {
+        if !strings.HasPrefix(k, AnnotationPrefix) {
             continue
         }
         tokens := strings.Split(v, "/")
@@ -55,7 +55,7 @@ func (a Annotator) ToObject(o Annotatable, nns []NamespacedName) {
 
     annotations := o.GetAnnotations()
     for key := range annotations {
-        if strings.HasPrefix(key, prefix) {
+        if strings.HasPrefix(key, AnnotationPrefix) {
             delete(annotations, key)
         }
     }
@@ -82,5 +82,5 @@ func key(n NamespacedName) string {
     hasher.Write([]byte(n.Namespace))
     hasher.Write([]byte{'/'})
     hasher.Write([]byte(n.Name))
-    return fmt.Sprintf("%s%x", prefix, hasher.Sum64())
+    return fmt.Sprintf("%s%x", AnnotationPrefix, hasher.Sum64())
 }
diff --git a/pkg/controller/operators/internal/alongside/alongside_test.go b/pkg/controller/operators/internal/alongside/alongside_test.go
index dee4b943fe..902d3ab536 100644
--- a/pkg/controller/operators/internal/alongside/alongside_test.go
+++ b/pkg/controller/operators/internal/alongside/alongside_test.go
@@ -23,7 +23,7 @@ func TestAnnotatorFromObject(t *testing.T) {
         NamespacedNames []NamespacedName
     }{
         {
-            Name: "annotation without prefix ignored",
+            Name: "annotation without the installed-alongside prefix ignored",
             Object: TestAnnotatable{
                 "foo": "namespace/name",
             },
@@ -66,7 +66,7 @@ func TestAnnotatorToObject(t *testing.T) {
             },
         },
         {
-            Name: "annotation without prefix ignored",
+            Name: "annotation without the installed-alongside prefix ignored",
             Object: TestAnnotatable{
                 "operatorframework.io/something-else": "",
             },
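For context, the key() helper above derives annotation keys by hashing "namespace/name" onto the now-exported prefix. The hasher's constructor sits outside the hunk; assuming it is fnv.New64a() (consistent with the Sum64 call), a key can be reproduced like this:

package main

import (
    "fmt"
    "hash/fnv"
)

func main() {
    // Hash namespace, '/', then name, the same way alongside.key does.
    // fnv.New64a is an assumption, since the constructor is outside the hunk.
    hasher := fnv.New64a()
    hasher.Write([]byte("operators"))
    hasher.Write([]byte{'/'})
    hasher.Write([]byte("my-operator"))
    fmt.Printf("operatorframework.io/installed-alongside-%x\n", hasher.Sum64())
}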
"github.com/operator-framework/api/pkg/operators/v1alpha2" + + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/install" + "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/decorators" +) + +type ApplyConfig[T any] interface { + WithLabels(map[string]string) T +} + +type Client[A ApplyConfig[A], T metav1.Object] interface { + Apply(ctx context.Context, cfg ApplyConfig[A], opts metav1.ApplyOptions) (result T, err error) +} + +func hasLabel(obj metav1.Object) bool { + value, ok := obj.GetLabels()[install.OLMManagedLabelKey] + return ok && value == install.OLMManagedLabelValue +} + +func ObjectLabeler[T metav1.Object, A ApplyConfig[A]]( + ctx context.Context, + logger *logrus.Logger, + check func(metav1.Object) bool, + applyConfigFor func(name, namespace string) A, + apply func(namespace string, ctx context.Context, cfg A, opts metav1.ApplyOptions) (T, error), +) queueinformer.LegacySyncHandler { + return func(obj interface{}) error { + cast, ok := obj.(T) + if !ok { + err := fmt.Errorf("wrong type %T, expected %T: %#v", obj, new(T), obj) + logger.WithError(err).Error("casting failed") + return fmt.Errorf("casting failed: %w", err) + } + + if !check(cast) || hasLabel(cast) { + return nil + } + + cfg := applyConfigFor(cast.GetName(), cast.GetNamespace()) + cfg.WithLabels(map[string]string{ + install.OLMManagedLabelKey: install.OLMManagedLabelValue, + }) + + _, err := apply(cast.GetNamespace(), ctx, cfg, metav1.ApplyOptions{}) + return err + } +} + +// CRDs did not have applyconfigurations generated for them on accident, we can remove this when +// https://github.com/kubernetes/kubernetes/pull/120177 lands +func ObjectPatchLabeler( + ctx context.Context, + logger *logrus.Logger, + check func(metav1.Object) bool, + patch func(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *apiextensionsv1.CustomResourceDefinition, err error), +) func( + obj interface{}, +) error { + return func(obj interface{}) error { + cast, ok := obj.(*apiextensionsv1.CustomResourceDefinition) + if !ok { + err := fmt.Errorf("wrong type %T, expected %T: %#v", obj, new(*apiextensionsv1.CustomResourceDefinition), obj) + logger.WithError(err).Error("casting failed") + return fmt.Errorf("casting failed: %w", err) + } + + if !check(cast) || hasLabel(cast) { + return nil + } + + uid := cast.GetUID() + rv := cast.GetResourceVersion() + + // to ensure they appear in the patch as preconditions + previous := cast.DeepCopy() + previous.SetUID("") + previous.SetResourceVersion("") + + oldData, err := json.Marshal(previous) + if err != nil { + return fmt.Errorf("failed to Marshal old data for %s/%s: %w", previous.GetNamespace(), previous.GetName(), err) + } + + // to ensure they appear in the patch as preconditions + updated := cast.DeepCopy() + updated.SetUID(uid) + updated.SetResourceVersion(rv) + labels := updated.GetLabels() + if labels == nil { + labels = map[string]string{} + } + labels[install.OLMManagedLabelKey] = install.OLMManagedLabelValue + updated.SetLabels(labels) + + newData, err := json.Marshal(updated) + if err != nil { + return fmt.Errorf("failed to Marshal old data for %s/%s: %w", updated.GetNamespace(), updated.GetName(), err) + } + + patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) + if err != nil { + return fmt.Errorf("failed to create patch for %s/%s: %w", cast.GetNamespace(), cast.GetName(), err) + } + + _, err = patch(ctx, cast.GetName(), types.MergePatchType, 
+
+// CRDs accidentally did not have apply configurations generated for them; we can
+// remove this workaround once https://github.com/kubernetes/kubernetes/pull/120177 lands.
+func ObjectPatchLabeler(
+    ctx context.Context,
+    logger *logrus.Logger,
+    check func(metav1.Object) bool,
+    patch func(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *apiextensionsv1.CustomResourceDefinition, err error),
+) func(
+    obj interface{},
+) error {
+    return func(obj interface{}) error {
+        cast, ok := obj.(*apiextensionsv1.CustomResourceDefinition)
+        if !ok {
+            err := fmt.Errorf("wrong type %T, expected %T: %#v", obj, &apiextensionsv1.CustomResourceDefinition{}, obj)
+            logger.WithError(err).Error("casting failed")
+            return fmt.Errorf("casting failed: %w", err)
+        }
+
+        if !check(cast) || hasLabel(cast) {
+            return nil
+        }
+
+        uid := cast.GetUID()
+        rv := cast.GetResourceVersion()
+
+        // clear UID and resourceVersion on the old copy so the diff below reinstates them as patch preconditions
+        previous := cast.DeepCopy()
+        previous.SetUID("")
+        previous.SetResourceVersion("")
+
+        oldData, err := json.Marshal(previous)
+        if err != nil {
+            return fmt.Errorf("failed to Marshal old data for %s/%s: %w", previous.GetNamespace(), previous.GetName(), err)
+        }
+
+        // keep UID and resourceVersion on the new copy so they appear in the patch as preconditions
+        updated := cast.DeepCopy()
+        updated.SetUID(uid)
+        updated.SetResourceVersion(rv)
+        labels := updated.GetLabels()
+        if labels == nil {
+            labels = map[string]string{}
+        }
+        labels[install.OLMManagedLabelKey] = install.OLMManagedLabelValue
+        updated.SetLabels(labels)
+
+        newData, err := json.Marshal(updated)
+        if err != nil {
+            return fmt.Errorf("failed to Marshal new data for %s/%s: %w", updated.GetNamespace(), updated.GetName(), err)
+        }
+
+        patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData)
+        if err != nil {
+            return fmt.Errorf("failed to create patch for %s/%s: %w", cast.GetNamespace(), cast.GetName(), err)
+        }
+
+        _, err = patch(ctx, cast.GetName(), types.MergePatchType, patchBytes, metav1.PatchOptions{})
+        return err
+    }
+}
+
+// HasOLMOwnerRef determines whether an object is owned by an object in one of the OLM API groups.
+// It checks both typed OwnerReferences and the OLM owner labels, since cluster-scoped
+// resources cannot reference a namespaced owner and carry owner labels instead.
+func HasOLMOwnerRef(object metav1.Object) bool {
+    for _, ref := range object.GetOwnerReferences() {
+        for _, gv := range []schema.GroupVersion{
+            operatorsv1.GroupVersion,
+            operatorsv1alpha1.SchemeGroupVersion,
+            operatorsv1alpha2.GroupVersion,
+        } {
+            if ref.APIVersion == gv.String() {
+                return true
+            }
+        }
+    }
+    hasOLMOwnerLabels := true
+    for _, label := range []string{ownerutil.OwnerKey, ownerutil.OwnerNamespaceKey, ownerutil.OwnerKind} {
+        _, exists := object.GetLabels()[label]
+        hasOLMOwnerLabels = hasOLMOwnerLabels && exists
+    }
+    return hasOLMOwnerLabels
+}
+
+func HasOLMLabel(object metav1.Object) bool {
+    for key := range object.GetLabels() {
+        if strings.HasPrefix(key, decorators.ComponentLabelKeyPrefix) {
+            return true
+        }
+    }
+    return false
+}
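ObjectPatchLabeler's precondition trick is worth spelling out: because a JSON merge patch contains exactly the fields that differ between the two documents, clearing uid and resourceVersion on the old copy forces both fields into the patch, and the API server then rejects the patch if the live object no longer matches them. A self-contained sketch with a stand-in struct (the label key/value "olm.managed"/"true" stands in for the install package constants):

package main

import (
    "encoding/json"
    "fmt"

    jsonpatch "github.com/evanphx/json-patch"
)

// objectMeta is a stand-in for the handful of CRD metadata fields that matter here.
type objectMeta struct {
    UID             string            `json:"uid,omitempty"`
    ResourceVersion string            `json:"resourceVersion,omitempty"`
    Labels          map[string]string `json:"labels,omitempty"`
}

func main() {
    // The "old" document deliberately omits uid and resourceVersion, just as
    // ObjectPatchLabeler clears them on its deep copy: fields present only on
    // the "new" side land in the merge patch and act as preconditions.
    old := objectMeta{Labels: map[string]string{"app": "x"}}
    updated := objectMeta{
        UID:             "abc-123",
        ResourceVersion: "42",
        Labels:          map[string]string{"app": "x", "olm.managed": "true"},
    }

    oldData, err := json.Marshal(old)
    if err != nil {
        panic(err)
    }
    newData, err := json.Marshal(updated)
    if err != nil {
        panic(err)
    }

    patch, err := jsonpatch.CreateMergePatch(oldData, newData)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(patch))
    // {"labels":{"olm.managed":"true"},"resourceVersion":"42","uid":"abc-123"}
}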
diff --git a/pkg/controller/operators/olm/operator.go b/pkg/controller/operators/olm/operator.go
index 83d09e41c8..d476b52d48 100644
--- a/pkg/controller/operators/olm/operator.go
+++ b/pkg/controller/operators/olm/operator.go
@@ -7,9 +7,11 @@ import (
     "strings"
     "time"
 
+    "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/labeller"
     "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/olm/plugins"
     "github.com/sirupsen/logrus"
     admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+    appsv1 "k8s.io/api/apps/v1"
     corev1 "k8s.io/api/core/v1"
     rbacv1 "k8s.io/api/rbac/v1"
     apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
@@ -18,9 +20,12 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/apimachinery/pkg/selection"
     utilerrors "k8s.io/apimachinery/pkg/util/errors"
     utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+    appsv1applyconfigurations "k8s.io/client-go/applyconfigurations/apps/v1"
+    rbacv1applyconfigurations "k8s.io/client-go/applyconfigurations/rbac/v1"
     "k8s.io/client-go/informers"
     k8sscheme "k8s.io/client-go/kubernetes/scheme"
     "k8s.io/client-go/metadata/metadatainformer"
@@ -73,6 +78,7 @@ type Operator struct {
     opClient                     operatorclient.ClientInterface
     client                       versioned.Interface
     lister                       operatorlister.OperatorLister
+    k8sLabelQueueSets            map[schema.GroupVersionResource]workqueue.RateLimitingInterface
     protectedCopiedCSVNamespaces map[string]struct{}
     copiedCSVLister              metadatalister.Lister
     ogQueueSet                   *queueinformer.ResourceQueueSet
@@ -151,6 +157,7 @@ func newOperatorWithConfig(ctx context.Context, config *operatorConfig) (*Operat
         resolver:          config.strategyResolver,
         apiReconciler:     config.apiReconciler,
         lister:            lister,
+        k8sLabelQueueSets: map[schema.GroupVersionResource]workqueue.RateLimitingInterface{},
         recorder:          eventRecorder,
         apiLabeler:        config.apiLabeler,
         csvIndexers:       map[string]cache.Indexer{},
@@ -427,6 +434,37 @@ func newOperatorWithConfig(ctx context.Context, config *operatorConfig) (*Operat
         }
     }
 
+    labelObjects := func(gvr schema.GroupVersionResource, informer cache.SharedIndexInformer, sync queueinformer.LegacySyncHandler) error {
+        op.k8sLabelQueueSets[gvr] = workqueue.NewRateLimitingQueueWithConfig(workqueue.DefaultControllerRateLimiter(), workqueue.RateLimitingQueueConfig{
+            Name: gvr.String(),
+        })
+        queueInformer, err := queueinformer.NewQueueInformer(
+            ctx,
+            queueinformer.WithLogger(op.logger),
+            queueinformer.WithInformer(informer),
+            queueinformer.WithSyncer(sync.ToSyncer()),
+        )
+        if err != nil {
+            return err
+        }
+
+        if err := op.RegisterQueueInformer(queueInformer); err != nil {
+            return err
+        }
+
+        return nil
+    }
+
+    if err := labelObjects(appsv1.SchemeGroupVersion.WithResource("deployments"), informersByNamespace[metav1.NamespaceAll].DeploymentInformer.Informer(), labeller.ObjectLabeler[*appsv1.Deployment, *appsv1applyconfigurations.DeploymentApplyConfiguration](
+        ctx, op.logger, labeller.HasOLMOwnerRef,
+        appsv1applyconfigurations.Deployment,
+        func(namespace string, ctx context.Context, cfg *appsv1applyconfigurations.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (*appsv1.Deployment, error) {
+            return op.opClient.KubernetesInterface().AppsV1().Deployments(namespace).Apply(ctx, cfg, opts)
+        },
+    )); err != nil {
+        return nil, err
+    }
+
     // add queue for all namespaces as well
     objGCQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("%s/obj-gc", ""))
     op.objGCQueueSet.Set("", objGCQueue)
@@ -481,6 +519,18 @@ func newOperatorWithConfig(ctx context.Context, config *operatorConfig) (*Operat
         return nil, err
     }
 
+    if err := labelObjects(rbacv1.SchemeGroupVersion.WithResource("clusterroles"), clusterRoleInformer.Informer(), labeller.ObjectLabeler[*rbacv1.ClusterRole, *rbacv1applyconfigurations.ClusterRoleApplyConfiguration](
+        ctx, op.logger, labeller.HasOLMOwnerRef,
+        func(name, _ string) *rbacv1applyconfigurations.ClusterRoleApplyConfiguration {
+            return rbacv1applyconfigurations.ClusterRole(name)
+        },
+        func(_ string, ctx context.Context, cfg *rbacv1applyconfigurations.ClusterRoleApplyConfiguration, opts metav1.ApplyOptions) (*rbacv1.ClusterRole, error) {
+            return op.opClient.KubernetesInterface().RbacV1().ClusterRoles().Apply(ctx, cfg, opts)
+        },
+    )); err != nil {
+        return nil, err
+    }
+
     clusterRoleBindingInformer := k8sInformerFactory.Rbac().V1().ClusterRoleBindings()
     informersByNamespace[metav1.NamespaceAll].ClusterRoleBindingInformer = clusterRoleBindingInformer
     op.lister.RbacV1().RegisterClusterRoleBindingLister(clusterRoleBindingInformer.Lister())
@@ -497,6 +547,18 @@ func newOperatorWithConfig(ctx context.Context, config *operatorConfig) (*Operat
         return nil, err
     }
 
+    if err := labelObjects(rbacv1.SchemeGroupVersion.WithResource("clusterrolebindings"), clusterRoleBindingInformer.Informer(), labeller.ObjectLabeler[*rbacv1.ClusterRoleBinding, *rbacv1applyconfigurations.ClusterRoleBindingApplyConfiguration](
+        ctx, op.logger, labeller.HasOLMOwnerRef,
+        func(name, _ string) *rbacv1applyconfigurations.ClusterRoleBindingApplyConfiguration {
+            return rbacv1applyconfigurations.ClusterRoleBinding(name)
+        },
+        func(_ string, ctx context.Context, cfg *rbacv1applyconfigurations.ClusterRoleBindingApplyConfiguration, opts metav1.ApplyOptions) (*rbacv1.ClusterRoleBinding, error) {
+            return op.opClient.KubernetesInterface().RbacV1().ClusterRoleBindings().Apply(ctx, cfg, opts)
+        },
+    )); err != nil {
+        return nil, err
+    }
+
     // register namespace queueinformer
     namespaceInformer := k8sInformerFactory.Core().V1().Namespaces()
     informersByNamespace[metav1.NamespaceAll].NamespaceInformer = namespaceInformer
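Note how the cluster-scoped wiring above ignores the namespace arguments, which is exactly why HasOLMOwnerRef accepts either ownership signal. The sketch below mirrors its logic against apimachinery's ObjectMeta; the single group version and the olm.owner* label keys are assumptions standing in for the operators API groups and the ownerutil constants:

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// hasOLMOwner mirrors HasOLMOwnerRef: a typed OwnerReference into an OLM API
// group, or else the full set of OLM owner labels.
func hasOLMOwner(obj metav1.Object) bool {
    for _, ref := range obj.GetOwnerReferences() {
        if ref.APIVersion == "operators.coreos.com/v1alpha1" {
            return true
        }
    }
    has := true
    for _, key := range []string{"olm.owner", "olm.owner.namespace", "olm.owner.kind"} {
        _, ok := obj.GetLabels()[key]
        has = has && ok
    }
    return has
}

func main() {
    // Namespaced resources carry a typed OwnerReference.
    namespaced := &metav1.ObjectMeta{
        OwnerReferences: []metav1.OwnerReference{{
            APIVersion: "operators.coreos.com/v1alpha1",
            Kind:       "ClusterServiceVersion",
            Name:       "my-csv.v1.0.0",
        }},
    }
    // Cluster-scoped resources cannot reference a namespaced owner, so OLM
    // records ownership in labels instead.
    clusterScoped := &metav1.ObjectMeta{
        Labels: map[string]string{
            "olm.owner":           "my-csv.v1.0.0",
            "olm.owner.namespace": "operators",
            "olm.owner.kind":      "ClusterServiceVersion",
        },
    }
    fmt.Println(hasOLMOwner(namespaced), hasOLMOwner(clusterScoped)) // true true
}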