diff --git a/Makefile b/Makefile
index f14b8e50e1..7c7b84b48a 100644
--- a/Makefile
+++ b/Makefile
@@ -116,11 +116,18 @@ package-images: build-images ## Package docker images for airgap environment
 	./scripts/package-images
 
 .PHONY: package-bundle
-package-bundle: build ## Package the tarball bundle
+package-bundle: build ## Package the tarball bundle
 	./scripts/package-bundle
 
 .PHONY: test
-test:
+test: unit-tests integration-tests
+
+.PHONY: unit-tests
+unit-tests:
+	./scripts/unit-tests
+
+.PHONY: integration-tests
+integration-tests:
 	./scripts/test
 
 ./.dapper:
diff --git a/pkg/rke2/psp.go b/pkg/rke2/psp.go
index bd0b6f7403..365d8a9f22 100644
--- a/pkg/rke2/psp.go
+++ b/pkg/rke2/psp.go
@@ -369,8 +369,6 @@ func updateNamespaceRef(ctx context.Context, cs *kubernetes.Clientset, ns *v1.Na
 	return nil
 }
 
-type deployFn func(context.Context, *kubernetes.Clientset, interface{}) error
-
 // newClient create a new Kubernetes client from configuration.
 func newClient(kubeConfigPath string, k8sWrapTransport transport.WrapperFunc) (*kubernetes.Clientset, error) {
 	config, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath)
@@ -389,116 +387,152 @@ func decodeYamlResource(data interface{}, yaml string) error {
 	return decoder.Decode(data)
 }
 
-func retryTo(ctx context.Context, runFunc deployFn, cs *kubernetes.Clientset, resource interface{}, retries, wait int) error {
-	var err error
-	if retries <= 0 {
-		retries = defaultRetries
-	}
-	if wait <= 0 {
-		wait = defaultWaitSeconds
-	}
-	for i := 0; i < retries; i++ {
-		if err = runFunc(ctx, cs, resource); err != nil {
-			time.Sleep(time.Second * time.Duration(wait))
-			continue
-		}
-		return nil
-	}
-	return err
-}
-
-func deployPodSecurityPolicyFromYaml(ctx context.Context, cs *kubernetes.Clientset, pspYaml string) error {
+func deployPodSecurityPolicyFromYaml(ctx context.Context, cs kubernetes.Interface, pspYaml string) error {
 	var psp v1beta1.PodSecurityPolicy
 	if err := decodeYamlResource(&psp, pspYaml); err != nil {
 		return err
 	}
-	return retryTo(ctx, deployPodSecurityPolicy, cs, psp, defaultRetries, defaultWaitSeconds)
-}
-func deployPodSecurityPolicy(ctx context.Context, cs *kubernetes.Clientset, p interface{}) error {
-	psp, ok := p.(v1beta1.PodSecurityPolicy)
-	if !ok {
-		return fmt.Errorf("invalid type provided: %T, expected: PodSecurityPolicy", p)
-	}
-	if _, err := cs.PolicyV1beta1().PodSecurityPolicies().Create(ctx, &psp, metav1.CreateOptions{}); err != nil {
-		if !apierrors.IsAlreadyExists(err) {
+	// try to create the given PSP. If it already exists, we fall through to
+	// attempting to update the existing PSP.
+	if err := retry.OnError(retry.DefaultBackoff,
+		func(err error) bool {
+			return !apierrors.IsAlreadyExists(err)
+		}, func() error {
+			_, err := cs.PolicyV1beta1().PodSecurityPolicies().Create(ctx, &psp, metav1.CreateOptions{})
 			return err
-		}
-		if _, err := cs.PolicyV1beta1().PodSecurityPolicies().Update(ctx, &psp, metav1.UpdateOptions{}); err != nil {
+		},
+	); err != nil && apierrors.IsAlreadyExists(err) {
+		return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+			retrievedPSP, err := cs.PolicyV1beta1().PodSecurityPolicies().Get(ctx, psp.Name, metav1.GetOptions{})
+			if err != nil {
+				return err
+			}
+			if retrievedPSP.Annotations == nil {
+				retrievedPSP.Annotations = make(map[string]string, len(psp.Annotations))
+			}
+			for k, v := range psp.Annotations {
+				retrievedPSP.Annotations[k] = v
+			}
+			retrievedPSP.Spec = psp.Spec
+			_, err = cs.PolicyV1beta1().PodSecurityPolicies().Update(ctx, retrievedPSP, metav1.UpdateOptions{})
 			return err
-		}
+		})
+	} else if err != nil {
+		return err
 	}
 	return nil
 }
 
-func deployClusterRoleBindingFromYaml(ctx context.Context, cs *kubernetes.Clientset, clusterRoleBindingYaml string) error {
+func deployClusterRoleBindingFromYaml(ctx context.Context, cs kubernetes.Interface, clusterRoleBindingYaml string) error {
 	var clusterRoleBinding rbacv1.ClusterRoleBinding
 	if err := decodeYamlResource(&clusterRoleBinding, clusterRoleBindingYaml); err != nil {
 		return err
 	}
-	return retryTo(ctx, deployClusterRoleBinding, cs, clusterRoleBinding, defaultRetries, defaultWaitSeconds)
-}
-func deployClusterRoleBinding(ctx context.Context, cs *kubernetes.Clientset, crb interface{}) error {
-	clusterRoleBinding, ok := crb.(rbacv1.ClusterRoleBinding)
-	if !ok {
-		return fmt.Errorf("invalid type provided: %T, expected: ClusterRoleBinding", crb)
-	}
-	if _, err := cs.RbacV1().ClusterRoleBindings().Create(ctx, &clusterRoleBinding, metav1.CreateOptions{}); err != nil {
-		if !apierrors.IsAlreadyExists(err) {
+	// try to create the given cluster role binding. If it already exists, we
+	// fall through to attempting to update the existing cluster role binding.
+	if err := retry.OnError(retry.DefaultBackoff,
+		func(err error) bool {
+			return !apierrors.IsAlreadyExists(err)
+		}, func() error {
+			_, err := cs.RbacV1().ClusterRoleBindings().Create(ctx, &clusterRoleBinding, metav1.CreateOptions{})
 			return err
-		}
-		if _, err := cs.RbacV1().ClusterRoleBindings().Update(ctx, &clusterRoleBinding, metav1.UpdateOptions{}); err != nil {
+		},
+	); err != nil && apierrors.IsAlreadyExists(err) {
+		return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+			retrievedCRB, err := cs.RbacV1().ClusterRoleBindings().Get(ctx, clusterRoleBinding.Name, metav1.GetOptions{})
+			if err != nil {
+				return err
+			}
+			if retrievedCRB.Annotations == nil {
+				retrievedCRB.Annotations = make(map[string]string, len(clusterRoleBinding.Annotations))
+			}
+			for k, v := range clusterRoleBinding.Annotations {
+				retrievedCRB.Annotations[k] = v
+			}
+			retrievedCRB.Subjects = clusterRoleBinding.Subjects
+			retrievedCRB.RoleRef = clusterRoleBinding.RoleRef
+			_, err = cs.RbacV1().ClusterRoleBindings().Update(ctx, retrievedCRB, metav1.UpdateOptions{})
 			return err
-		}
+		})
+	} else if err != nil {
+		return err
 	}
 	return nil
 }
 
-func deployClusterRoleFromYaml(ctx context.Context, cs *kubernetes.Clientset, clusterRoleYaml string) error {
+func deployClusterRoleFromYaml(ctx context.Context, cs kubernetes.Interface, clusterRoleYaml string) error {
 	var clusterRole rbacv1.ClusterRole
 	if err := decodeYamlResource(&clusterRole, clusterRoleYaml); err != nil {
 		return err
 	}
-	return retryTo(ctx, deployClusterRole, cs, clusterRole, defaultRetries, defaultWaitSeconds)
-}
-func deployClusterRole(ctx context.Context, cs *kubernetes.Clientset, cr interface{}) error {
-	clusterRole, ok := cr.(rbacv1.ClusterRole)
-	if !ok {
-		return fmt.Errorf("invalid type provided: %T, expected: ClusterRole", cr)
-	}
-	if _, err := cs.RbacV1().ClusterRoles().Create(ctx, &clusterRole, metav1.CreateOptions{}); err != nil {
-		if !apierrors.IsAlreadyExists(err) {
+	// try to create the given cluster role. If it already exists, we
+	// fall through to attempting to update the existing cluster role.
+	if err := retry.OnError(retry.DefaultBackoff,
+		func(err error) bool {
+			return !apierrors.IsAlreadyExists(err)
+		}, func() error {
+			_, err := cs.RbacV1().ClusterRoles().Create(ctx, &clusterRole, metav1.CreateOptions{})
 			return err
-		}
-		if _, err := cs.RbacV1().ClusterRoles().Update(ctx, &clusterRole, metav1.UpdateOptions{}); err != nil {
+		},
+	); err != nil && apierrors.IsAlreadyExists(err) {
+		return retry.RetryOnConflict(retry.DefaultRetry, func() error {
+			retrievedCR, err := cs.RbacV1().ClusterRoles().Get(ctx, clusterRole.Name, metav1.GetOptions{})
+			if err != nil {
+				return err
+			}
+			if retrievedCR.Annotations == nil {
+				retrievedCR.Annotations = make(map[string]string, len(clusterRole.Annotations))
+			}
+			for k, v := range clusterRole.Annotations {
+				retrievedCR.Annotations[k] = v
+			}
+			retrievedCR.Rules = clusterRole.Rules
+			_, err = cs.RbacV1().ClusterRoles().Update(ctx, retrievedCR, metav1.UpdateOptions{})
 			return err
-		}
+		})
+	} else if err != nil {
+		return err
 	}
 	return nil
 }
 
-func deployRoleBindingFromYaml(ctx context.Context, cs *kubernetes.Clientset, roleBindingYaml string) error {
+func deployRoleBindingFromYaml(ctx context.Context, cs kubernetes.Interface, roleBindingYaml string) error {
 	var roleBinding rbacv1.RoleBinding
 	if err := decodeYamlResource(&roleBinding, roleBindingYaml); err != nil {
 		return err
 	}
-	return retryTo(ctx, deployRoleBinding, cs, roleBinding, defaultRetries, defaultWaitSeconds)
-}
-func deployRoleBinding(ctx context.Context, cs *kubernetes.Clientset, rb interface{}) error {
-	roleBinding, ok := rb.(rbacv1.RoleBinding)
-	if !ok {
-		return fmt.Errorf("invalid type provided: %T, expected: RoleBinding", rb)
-	}
-	if _, err := cs.RbacV1().RoleBindings(roleBinding.Namespace).Create(ctx, &roleBinding, metav1.CreateOptions{}); err != nil {
-		if !apierrors.IsAlreadyExists(err) {
+	// try to create the given role binding. If it already exists, we fall through to
+	// attempting to update the existing role binding.
+	if err := retry.OnError(retry.DefaultBackoff,
+		func(err error) bool {
+			return !apierrors.IsAlreadyExists(err)
+		}, func() error {
+			_, err := cs.RbacV1().RoleBindings(roleBinding.Namespace).Create(ctx, &roleBinding, metav1.CreateOptions{})
 			return err
-		}
-		if _, err := cs.RbacV1().RoleBindings(roleBinding.Namespace).Update(ctx, &roleBinding, metav1.UpdateOptions{}); err != nil {
+		},
+	); err != nil && apierrors.IsAlreadyExists(err) {
+		return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+			retrievedR, err := cs.RbacV1().RoleBindings(roleBinding.Namespace).Get(ctx, roleBinding.Name, metav1.GetOptions{})
+			if err != nil {
+				return err
+			}
+			if retrievedR.Annotations == nil {
+				retrievedR.Annotations = make(map[string]string, len(roleBinding.Annotations))
+			}
+			for k, v := range roleBinding.Annotations {
+				retrievedR.Annotations[k] = v
+			}
+			retrievedR.Subjects = roleBinding.Subjects
+			retrievedR.RoleRef = roleBinding.RoleRef
+			_, err = cs.RbacV1().RoleBindings(roleBinding.Namespace).Update(ctx, retrievedR, metav1.UpdateOptions{})
 			return err
-		}
+		})
+	} else if err != nil {
+		return err
 	}
 	return nil
 }
diff --git a/pkg/rke2/psp_templates.go b/pkg/rke2/psp_templates.go
index 917061147d..eb7e692a03 100644
--- a/pkg/rke2/psp_templates.go
+++ b/pkg/rke2/psp_templates.go
@@ -127,8 +127,7 @@ subjects:
   name: system:authenticated
 `
 
-const systemUnrestrictedPSPTemplate = `
-apiVersion: policy/v1beta1
+const systemUnrestrictedPSPTemplate = `apiVersion: policy/v1beta1
 kind: PodSecurityPolicy
 metadata:
   name: %s
diff --git a/pkg/rke2/psp_test.go b/pkg/rke2/psp_test.go
new file mode 100644
index 0000000000..8c86ac402c
--- /dev/null
+++ b/pkg/rke2/psp_test.go
@@ -0,0 +1,418 @@
+package rke2
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"testing"
+
+	"k8s.io/api/policy/v1beta1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/kubernetes/fake"
+	fakepolicyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake"
+	fakerbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1/fake"
+	k8stesting "k8s.io/client-go/testing"
+)
+
+const (
+	testPSPName                = "test-psp"
+	testClusterRoleName        = "test-cluster-role"
+	testClusterRoleBindingName = "test-cluster-role-binding"
+	testRoleBindingName        = "test-role-binding"
+)
+
+var testPodSecurityPolicy = &v1beta1.PodSecurityPolicy{
+	ObjectMeta: metav1.ObjectMeta{
+		Name: testPSPName,
+	},
+}
+
+var testClusterRole = &rbacv1.ClusterRole{
+	ObjectMeta: metav1.ObjectMeta{
+		Name: testClusterRoleName,
+	},
+}
+
+var testClusterRoleBinding = &rbacv1.ClusterRoleBinding{
+	ObjectMeta: metav1.ObjectMeta{
+		Name: testClusterRoleBindingName,
+	},
+}
+
+var testRoleBinding = &rbacv1.RoleBinding{
+	ObjectMeta: metav1.ObjectMeta{
+		Name: testRoleBindingName,
+	},
+}
+
+// fakeWithNonretriableError receives a value, determines its underlying
+// type, and creates a new *fake.Clientset with a Reactor registered to
+// return an error that is not retriable.
+func fakeWithNonretriableError(ro interface{}) *fake.Clientset {
+	cs := fake.NewSimpleClientset(testPodSecurityPolicy)
+	const errMsg = "non retriable error"
+	switch ro.(type) {
+	case *v1beta1.PodSecurityPolicy:
+		cs.PolicyV1beta1().(*fakepolicyv1beta1.FakePolicyV1beta1).PrependReactor("update", "*",
+			func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+				return true, &v1beta1.PodSecurityPolicy{}, errors.New(errMsg)
+			},
+		)
+	case *rbacv1.ClusterRoleBinding:
+		cs.RbacV1().(*fakerbacv1.FakeRbacV1).PrependReactor("*", "*",
+			func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+				return true, &rbacv1.ClusterRoleBinding{}, errors.New(errMsg)
+			},
+		)
+	case *rbacv1.ClusterRole:
+		cs.RbacV1().(*fakerbacv1.FakeRbacV1).PrependReactor("*", "*",
+			func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+				return true, &rbacv1.ClusterRole{}, errors.New(errMsg)
+			},
+		)
+	case *rbacv1.RoleBinding:
+		cs.RbacV1().(*fakerbacv1.FakeRbacV1).PrependReactor("*", "*",
+			func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+				return true, &rbacv1.RoleBinding{}, errors.New(errMsg)
+			},
+		)
+	}
+	return cs
+}
+
+// fakeWithRetriableError creates a new *fake.Clientset and sets a
+// Reactor to return an error that will be caught by the retry logic.
+func fakeWithRetriableError(ro interface{}) *fake.Clientset {
+	cs := fake.NewSimpleClientset(testPodSecurityPolicy)
+	switch ro.(type) {
+	case *v1beta1.PodSecurityPolicy:
+		cs.PolicyV1beta1().(*fakepolicyv1beta1.FakePolicyV1beta1).PrependReactor("update", "*",
+			func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+				return true, &v1beta1.PodSecurityPolicy{},
+					k8serrors.NewConflict(schema.GroupResource{
+						Resource: "psp",
+					},
+						"psp-update", nil,
+					)
+			},
+		)
+	case *rbacv1.ClusterRoleBinding:
+		cs.RbacV1().(*fakerbacv1.FakeRbacV1).PrependReactor("*", "*",
+			func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+				return true, &rbacv1.ClusterRoleBinding{},
+					k8serrors.NewConflict(schema.GroupResource{
+						Resource: "clusterrolebindings",
+					},
+						"cluster-role-binding-update", nil,
+					)
+			},
+		)
+	case *rbacv1.ClusterRole:
+		cs.RbacV1().(*fakerbacv1.FakeRbacV1).PrependReactor("*", "*",
+			func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+				return true, &rbacv1.ClusterRole{},
+					k8serrors.NewConflict(schema.GroupResource{
+						Resource: "clusterrole",
+					},
+						"cluster-role-update", nil,
+					)
+			},
+		)
+	case *rbacv1.RoleBinding:
+		cs.RbacV1().(*fakerbacv1.FakeRbacV1).PrependReactor("*", "*",
+			func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+				return true, &rbacv1.RoleBinding{},
+					k8serrors.NewConflict(schema.GroupResource{
+						Resource: "rolebindings",
+					},
+						"role-binding-update", nil,
+					)
+			},
+		)
+	}
+	return cs
+}
+
+func Test_deployPodSecurityPolicyFromYaml(t *testing.T) {
+	pspYAML := fmt.Sprintf(globalRestrictedPSPTemplate, testPSPName)
+	type args struct {
+		ctx     context.Context
+		cs      kubernetes.Interface
+		pspYaml string
+	}
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name: "fail to decode YAML",
+			args: args{
+				ctx:     context.TODO(),
+				cs:      fake.NewSimpleClientset(testPodSecurityPolicy),
+				pspYaml: "",
+			},
+			wantErr: true,
+		},
+		{
+			name: "successfully create PSP",
+			args: args{
+				ctx:     context.TODO(),
+				cs:      fake.NewSimpleClientset(&v1beta1.PodSecurityPolicy{}),
+				pspYaml: pspYAML,
+			},
+			wantErr: false,
+		},
+		{
+			name: "successfully update PSP",
+			args: args{
+				ctx:     context.TODO(),
+				cs:      fake.NewSimpleClientset(testPodSecurityPolicy),
+				pspYaml: pspYAML,
+			},
+			wantErr: false,
+		},
+		{
+			name: "fail update PSP - nonretriable",
+			args: args{
+				ctx:     context.TODO(),
+				cs:      fakeWithNonretriableError(&v1beta1.PodSecurityPolicy{}),
+				pspYaml: pspYAML,
+			},
+			wantErr: true,
+		},
+		{
+			name: "fail update PSP - retriable error",
+			args: args{
+				ctx:     context.TODO(),
+				cs:      fakeWithRetriableError(&v1beta1.PodSecurityPolicy{}),
+				pspYaml: pspYAML,
+			},
+			wantErr: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if err := deployPodSecurityPolicyFromYaml(tt.args.ctx, tt.args.cs, tt.args.pspYaml); (err != nil) != tt.wantErr {
+				t.Errorf("deployPodSecurityPolicyFromYaml() error = %v, wantErr %v", err, tt.wantErr)
+			}
+			// verify that the existing PSP has in fact been updated from the given YAML.
+			if tt.name == "successfully create PSP" || tt.name == "successfully update PSP" {
+				val, _ := tt.args.cs.PolicyV1beta1().PodSecurityPolicies().Get(context.TODO(), testPSPName, metav1.GetOptions{})
+				annotationsLen := len(val.Annotations)
+				if annotationsLen != 4 {
+					t.Errorf("expected 4 but got %d", annotationsLen)
+				}
+			}
+		})
+	}
+}
+
+func Test_deployClusterRoleBindingFromYaml(t *testing.T) {
+	clusterRoleBindingYaml := fmt.Sprintf(kubeletAPIServerRoleBindingTemplate, testClusterRoleBindingName)
+	type args struct {
+		ctx                    context.Context
+		cs                     kubernetes.Interface
+		clusterRoleBindingYaml string
+	}
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name: "fail to decode YAML",
+			args: args{
+				ctx:                    context.Background(),
+				cs:                     fake.NewSimpleClientset(testClusterRoleBinding),
+				clusterRoleBindingYaml: "",
+			},
+			wantErr: true,
+		},
+		{
+			name: "successfully create cluster role binding",
+			args: args{
+				ctx:                    context.Background(),
+				cs:                     fake.NewSimpleClientset(&rbacv1.ClusterRoleBinding{}),
+				clusterRoleBindingYaml: clusterRoleBindingYaml,
+			},
+			wantErr: false,
+		},
+		{
+			name: "successfully update cluster role binding",
+			args: args{
+				ctx:                    context.Background(),
+				cs:                     fake.NewSimpleClientset(testClusterRoleBinding),
+				clusterRoleBindingYaml: clusterRoleBindingYaml,
+			},
+			wantErr: false,
+		},
+		{
+			name: "fail update cluster role binding - nonretriable",
+			args: args{
+				ctx:                    context.TODO(),
+				cs:                     fakeWithNonretriableError(&rbacv1.ClusterRoleBinding{}),
+				clusterRoleBindingYaml: clusterRoleBindingYaml,
+			},
+			wantErr: true,
+		},
+		{
+			name: "fail update cluster role binding - retriable error",
+			args: args{
+				ctx:                    context.TODO(),
+				cs:                     fakeWithRetriableError(&rbacv1.ClusterRoleBinding{}),
+				clusterRoleBindingYaml: clusterRoleBindingYaml,
+			},
+			wantErr: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if err := deployClusterRoleBindingFromYaml(tt.args.ctx, tt.args.cs, tt.args.clusterRoleBindingYaml); (err != nil) != tt.wantErr {
+				t.Errorf("deployClusterRoleBindingFromYaml() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func Test_deployClusterRoleFromYaml(t *testing.T) {
+	const testResourceName = "test-resource-name"
+	clusterRoleYaml := fmt.Sprintf(roleTemplate, "test-cluster-role", testResourceName)
+	type args struct {
+		ctx             context.Context
+		cs              kubernetes.Interface
+		clusterRoleYaml string
+	}
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name: "fail to decode YAML",
+			args: args{
+				ctx:             context.Background(),
+				cs:              fake.NewSimpleClientset(testClusterRole),
+				clusterRoleYaml: "",
+			},
+			wantErr: true,
+		},
+		{
+			name: "successfully create cluster role",
+			args: args{
+				ctx:             context.Background(),
+				cs:              fake.NewSimpleClientset(&rbacv1.ClusterRole{}),
+				clusterRoleYaml: clusterRoleYaml,
+			},
+			wantErr: false,
+		},
+		{
+			name: "successfully update cluster role",
+			args: args{
+				ctx:             context.Background(),
+				cs:              fake.NewSimpleClientset(testClusterRole),
+				clusterRoleYaml: clusterRoleYaml,
+			},
+			wantErr: false,
+		},
+		{
+			name: "fail update cluster role - nonretriable",
+			args: args{
+				ctx:             context.TODO(),
+				cs:              fakeWithNonretriableError(&rbacv1.ClusterRole{}),
+				clusterRoleYaml: clusterRoleYaml,
+			},
+			wantErr: true,
+		},
+		{
+			name: "fail update cluster role - retriable error",
+			args: args{
+				ctx:             context.TODO(),
+				cs:              fakeWithRetriableError(&rbacv1.ClusterRole{}),
+				clusterRoleYaml: clusterRoleYaml,
+			},
+			wantErr: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if err := deployClusterRoleFromYaml(tt.args.ctx, tt.args.cs, tt.args.clusterRoleYaml); (err != nil) != tt.wantErr {
+				t.Errorf("deployClusterRoleFromYaml() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func Test_deployRoleBindingFromYaml(t *testing.T) {
+	roleBindingYaml := fmt.Sprintf(tunnelControllerRoleTemplate, testRoleBindingName)
+	type args struct {
+		ctx             context.Context
+		cs              kubernetes.Interface
+		roleBindingYaml string
+	}
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name: "fail to decode YAML",
+			args: args{
+				ctx:             context.Background(),
+				cs:              fake.NewSimpleClientset(testRoleBinding),
+				roleBindingYaml: "",
+			},
+			wantErr: true,
+		},
+		{
+			name: "successfully create role binding",
+			args: args{
+				ctx:             context.Background(),
+				cs:              fake.NewSimpleClientset(&rbacv1.RoleBinding{}),
+				roleBindingYaml: roleBindingYaml,
+			},
+			wantErr: false,
+		},
+		{
+			name: "successfully update role binding",
+			args: args{
+				ctx:             context.Background(),
+				cs:              fake.NewSimpleClientset(testRoleBinding),
+				roleBindingYaml: roleBindingYaml,
+			},
+			wantErr: false,
+		},
+		{
+			name: "fail update role binding - nonretriable",
+			args: args{
+				ctx:             context.TODO(),
+				cs:              fakeWithNonretriableError(&rbacv1.RoleBinding{}),
+				roleBindingYaml: roleBindingYaml,
+			},
+			wantErr: true,
+		},
+		{
+			name: "fail update role binding - retriable error",
+			args: args{
+				ctx:             context.TODO(),
+				cs:              fakeWithRetriableError(&rbacv1.RoleBinding{}),
+				roleBindingYaml: roleBindingYaml,
+			},
+			wantErr: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if err := deployRoleBindingFromYaml(tt.args.ctx, tt.args.cs, tt.args.roleBindingYaml); (err != nil) != tt.wantErr {
+				t.Errorf("deployRoleBindingFromYaml() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
diff --git a/scripts/unit-tests b/scripts/unit-tests
new file mode 100755
index 0000000000..c19d5b69bc
--- /dev/null
+++ b/scripts/unit-tests
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -ex
+
+go test -v -cover ./...
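
Note: the create-then-update flow that psp.go adopts above can be exercised in isolation against a fake clientset, the same way psp_test.go does. The sketch below is illustrative only — the createOrUpdateConfigMap helper and the ConfigMap resource are hypothetical and not part of this change — but it uses the same retry.OnError / retry.RetryOnConflict pattern the deploy* functions now follow.

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/util/retry"
)

// createOrUpdateConfigMap mirrors the pattern used by the deploy* functions:
// retry the Create until it succeeds or fails with IsAlreadyExists, then fall
// through to a conflict-aware Get/mutate/Update loop.
func createOrUpdateConfigMap(ctx context.Context, cs kubernetes.Interface, desired *corev1.ConfigMap) error {
	err := retry.OnError(retry.DefaultBackoff,
		func(err error) bool {
			// retry any create error except "already exists"
			return !apierrors.IsAlreadyExists(err)
		},
		func() error {
			_, err := cs.CoreV1().ConfigMaps(desired.Namespace).Create(ctx, desired, metav1.CreateOptions{})
			return err
		},
	)
	if err == nil {
		return nil
	}
	if !apierrors.IsAlreadyExists(err) {
		return err
	}
	// the object exists: re-read it, copy over the desired data, and
	// retry on update conflicts
	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		existing, err := cs.CoreV1().ConfigMaps(desired.Namespace).Get(ctx, desired.Name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		existing.Data = desired.Data
		_, err = cs.CoreV1().ConfigMaps(desired.Namespace).Update(ctx, existing, metav1.UpdateOptions{})
		return err
	})
}

func main() {
	ctx := context.Background()
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
		Data:       map[string]string{"k": "v"},
	}
	// a fake clientset, as used in psp_test.go, is enough to exercise both paths
	cs := fake.NewSimpleClientset()
	if err := createOrUpdateConfigMap(ctx, cs, cm); err != nil { // create path
		fmt.Println("create failed:", err)
	}
	cm.Data["k"] = "v2"
	if err := createOrUpdateConfigMap(ctx, cs, cm); err != nil { // update path
		fmt.Println("update failed:", err)
	}
	got, _ := cs.CoreV1().ConfigMaps("default").Get(ctx, "demo", metav1.GetOptions{})
	fmt.Println(got.Data["k"]) // prints "v2"
}

Both paths run without a live apiserver, which is what lets the new `make unit-tests` target (scripts/unit-tests, i.e. `go test -v -cover ./...`) cover the deploy functions directly.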