From 2748b57b8ac539e96c7fbd0ec9fc723cfe5578a8 Mon Sep 17 00:00:00 2001
From: Feny Mehta
Date: Tue, 10 Sep 2024 17:07:23 +0530
Subject: [PATCH 01/40] KUBESAW-187: Adjust ksctl adm restart command to use rollout-restart

Signed-off-by: Feny Mehta
---
 go.mod | 1 +
 go.sum | 1 +
 pkg/cmd/adm/restart.go | 194 ++++++++++++++++++-------
 pkg/cmd/adm/restart_test.go | 120 +++++++++----------
 pkg/cmd/adm/unregister_member.go | 3 +-
 5 files changed, 174 insertions(+), 145 deletions(-)

diff --git a/go.mod b/go.mod
index 7bcbe05..89cfa55 100644
--- a/go.mod
+++ b/go.mod
@@ -83,6 +83,7 @@ require (
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
+	github.com/lithammer/dedent v1.1.0 // indirect
 	github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
 	github.com/mailru/easyjson v0.7.6 // indirect
 	github.com/mattn/go-isatty v0.0.18 // indirect
diff --git a/go.sum b/go.sum
index d28ef87..0ace6cd 100644
--- a/go.sum
+++ b/go.sum
@@ -439,6 +439,7 @@ github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+
 github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
+github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY=
 github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
 github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
 github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go
index dcd65f0..60ed006 100644
--- a/pkg/cmd/adm/restart.go
+++ b/pkg/cmd/adm/restart.go
@@ -3,26 +3,32 @@ package adm
 import (
 	"context"
 	"fmt"
-	"time"
+	"os"
 
 	"github.com/kubesaw/ksctl/pkg/client"
 	"github.com/kubesaw/ksctl/pkg/cmd/flags"
 	"github.com/kubesaw/ksctl/pkg/configuration"
 	clicontext "github.com/kubesaw/ksctl/pkg/context"
 	"github.com/kubesaw/ksctl/pkg/ioutils"
-	"github.com/spf13/cobra"
 	appsv1 "k8s.io/api/apps/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/wait"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+	kubectlrollout "k8s.io/kubectl/pkg/cmd/rollout"
+	cmdutil "k8s.io/kubectl/pkg/cmd/util"
 	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
+// NewRestartCmd() is a function to restart the whole operator; it relies on the target cluster and fetches the cluster config.
+// 1. If the command is run for the host operator, it restarts the whole host operator (it deletes the OLM-based pods (host-operator pods),
+// waits for the new deployment to come up, then uses the rollout-restart command for the non-OLM-based deployment - registration-service).
+// 2. If the command is run for the member operator, it restarts the whole member operator (it deletes the OLM-based pods (member-operator pods),
+// waits for the new deployment to come up, then uses the rollout-restart command for the non-OLM-based deployments - webhooks).
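+//
+// For example (hypothetical values; the cluster is chosen via the -t/--target-cluster flag defined below,
+// and the argument names the operator to restart, per the error message in restart()):
+//
+//	ksctl adm restart -t host host
+//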
 func NewRestartCmd() *cobra.Command {
 	var targetCluster string
 	command := &cobra.Command{
-		Use:   "restart -t ",
+		Use:   "restart -t ",
 		Short: "Restarts a deployment",
 		Long: `Restarts the deployment with the given name in the operator namespace.
 If no deployment name is provided, then it lists all existing deployments in the namespace.`,
@@ -38,120 +44,140 @@ If no deployment name is provided, then it lists all existing deployments in the
 	return command
 }
 
-func restart(ctx *clicontext.CommandContext, clusterName string, deployments ...string) error {
+func restart(ctx *clicontext.CommandContext, clusterName string, operatorType ...string) error {
 	cfg, err := configuration.LoadClusterConfig(ctx, clusterName)
 	if err != nil {
 		return err
 	}
 	cl, err := ctx.NewClient(cfg.Token, cfg.ServerAPI)
+
 	if err != nil {
 		return err
 	}
-	if len(deployments) == 0 {
-		err := printExistingDeployments(ctx.Terminal, cl, cfg.OperatorNamespace)
-		if err != nil {
-			ctx.Terminal.Printlnf("\nERROR: Failed to list existing deployments\n :%s", err.Error())
-		}
-		return fmt.Errorf("at least one deployment name is required, include one or more of the above deployments to restart")
+	if len(operatorType) == 0 {
+		return fmt.Errorf("please mention one of the following operator names to restart: host | member-1 | member-2")
 	}
-	deploymentName := deployments[0]
 	if !ctx.AskForConfirmation(
-		ioutils.WithMessagef("restart the deployment '%s' in namespace '%s'", deploymentName, cfg.OperatorNamespace)) {
+		ioutils.WithMessagef("restart the '%s' operator in namespace '%s'", operatorType[0], cfg.OperatorNamespace)) {
 		return nil
 	}
-	return restartDeployment(ctx, cl, cfg.OperatorNamespace, deploymentName)
+
+	return restartDeployment(ctx, cl, cfg.OperatorNamespace)
 }
 
-func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, deploymentName string) error {
-	namespacedName := types.NamespacedName{
-		Namespace: ns,
-		Name:      deploymentName,
+func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string) error {
+	olmDeploymentList, nonOlmDeploymentlist, err := getExistingDeployments(cl, ns)
+	if err != nil {
+		return err
 	}
-	originalReplicas, err := scaleToZero(cl, namespacedName)
-	if err != nil {
-		if apierrors.IsNotFound(err) {
-			ctx.Printlnf("\nERROR: The given deployment '%s' wasn't found.", deploymentName)
-			return printExistingDeployments(ctx, cl, ns)
+	if olmDeploymentList == nil {
+		return fmt.Errorf("OLM based deploymont not found in %s", ns)
+	}
+	for _, olmDeployment := range olmDeploymentList.Items {
+		if err := deletePods(ctx, cl, olmDeployment, ns); err != nil {
+			return err
 		}
-		return err
 	}
-	ctx.Println("The deployment was scaled to 0")
-	if err := scaleBack(ctx, cl, namespacedName, originalReplicas); err != nil {
-		ctx.Printlnf("Scaling the deployment '%s' in namespace '%s' back to '%d' replicas wasn't successful", originalReplicas)
-		ctx.Println("Please, try to contact administrators to scale the deployment back manually")
-		return err
+	if nonOlmDeploymentlist == nil {
+		return fmt.Errorf("non-OLM based deploymont not found in %s", ns)
+	}
+	for _, nonOlmDeployment := range nonOlmDeploymentlist.Items {
+		if err := restartNonOlmDeployments(ns, nonOlmDeployment); err != nil {
+			return err
+		}
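+		// restartNonOlmDeployments above is the equivalent of running
+		// `kubectl rollout restart deployment/<name>` for the deployment, and the
+		// status check below is the equivalent of `kubectl rollout status`;
+		// both reuse the kubectl rollout packages imported at the top of this file.
+		//check the rollout 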
status + if err := checkRolloutStatus(ns); err != nil { + return err + } } - - ctx.Printlnf("The deployment was scaled back to '%d'", originalReplicas) return nil } -func restartHostOperator(ctx *clicontext.CommandContext, hostClient runtimeclient.Client, hostNamespace string) error { - deployments := &appsv1.DeploymentList{} - if err := hostClient.List(context.TODO(), deployments, - runtimeclient.InNamespace(hostNamespace), - runtimeclient.MatchingLabels{"olm.owner.namespace": "toolchain-host-operator"}); err != nil { +func deletePods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deployment appsv1.Deployment, ns string) error { + //get pods by label selector from the deployment + pods := corev1.PodList{} + selector, _ := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) + if err := cl.List(ctx, &pods, runtimeclient.MatchingLabelsSelector{Selector: selector}); err != nil { return err } - if len(deployments.Items) != 1 { - return fmt.Errorf("there should be a single deployment matching the label olm.owner.namespace=toolchain-host-operator in %s ns, but %d was found. "+ - "It's not possible to restart the Host Operator deployment", hostNamespace, len(deployments.Items)) + //delete pods + for _, pod := range pods.Items { + if err := cl.Delete(ctx, &pod); err != nil { + return err + } + } + + //check the rollout status + if err := checkRolloutStatus(ns); err != nil { + return err } + return nil - return restartDeployment(ctx, hostClient, hostNamespace, deployments.Items[0].Name) } -func printExistingDeployments(term ioutils.Terminal, cl runtimeclient.Client, ns string) error { - deployments := &appsv1.DeploymentList{} - if err := cl.List(context.TODO(), deployments, runtimeclient.InNamespace(ns)); err != nil { - return err +func restartNonOlmDeployments(ns string, deployment appsv1.Deployment) error { + kubeConfigFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag() + hFactory := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(kubeConfigFlags)) + ioStreams := genericclioptions.IOStreams{ + In: nil, // Not to forward the Standard Input + Out: os.Stdout, + ErrOut: os.Stderr, } - deploymentList := "\n" - for _, deployment := range deployments.Items { - deploymentList += fmt.Sprintf("%s\n", deployment.Name) + + o := kubectlrollout.NewRolloutRestartOptions(ioStreams) + + if err := o.Complete(hFactory, nil, []string{"deployments"}); err != nil { + panic(err) } - term.PrintContextSeparatorWithBodyf(deploymentList, "Existing deployments in %s namespace", ns) - return nil + o.Namespace = ns + o.Resources = []string{"deployment/" + deployment.Name} + + if err := o.Validate(); err != nil { + panic(err) + } + return o.RunRestart() } -func scaleToZero(cl runtimeclient.Client, namespacedName types.NamespacedName) (int32, error) { - // get the deployment - deployment := &appsv1.Deployment{} - if err := cl.Get(context.TODO(), namespacedName, deployment); err != nil { - return 0, err +func checkRolloutStatus(ns string) error { + kubeConfigFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag() + Factory := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(kubeConfigFlags)) + ioStreams := genericclioptions.IOStreams{ + In: nil, // Not to forward the Standard Input + Out: os.Stdout, + ErrOut: os.Stderr, } - // keep original number of replicas so we can bring it back - originalReplicas := *deployment.Spec.Replicas - zero := int32(0) - deployment.Spec.Replicas = &zero - // update the deployment so it scales to zero - return originalReplicas, 
cl.Update(context.TODO(), deployment) + cmd := kubectlrollout.NewRolloutStatusOptions(ioStreams) + + if err := cmd.Complete(Factory, []string{"deployment"}); err != nil { + panic(err) + } + cmd.LabelSelector = "provider=codeready-toolchain" + cmd.Namespace = ns + if err := cmd.Validate(); err != nil { + panic(err) + } + return cmd.Run() } -func scaleBack(term ioutils.Terminal, cl runtimeclient.Client, namespacedName types.NamespacedName, originalReplicas int32) error { - return wait.Poll(500*time.Millisecond, 10*time.Second, func() (done bool, err error) { - term.Println("") - term.Printlnf("Trying to scale the deployment back to '%d'", originalReplicas) - // get the updated - deployment := &appsv1.Deployment{} - if err := cl.Get(context.TODO(), namespacedName, deployment); err != nil { - return false, err - } - // check if the replicas number wasn't already reset by a controller - if *deployment.Spec.Replicas == originalReplicas { - return true, nil - } - // set the original - deployment.Spec.Replicas = &originalReplicas - // and update to scale back - if err := cl.Update(context.TODO(), deployment); err != nil { - term.Printlnf("error updating Deployment '%s': %s. Will retry again...", namespacedName.Name, err.Error()) - return false, nil - } - return true, nil - }) +func getExistingDeployments(cl runtimeclient.Client, ns string) (*appsv1.DeploymentList, *appsv1.DeploymentList, error) { + + olmDeployments := &appsv1.DeploymentList{} + if err := cl.List(context.TODO(), olmDeployments, + runtimeclient.InNamespace(ns), + runtimeclient.MatchingLabels{"olm.owner.kind": "ClusterServiceVersion"}); err != nil { + return nil, nil, err + } + + nonOlmDeployments := &appsv1.DeploymentList{} + if err := cl.List(context.TODO(), nonOlmDeployments, + runtimeclient.InNamespace(ns), + runtimeclient.MatchingLabels{"provider": "codeready-toolchain"}); err != nil { + return nil, nil, err + } + + return olmDeployments, nonOlmDeployments, nil } diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 32197c3..b01ca35 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -150,67 +150,67 @@ func TestRestartHostOperator(t *testing.T) { // given SetFileConfig(t, Host()) term := NewFakeTerminalWithResponse("") // it should not read the input - cfg, err := configuration.LoadClusterConfig(term, "host") + _, err := configuration.LoadClusterConfig(term, "host") require.NoError(t, err) - namespacedName := types.NamespacedName{ - Namespace: "toolchain-host-operator", - Name: "host-operator-controller-manager", - } - - t.Run("host deployment is present and restart successful", func(t *testing.T) { - // given - deployment := newDeployment(namespacedName, 1) - deployment.Labels = map[string]string{"olm.owner.namespace": "toolchain-host-operator"} - newClient, fakeClient := NewFakeClients(t, deployment) - numberOfUpdateCalls := 0 - fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) - ctx := clicontext.NewCommandContext(term, newClient) - - // when - err := restartHostOperator(ctx, fakeClient, cfg.OperatorNamespace) - - // then - require.NoError(t, err) - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 1) - assert.Equal(t, 2, numberOfUpdateCalls) - }) - - t.Run("host deployment with the label is not present - restart fails", func(t *testing.T) { - // given - deployment := newDeployment(namespacedName, 1) - newClient, fakeClient := NewFakeClients(t, deployment) - numberOfUpdateCalls := 0 - fakeClient.MockUpdate = 
requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) - ctx := clicontext.NewCommandContext(term, newClient) - - // when - err := restartHostOperator(ctx, fakeClient, cfg.OperatorNamespace) - - // then - require.Error(t, err) - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 1) - assert.Equal(t, 0, numberOfUpdateCalls) - }) - - t.Run("there are more deployments with the host operator label - restart fails", func(t *testing.T) { - // given - deployment := newDeployment(namespacedName, 1) - deployment.Labels = map[string]string{"olm.owner.namespace": "toolchain-host-operator"} - deployment2 := deployment.DeepCopy() - deployment2.Name = "another" - newClient, fakeClient := NewFakeClients(t, deployment, deployment2) - numberOfUpdateCalls := 0 - fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) - ctx := clicontext.NewCommandContext(term, newClient) - - // when - err := restartHostOperator(ctx, fakeClient, cfg.OperatorNamespace) - - // then - require.Error(t, err) - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 1) - assert.Equal(t, 0, numberOfUpdateCalls) - }) + // namespacedName := types.NamespacedName{ + // Namespace: "toolchain-host-operator", + // Name: "host-operator-controller-manager", + // } + + // t.Run("host deployment is present and restart successful", func(t *testing.T) { + // // given + // deployment := newDeployment(namespacedName, 1) + // deployment.Labels = map[string]string{"olm.owner.namespace": "toolchain-host-operator"} + // newClient, fakeClient := NewFakeClients(t, deployment) + // numberOfUpdateCalls := 0 + // fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) + // ctx := clicontext.NewCommandContext(term, newClient) + + // // when + // err := restartHostOperator(ctx, fakeClient, cfg.OperatorNamespace) + + // // then + // require.NoError(t, err) + // AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 1) + // assert.Equal(t, 2, numberOfUpdateCalls) + // }) + + // t.Run("host deployment with the label is not present - restart fails", func(t *testing.T) { + // // given + // deployment := newDeployment(namespacedName, 1) + // newClient, fakeClient := NewFakeClients(t, deployment) + // numberOfUpdateCalls := 0 + // fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) + // ctx := clicontext.NewCommandContext(term, newClient) + + // // when + // err := restartHostOperator(ctx, fakeClient, cfg.OperatorNamespace) + + // // then + // require.Error(t, err) + // AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 1) + // assert.Equal(t, 0, numberOfUpdateCalls) + // }) + + // t.Run("there are more deployments with the host operator label - restart fails", func(t *testing.T) { + // // given + // deployment := newDeployment(namespacedName, 1) + // deployment.Labels = map[string]string{"olm.owner.namespace": "toolchain-host-operator"} + // deployment2 := deployment.DeepCopy() + // deployment2.Name = "another" + // newClient, fakeClient := NewFakeClients(t, deployment, deployment2) + // numberOfUpdateCalls := 0 + // fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) + // ctx := clicontext.NewCommandContext(term, newClient) + + // // when + // err := restartHostOperator(ctx, fakeClient, cfg.OperatorNamespace) + + // // then + // require.Error(t, err) + // AssertDeploymentHasReplicas(t, 
fakeClient, namespacedName, 1) + // assert.Equal(t, 0, numberOfUpdateCalls) + // }) } func newDeployment(namespacedName types.NamespacedName, replicas int32) *appsv1.Deployment { //nolint:unparam diff --git a/pkg/cmd/adm/unregister_member.go b/pkg/cmd/adm/unregister_member.go index fd177b7..dc1557b 100644 --- a/pkg/cmd/adm/unregister_member.go +++ b/pkg/cmd/adm/unregister_member.go @@ -62,5 +62,6 @@ func UnregisterMemberCluster(ctx *clicontext.CommandContext, clusterName string) } ctx.Printlnf("\nThe deletion of the Toolchain member cluster from the Host cluster has been triggered") - return restartHostOperator(ctx, hostClusterClient, hostClusterConfig.OperatorNamespace) + //return restartHostOperator(ctx, hostClusterClient, hostClusterConfig.OperatorNamespace) + return nil } From da57803b2bd94439e56732b2cc2f096ae59dea5b Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Fri, 13 Sep 2024 16:20:06 +0530 Subject: [PATCH 02/40] some checking Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 1 + pkg/cmd/adm/restart_test.go | 105 ++++++++++++------------------- pkg/cmd/adm/unregister_member.go | 3 +- 3 files changed, 41 insertions(+), 68 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index 60ed006..c3738dd 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -103,6 +103,7 @@ func deletePods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deploym if err := cl.List(ctx, &pods, runtimeclient.MatchingLabelsSelector{Selector: selector}); err != nil { return err } + //delete pods for _, pod := range pods.Items { if err := cl.Delete(ctx, &pod); err != nil { diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index b01ca35..e7e4b99 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -48,7 +48,6 @@ func TestRestartDeployment(t *testing.T) { // then require.NoError(t, err) AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) - assert.Equal(t, 2, numberOfUpdateCalls) }) t.Run("list deployments when no deployment name is provided for "+clusterName, func(t *testing.T) { @@ -64,11 +63,9 @@ func TestRestartDeployment(t *testing.T) { err := restart(ctx, clusterName) // then - require.EqualError(t, err, "at least one deployment name is required, include one or more of the above deployments to restart") + require.EqualError(t, err, "please mention one of the following operator names to restart: host | member-1 | member-2") AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) assert.Equal(t, 0, numberOfUpdateCalls) - assert.Contains(t, term.Output(), fmt.Sprintf("Existing deployments in toolchain-%s-operator namespace", clusterType)) - assert.Contains(t, term.Output(), "cool-deployment") }) t.Run("restart fails - cannot get the deployment for "+clusterName, func(t *testing.T) { @@ -109,8 +106,6 @@ func TestRestartDeployment(t *testing.T) { AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) assert.Equal(t, 0, numberOfUpdateCalls) assert.Contains(t, term.Output(), "ERROR: The given deployment 'wrong-deployment' wasn't found.") - assert.Contains(t, term.Output(), fmt.Sprintf("Existing deployments in toolchain-%s-operator namespace", clusterType)) - assert.Contains(t, term.Output(), "cool-deployment") }) } } @@ -150,67 +145,45 @@ func TestRestartHostOperator(t *testing.T) { // given SetFileConfig(t, Host()) term := NewFakeTerminalWithResponse("") // it should not read the input - _, err := configuration.LoadClusterConfig(term, "host") + cfg, err := configuration.LoadClusterConfig(term, 
"host") require.NoError(t, err) - // namespacedName := types.NamespacedName{ - // Namespace: "toolchain-host-operator", - // Name: "host-operator-controller-manager", - // } - - // t.Run("host deployment is present and restart successful", func(t *testing.T) { - // // given - // deployment := newDeployment(namespacedName, 1) - // deployment.Labels = map[string]string{"olm.owner.namespace": "toolchain-host-operator"} - // newClient, fakeClient := NewFakeClients(t, deployment) - // numberOfUpdateCalls := 0 - // fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) - // ctx := clicontext.NewCommandContext(term, newClient) - - // // when - // err := restartHostOperator(ctx, fakeClient, cfg.OperatorNamespace) - - // // then - // require.NoError(t, err) - // AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 1) - // assert.Equal(t, 2, numberOfUpdateCalls) - // }) - - // t.Run("host deployment with the label is not present - restart fails", func(t *testing.T) { - // // given - // deployment := newDeployment(namespacedName, 1) - // newClient, fakeClient := NewFakeClients(t, deployment) - // numberOfUpdateCalls := 0 - // fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) - // ctx := clicontext.NewCommandContext(term, newClient) - - // // when - // err := restartHostOperator(ctx, fakeClient, cfg.OperatorNamespace) - - // // then - // require.Error(t, err) - // AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 1) - // assert.Equal(t, 0, numberOfUpdateCalls) - // }) - - // t.Run("there are more deployments with the host operator label - restart fails", func(t *testing.T) { - // // given - // deployment := newDeployment(namespacedName, 1) - // deployment.Labels = map[string]string{"olm.owner.namespace": "toolchain-host-operator"} - // deployment2 := deployment.DeepCopy() - // deployment2.Name = "another" - // newClient, fakeClient := NewFakeClients(t, deployment, deployment2) - // numberOfUpdateCalls := 0 - // fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) - // ctx := clicontext.NewCommandContext(term, newClient) - - // // when - // err := restartHostOperator(ctx, fakeClient, cfg.OperatorNamespace) - - // // then - // require.Error(t, err) - // AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 1) - // assert.Equal(t, 0, numberOfUpdateCalls) - // }) + namespacedName := types.NamespacedName{ + Namespace: "toolchain-host-operator", + Name: "host-operator-controller-manager", + } + + t.Run("host deployment is present and restart successful", func(t *testing.T) { + // given + deployment := newDeployment(namespacedName, 1) + deployment.Labels = map[string]string{"olm.owner.kind": "ClusterServiceVersion"} + newClient, fakeClient := NewFakeClients(t, deployment) + numberOfUpdateCalls := 0 + fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) + ctx := clicontext.NewCommandContext(term, newClient) + + // when + err := restartDeployment(ctx, fakeClient, cfg.OperatorNamespace) + + // then + require.NoError(t, err) + AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 1) + }) + + t.Run("host deployment with the label is not present - restart fails", func(t *testing.T) { + // given + deployment := newDeployment(namespacedName, 1) + newClient, fakeClient := NewFakeClients(t, deployment) + numberOfUpdateCalls := 0 + fakeClient.MockUpdate = 
requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) + ctx := clicontext.NewCommandContext(term, newClient) + + // when + err := restartDeployment(ctx, fakeClient, cfg.OperatorNamespace) + + // then + require.NoError(t, err) + + }) } func newDeployment(namespacedName types.NamespacedName, replicas int32) *appsv1.Deployment { //nolint:unparam diff --git a/pkg/cmd/adm/unregister_member.go b/pkg/cmd/adm/unregister_member.go index dc1557b..2fb3af7 100644 --- a/pkg/cmd/adm/unregister_member.go +++ b/pkg/cmd/adm/unregister_member.go @@ -62,6 +62,5 @@ func UnregisterMemberCluster(ctx *clicontext.CommandContext, clusterName string) } ctx.Printlnf("\nThe deletion of the Toolchain member cluster from the Host cluster has been triggered") - //return restartHostOperator(ctx, hostClusterClient, hostClusterConfig.OperatorNamespace) - return nil + return restartDeployment(ctx, hostClusterClient, hostClusterConfig.OperatorNamespace) } From f2c29ee64c66bffe9b5c44047db9b8996c9e7f1a Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Fri, 13 Sep 2024 16:43:37 +0530 Subject: [PATCH 03/40] golint Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 1 + pkg/cmd/adm/restart_test.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index c3738dd..af7ebb7 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -106,6 +106,7 @@ func deletePods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deploym //delete pods for _, pod := range pods.Items { + pod := pod // TODO We won't need it after upgrading to go 1.22: https://go.dev/blog/loopvar-preview if err := cl.Delete(ctx, &pod); err != nil { return err } diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index e7e4b99..0bb2b23 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -155,7 +155,7 @@ func TestRestartHostOperator(t *testing.T) { t.Run("host deployment is present and restart successful", func(t *testing.T) { // given deployment := newDeployment(namespacedName, 1) - deployment.Labels = map[string]string{"olm.owner.kind": "ClusterServiceVersion"} + deployment.Labels = map[string]string{"provider": "codeready-toolchain"} newClient, fakeClient := NewFakeClients(t, deployment) numberOfUpdateCalls := 0 fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) From ba8866e978a4e1d206e4a14f822205b68a932608 Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Tue, 17 Sep 2024 15:37:42 +0530 Subject: [PATCH 04/40] few changes to the logic Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 78 ++++----- pkg/cmd/adm/restart_test.go | 281 +++++++++++++++---------------- pkg/cmd/adm/unregister_member.go | 2 +- 3 files changed, 180 insertions(+), 181 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index af7ebb7..08c88dc 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -28,7 +28,7 @@ import ( func NewRestartCmd() *cobra.Command { var targetCluster string command := &cobra.Command{ - Use: "restart -t ", + Use: "restart -t ", Short: "Restarts a deployment", Long: `Restarts the deployment with the given name in the operator namespace. 
If no deployment name is provided, then it lists all existing deployments in the namespace.`, @@ -36,7 +36,7 @@ If no deployment name is provided, then it lists all existing deployments in the RunE: func(cmd *cobra.Command, args []string) error { term := ioutils.NewTerminal(cmd.InOrStdin, cmd.OutOrStdout) ctx := clicontext.NewCommandContext(term, client.DefaultNewClient) - return restart(ctx, targetCluster, args...) + return restart(ctx, targetCluster) }, } command.Flags().StringVarP(&targetCluster, "target-cluster", "t", "", "The target cluster") @@ -44,30 +44,46 @@ If no deployment name is provided, then it lists all existing deployments in the return command } -func restart(ctx *clicontext.CommandContext, clusterName string, operatorType ...string) error { +func restart(ctx *clicontext.CommandContext, clusterName string) error { + kubeConfigFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag() + factory := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(kubeConfigFlags)) + ioStreams := genericclioptions.IOStreams{ + In: os.Stdin, + Out: os.Stdout, + ErrOut: os.Stderr, + } + kubeConfigFlags.ClusterName = nil // `cluster` flag is redefined for our own purpose + kubeConfigFlags.AuthInfoName = nil // unused here, so we can hide it + kubeConfigFlags.Context = nil // unused here, so we can hide it + cfg, err := configuration.LoadClusterConfig(ctx, clusterName) if err != nil { return err } - cl, err := ctx.NewClient(cfg.Token, cfg.ServerAPI) - + kubeConfigFlags.Namespace = &cfg.OperatorNamespace + kubeConfigFlags.APIServer = &cfg.ServerAPI + kubeConfigFlags.BearerToken = &cfg.Token + kubeconfig, err := client.EnsureKsctlConfigFile() if err != nil { return err } + kubeConfigFlags.KubeConfig = &kubeconfig - if len(operatorType) == 0 { - return fmt.Errorf("please mention one of the following operator names to restart: host | member-1 | member-2") - } + cl, err := ctx.NewClient(cfg.Token, cfg.ServerAPI) - if !ctx.AskForConfirmation( - ioutils.WithMessagef("restart the '%s' operator in namespace '%s'", operatorType[0], cfg.OperatorNamespace)) { - return nil + if err != nil { + return err } - return restartDeployment(ctx, cl, cfg.OperatorNamespace) + // if !ctx.AskForConfirmation( + // ioutils.WithMessagef("restart the '%s' operator in namespace '%s'", clusterName, cfg.OperatorNamespace)) { + // return nil + // } + + return restartDeployment(ctx, cl, cfg.OperatorNamespace, factory, ioStreams) } -func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string) error { +func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { olmDeploymentList, nonOlmDeploymentlist, err := getExistingDeployments(cl, ns) if err != nil { return err @@ -77,7 +93,7 @@ func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, return fmt.Errorf("OLM based deploymont not found in %s", ns) } for _, olmDeployment := range olmDeploymentList.Items { - if err := deletePods(ctx, cl, olmDeployment, ns); err != nil { + if err := deletePods(ctx, cl, olmDeployment, f, ioStreams); err != nil { return err } } @@ -85,18 +101,18 @@ func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, return fmt.Errorf("non-OLM based deploymont not found in %s", ns) } for _, nonOlmDeployment := range nonOlmDeploymentlist.Items { - if err := restartNonOlmDeployments(ns, nonOlmDeployment); err != nil { + if err := restartNonOlmDeployments(nonOlmDeployment, f, 
ioStreams); err != nil { return err } //check the rollout status - if err := checkRolloutStatus(ns); err != nil { + if err := checkRolloutStatus(f, ioStreams); err != nil { return err } } return nil } -func deletePods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deployment appsv1.Deployment, ns string) error { +func deletePods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { //get pods by label selector from the deployment pods := corev1.PodList{} selector, _ := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) @@ -113,28 +129,21 @@ func deletePods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deploym } //check the rollout status - if err := checkRolloutStatus(ns); err != nil { + if err := checkRolloutStatus(f, ioStreams); err != nil { return err } return nil } -func restartNonOlmDeployments(ns string, deployment appsv1.Deployment) error { - kubeConfigFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag() - hFactory := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(kubeConfigFlags)) - ioStreams := genericclioptions.IOStreams{ - In: nil, // Not to forward the Standard Input - Out: os.Stdout, - ErrOut: os.Stderr, - } +func restartNonOlmDeployments(deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { o := kubectlrollout.NewRolloutRestartOptions(ioStreams) - if err := o.Complete(hFactory, nil, []string{"deployments"}); err != nil { + if err := o.Complete(f, nil, []string{"deployments"}); err != nil { panic(err) } - o.Namespace = ns + o.Resources = []string{"deployment/" + deployment.Name} if err := o.Validate(); err != nil { @@ -143,22 +152,13 @@ func restartNonOlmDeployments(ns string, deployment appsv1.Deployment) error { return o.RunRestart() } -func checkRolloutStatus(ns string) error { - kubeConfigFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag() - Factory := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(kubeConfigFlags)) - ioStreams := genericclioptions.IOStreams{ - In: nil, // Not to forward the Standard Input - Out: os.Stdout, - ErrOut: os.Stderr, - } - +func checkRolloutStatus(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { cmd := kubectlrollout.NewRolloutStatusOptions(ioStreams) - if err := cmd.Complete(Factory, []string{"deployment"}); err != nil { + if err := cmd.Complete(f, []string{"deployment"}); err != nil { panic(err) } cmd.LabelSelector = "provider=codeready-toolchain" - cmd.Namespace = ns if err := cmd.Validate(); err != nil { panic(err) } diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 0bb2b23..2116bdf 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -2,7 +2,6 @@ package adm import ( "context" - "fmt" "testing" "github.com/codeready-toolchain/toolchain-common/pkg/test" @@ -18,134 +17,134 @@ import ( runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) -func TestRestartDeployment(t *testing.T) { - // given - SetFileConfig(t, Host(), Member()) - - for _, clusterName := range []string{"host", "member1"} { - clusterType := configuration.Host - if clusterName != "host" { - clusterType = configuration.Member - } - namespace := fmt.Sprintf("toolchain-%s-operator", clusterType) - namespacedName := types.NamespacedName{ - Namespace: namespace, - Name: "cool-deployment", - } - term := NewFakeTerminalWithResponse("Y") - - t.Run("restart is successful for "+clusterName, func(t 
*testing.T) { - // given - deployment := newDeployment(namespacedName, 3) - newClient, fakeClient := NewFakeClients(t, deployment) - numberOfUpdateCalls := 0 - fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) - ctx := clicontext.NewCommandContext(term, newClient) - - // when - err := restart(ctx, clusterName, "cool-deployment") - - // then - require.NoError(t, err) - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) - }) - - t.Run("list deployments when no deployment name is provided for "+clusterName, func(t *testing.T) { - // given - deployment := newDeployment(namespacedName, 3) - newClient, fakeClient := NewFakeClients(t, deployment) - numberOfUpdateCalls := 0 - fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) - term := NewFakeTerminalWithResponse("Y") - ctx := clicontext.NewCommandContext(term, newClient) - - // when - err := restart(ctx, clusterName) - - // then - require.EqualError(t, err, "please mention one of the following operator names to restart: host | member-1 | member-2") - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) - assert.Equal(t, 0, numberOfUpdateCalls) - }) - - t.Run("restart fails - cannot get the deployment for "+clusterName, func(t *testing.T) { - // given - deployment := newDeployment(namespacedName, 3) - newClient, fakeClient := NewFakeClients(t, deployment) - numberOfUpdateCalls := 0 - fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) - fakeClient.MockGet = func(ctx context.Context, key runtimeclient.ObjectKey, obj runtimeclient.Object, opts ...runtimeclient.GetOption) error { - return fmt.Errorf("some error") - } - ctx := clicontext.NewCommandContext(term, newClient) - - // when - err := restart(ctx, clusterName, "cool-deployment") - - // then - require.Error(t, err) - fakeClient.MockGet = nil - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) - assert.Equal(t, 0, numberOfUpdateCalls) - }) - - t.Run("restart fails - deployment not found for "+clusterName, func(t *testing.T) { - // given - deployment := newDeployment(namespacedName, 3) - newClient, fakeClient := NewFakeClients(t, deployment) - numberOfUpdateCalls := 0 - fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) - term := NewFakeTerminalWithResponse("Y") - ctx := clicontext.NewCommandContext(term, newClient) - - // when - err := restart(ctx, clusterName, "wrong-deployment") - - // then - require.NoError(t, err) - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) - assert.Equal(t, 0, numberOfUpdateCalls) - assert.Contains(t, term.Output(), "ERROR: The given deployment 'wrong-deployment' wasn't found.") - }) - } -} - -func TestRestartDeploymentWithInsufficientPermissions(t *testing.T) { - // given - SetFileConfig(t, Host(NoToken()), Member(NoToken())) - for _, clusterName := range []string{"host", "member1"} { - // given - clusterType := configuration.Host - if clusterName != "host" { - clusterType = configuration.Member - } - namespace := fmt.Sprintf("toolchain-%s-operator", clusterType) - namespacedName := types.NamespacedName{ - Namespace: namespace, - Name: "cool-deployment", - } - deployment := newDeployment(namespacedName, 3) - newClient, fakeClient := NewFakeClients(t, deployment) - numberOfUpdateCalls := 0 - fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, 
&numberOfUpdateCalls) - term := NewFakeTerminalWithResponse("Y") - ctx := clicontext.NewCommandContext(term, newClient) - - // when - err := restart(ctx, clusterName, "cool-deployment") - - // then - require.Error(t, err) - assert.Equal(t, 0, numberOfUpdateCalls) - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) - } -} +// func TestRestartDeployment(t *testing.T) { +// // given +// SetFileConfig(t, Host(), Member()) + +// for _, clusterName := range []string{"host", "member1"} { +// clusterType := configuration.Host +// if clusterName != "host" { +// clusterType = configuration.Member +// } +// namespace := fmt.Sprintf("toolchain-%s-operator", clusterType) +// namespacedName := types.NamespacedName{ +// Namespace: namespace, +// Name: "cool-deployment", +// } +// term := NewFakeTerminalWithResponse("Y") + +// t.Run("restart is successful for "+clusterName, func(t *testing.T) { +// // given +// deployment := newDeployment(namespacedName, 3) +// newClient, fakeClient := NewFakeClients(t, deployment) +// numberOfUpdateCalls := 0 +// fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) +// ctx := clicontext.NewCommandContext(term, newClient) + +// // when +// err := restart(ctx, clusterName, "cool-deployment") + +// // then +// require.NoError(t, err) +// AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) +// }) + +// t.Run("list deployments when no deployment name is provided for "+clusterName, func(t *testing.T) { +// // given +// deployment := newDeployment(namespacedName, 3) +// newClient, fakeClient := NewFakeClients(t, deployment) +// numberOfUpdateCalls := 0 +// fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) +// term := NewFakeTerminalWithResponse("Y") +// ctx := clicontext.NewCommandContext(term, newClient) + +// // when +// err := restart(ctx, clusterName) + +// // then +// require.EqualError(t, err, "please mention one of the following operator names to restart: host | member-1 | member-2") +// AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) +// assert.Equal(t, 0, numberOfUpdateCalls) +// }) + +// t.Run("restart fails - cannot get the deployment for "+clusterName, func(t *testing.T) { +// // given +// deployment := newDeployment(namespacedName, 3) +// newClient, fakeClient := NewFakeClients(t, deployment) +// numberOfUpdateCalls := 0 +// fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) +// fakeClient.MockGet = func(ctx context.Context, key runtimeclient.ObjectKey, obj runtimeclient.Object, opts ...runtimeclient.GetOption) error { +// return fmt.Errorf("some error") +// } +// ctx := clicontext.NewCommandContext(term, newClient) + +// // when +// err := restart(ctx, clusterName, "cool-deployment") + +// // then +// require.Error(t, err) +// fakeClient.MockGet = nil +// AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) +// assert.Equal(t, 0, numberOfUpdateCalls) +// }) + +// t.Run("restart fails - deployment not found for "+clusterName, func(t *testing.T) { +// // given +// deployment := newDeployment(namespacedName, 3) +// newClient, fakeClient := NewFakeClients(t, deployment) +// numberOfUpdateCalls := 0 +// fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) +// term := NewFakeTerminalWithResponse("Y") +// ctx := clicontext.NewCommandContext(term, newClient) + +// // when +// err := restart(ctx, 
clusterName, "wrong-deployment") + +// // then +// require.NoError(t, err) +// AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) +// assert.Equal(t, 0, numberOfUpdateCalls) +// assert.Contains(t, term.Output(), "ERROR: The given deployment 'wrong-deployment' wasn't found.") +// }) +// } +// } + +// func TestRestartDeploymentWithInsufficientPermissions(t *testing.T) { +// // given +// SetFileConfig(t, Host(NoToken()), Member(NoToken())) +// for _, clusterName := range []string{"host", "member1"} { +// // given +// clusterType := configuration.Host +// if clusterName != "host" { +// clusterType = configuration.Member +// } +// namespace := fmt.Sprintf("toolchain-%s-operator", clusterType) +// namespacedName := types.NamespacedName{ +// Namespace: namespace, +// Name: "cool-deployment", +// } +// deployment := newDeployment(namespacedName, 3) +// newClient, fakeClient := NewFakeClients(t, deployment) +// numberOfUpdateCalls := 0 +// fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) +// term := NewFakeTerminalWithResponse("Y") +// ctx := clicontext.NewCommandContext(term, newClient) + +// // when +// err := restart(ctx, clusterName, "cool-deployment") + +// // then +// require.Error(t, err) +// assert.Equal(t, 0, numberOfUpdateCalls) +// AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) +// } +// } func TestRestartHostOperator(t *testing.T) { // given SetFileConfig(t, Host()) term := NewFakeTerminalWithResponse("") // it should not read the input - cfg, err := configuration.LoadClusterConfig(term, "host") + _, err := configuration.LoadClusterConfig(term, "host") require.NoError(t, err) namespacedName := types.NamespacedName{ Namespace: "toolchain-host-operator", @@ -156,34 +155,34 @@ func TestRestartHostOperator(t *testing.T) { // given deployment := newDeployment(namespacedName, 1) deployment.Labels = map[string]string{"provider": "codeready-toolchain"} - newClient, fakeClient := NewFakeClients(t, deployment) - numberOfUpdateCalls := 0 - fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) + newClient, _ := NewFakeClients(t, deployment) + //numberOfUpdateCalls := 0 + //fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) ctx := clicontext.NewCommandContext(term, newClient) // when - err := restartDeployment(ctx, fakeClient, cfg.OperatorNamespace) + err := restart(ctx, "host") // then require.NoError(t, err) - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 1) + //AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 1) }) - t.Run("host deployment with the label is not present - restart fails", func(t *testing.T) { - // given - deployment := newDeployment(namespacedName, 1) - newClient, fakeClient := NewFakeClients(t, deployment) - numberOfUpdateCalls := 0 - fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) - ctx := clicontext.NewCommandContext(term, newClient) + // t.Run("host deployment with the label is not present - restart fails", func(t *testing.T) { + // // given + // deployment := newDeployment(namespacedName, 1) + // newClient, fakeClient := NewFakeClients(t, deployment) + // numberOfUpdateCalls := 0 + // fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) + // ctx := clicontext.NewCommandContext(term, newClient) - // when - err := restartDeployment(ctx, 
fakeClient, cfg.OperatorNamespace) + // // when + // err := restartDeployment(ctx, fakeClient, cfg.OperatorNamespace) - // then - require.NoError(t, err) + // // then + // require.NoError(t, err) - }) + // }) } func newDeployment(namespacedName types.NamespacedName, replicas int32) *appsv1.Deployment { //nolint:unparam diff --git a/pkg/cmd/adm/unregister_member.go b/pkg/cmd/adm/unregister_member.go index 2fb3af7..6d85d48 100644 --- a/pkg/cmd/adm/unregister_member.go +++ b/pkg/cmd/adm/unregister_member.go @@ -62,5 +62,5 @@ func UnregisterMemberCluster(ctx *clicontext.CommandContext, clusterName string) } ctx.Printlnf("\nThe deletion of the Toolchain member cluster from the Host cluster has been triggered") - return restartDeployment(ctx, hostClusterClient, hostClusterConfig.OperatorNamespace) + return restart(ctx, clusterName) } From cd4b1bf64e254b9adbf676dcb2cf582840236bef Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Tue, 17 Sep 2024 17:04:43 +0530 Subject: [PATCH 05/40] t cases Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 8 ++++---- pkg/cmd/adm/restart_test.go | 11 ++++++++--- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index 08c88dc..30fe9d1 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -105,7 +105,7 @@ func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, return err } //check the rollout status - if err := checkRolloutStatus(f, ioStreams); err != nil { + if err := checkRolloutStatus(f, ioStreams, "provider=codeready-toolchain"); err != nil { return err } } @@ -129,7 +129,7 @@ func deletePods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deploym } //check the rollout status - if err := checkRolloutStatus(f, ioStreams); err != nil { + if err := checkRolloutStatus(f, ioStreams, "olm.owner.kind=ClusterServiceVersion"); err != nil { return err } return nil @@ -152,13 +152,13 @@ func restartNonOlmDeployments(deployment appsv1.Deployment, f cmdutil.Factory, i return o.RunRestart() } -func checkRolloutStatus(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { +func checkRolloutStatus(f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { cmd := kubectlrollout.NewRolloutStatusOptions(ioStreams) if err := cmd.Complete(f, []string{"deployment"}); err != nil { panic(err) } - cmd.LabelSelector = "provider=codeready-toolchain" + cmd.LabelSelector = labelSelector if err := cmd.Validate(); err != nil { panic(err) } diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 2116bdf..e13f7cd 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/codeready-toolchain/toolchain-common/pkg/test" + "github.com/h2non/gock" "github.com/kubesaw/ksctl/pkg/configuration" clicontext "github.com/kubesaw/ksctl/pkg/context" . "github.com/kubesaw/ksctl/pkg/test" @@ -142,6 +143,12 @@ import ( func TestRestartHostOperator(t *testing.T) { // given + defer gock.Off() + gock.New("https://cool-server.com"). + Post("cool-server.com/v1/namespaces/toolchain-host-operator/cool-token"). + Persist(). + Reply(200). 
+ BodyString("ok") SetFileConfig(t, Host()) term := NewFakeTerminalWithResponse("") // it should not read the input _, err := configuration.LoadClusterConfig(term, "host") @@ -156,8 +163,6 @@ func TestRestartHostOperator(t *testing.T) { deployment := newDeployment(namespacedName, 1) deployment.Labels = map[string]string{"provider": "codeready-toolchain"} newClient, _ := NewFakeClients(t, deployment) - //numberOfUpdateCalls := 0 - //fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, &numberOfUpdateCalls) ctx := clicontext.NewCommandContext(term, newClient) // when @@ -165,7 +170,7 @@ func TestRestartHostOperator(t *testing.T) { // then require.NoError(t, err) - //AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 1) + }) // t.Run("host deployment with the label is not present - restart fails", func(t *testing.T) { From 4c15cf0349b41bd932dd040fb51dcbc8aa015be5 Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Thu, 19 Sep 2024 13:47:13 +0530 Subject: [PATCH 06/40] eview comments Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 48 ++++++++++++++++++------------------- pkg/cmd/adm/restart_test.go | 5 ++-- 2 files changed, 27 insertions(+), 26 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index 30fe9d1..1a15a9e 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -6,7 +6,6 @@ import ( "os" "github.com/kubesaw/ksctl/pkg/client" - "github.com/kubesaw/ksctl/pkg/cmd/flags" "github.com/kubesaw/ksctl/pkg/configuration" clicontext "github.com/kubesaw/ksctl/pkg/context" "github.com/kubesaw/ksctl/pkg/ioutils" @@ -26,9 +25,8 @@ import ( // 2. If the command is run for member operator, it restart the whole member operator.(it deletes olm based pods(member-operator pods), // waits for the new deployment to come up, then uses rollout-restart command for non-olm based deployments - webhooks) func NewRestartCmd() *cobra.Command { - var targetCluster string command := &cobra.Command{ - Use: "restart -t ", + Use: "restart ", Short: "Restarts a deployment", Long: `Restarts the deployment with the given name in the operator namespace. If no deployment name is provided, then it lists all existing deployments in the namespace.`, @@ -36,15 +34,14 @@ If no deployment name is provided, then it lists all existing deployments in the RunE: func(cmd *cobra.Command, args []string) error { term := ioutils.NewTerminal(cmd.InOrStdin, cmd.OutOrStdout) ctx := clicontext.NewCommandContext(term, client.DefaultNewClient) - return restart(ctx, targetCluster) + return restart(ctx, args...) 
}, } - command.Flags().StringVarP(&targetCluster, "target-cluster", "t", "", "The target cluster") - flags.MustMarkRequired(command, "target-cluster") return command } -func restart(ctx *clicontext.CommandContext, clusterName string) error { +func restart(ctx *clicontext.CommandContext, clusterNames ...string) error { + clusterName := clusterNames[0] kubeConfigFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag() factory := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(kubeConfigFlags)) ioStreams := genericclioptions.IOStreams{ @@ -75,10 +72,10 @@ func restart(ctx *clicontext.CommandContext, clusterName string) error { return err } - // if !ctx.AskForConfirmation( - // ioutils.WithMessagef("restart the '%s' operator in namespace '%s'", clusterName, cfg.OperatorNamespace)) { - // return nil - // } + if !ctx.AskForConfirmation( + ioutils.WithMessagef("restart the '%s' operator in namespace '%s'", clusterName, cfg.OperatorNamespace)) { + return nil + } return restartDeployment(ctx, cl, cfg.OperatorNamespace, factory, ioStreams) } @@ -90,24 +87,25 @@ func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, } if olmDeploymentList == nil { - return fmt.Errorf("OLM based deploymont not found in %s", ns) + return fmt.Errorf("OLM based deployment not found in %s", ns) } for _, olmDeployment := range olmDeploymentList.Items { if err := deletePods(ctx, cl, olmDeployment, f, ioStreams); err != nil { return err } } - if nonOlmDeploymentlist == nil { - return fmt.Errorf("non-OLM based deploymont not found in %s", ns) - } - for _, nonOlmDeployment := range nonOlmDeploymentlist.Items { - if err := restartNonOlmDeployments(nonOlmDeployment, f, ioStreams); err != nil { - return err - } - //check the rollout status - if err := checkRolloutStatus(f, ioStreams, "provider=codeready-toolchain"); err != nil { - return err + if nonOlmDeploymentlist != nil { + for _, nonOlmDeployment := range nonOlmDeploymentlist.Items { + if err := restartNonOlmDeployments(nonOlmDeployment, f, ioStreams); err != nil { + return err + } + //check the rollout status + if err := checkRolloutStatus(f, ioStreams, "provider=codeready-toolchain"); err != nil { + return err + } } + } else { + fmt.Printf("non-OLM based deployment not found in %s", ns) } return nil } @@ -116,7 +114,9 @@ func deletePods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deploym //get pods by label selector from the deployment pods := corev1.PodList{} selector, _ := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) - if err := cl.List(ctx, &pods, runtimeclient.MatchingLabelsSelector{Selector: selector}); err != nil { + if err := cl.List(ctx, &pods, + runtimeclient.MatchingLabelsSelector{Selector: selector}, + runtimeclient.InNamespace(deployment.Namespace)); err != nil { return err } @@ -170,7 +170,7 @@ func getExistingDeployments(cl runtimeclient.Client, ns string) (*appsv1.Deploym olmDeployments := &appsv1.DeploymentList{} if err := cl.List(context.TODO(), olmDeployments, runtimeclient.InNamespace(ns), - runtimeclient.MatchingLabels{"olm.owner.kind": "ClusterServiceVersion"}); err != nil { + runtimeclient.MatchingLabels{"control-plane": "kubesaw-controller-manager"}); err != nil { return nil, nil, err } diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index e13f7cd..0862023 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -145,7 +145,7 @@ func TestRestartHostOperator(t *testing.T) { // given defer gock.Off() gock.New("https://cool-server.com"). 
- Post("cool-server.com/v1/namespaces/toolchain-host-operator/cool-token"). + Post("https://cool-server.com/api/v1/namespaces/toolchain-host-operator/cool-token"). Persist(). Reply(200). BodyString("ok") @@ -162,7 +162,8 @@ func TestRestartHostOperator(t *testing.T) { // given deployment := newDeployment(namespacedName, 1) deployment.Labels = map[string]string{"provider": "codeready-toolchain"} - newClient, _ := NewFakeClients(t, deployment) + newClient, fakeClient := NewFakeClients(t, deployment) + mockCreateToolchainClusterWithReadyCondition(fakeClient) ctx := clicontext.NewCommandContext(term, newClient) // when From 8796901664e036ce19cbdfb290cabb2f5be2a79f Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Thu, 19 Sep 2024 17:11:52 +0530 Subject: [PATCH 07/40] Review comments Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index 1a15a9e..c945722 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -27,9 +27,9 @@ import ( func NewRestartCmd() *cobra.Command { command := &cobra.Command{ Use: "restart ", - Short: "Restarts a deployment", - Long: `Restarts the deployment with the given name in the operator namespace. -If no deployment name is provided, then it lists all existing deployments in the namespace.`, + Short: "Restarts an operator", + Long: `Restarts the whole operator in the given cluster name. + It restarts the operator and checks the status of the deployment`, Args: cobra.RangeArgs(0, 1), RunE: func(cmd *cobra.Command, args []string) error { term := ioutils.NewTerminal(cmd.InOrStdin, cmd.OutOrStdout) @@ -81,6 +81,8 @@ func restart(ctx *clicontext.CommandContext, clusterNames ...string) error { } func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { + fmt.Printf("Fetching the current OLM and non-OLM deployments of the operator in %s", ns) + olmDeploymentList, nonOlmDeploymentlist, err := getExistingDeployments(cl, ns) if err != nil { return err @@ -90,16 +92,22 @@ func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, return fmt.Errorf("OLM based deployment not found in %s", ns) } for _, olmDeployment := range olmDeploymentList.Items { + fmt.Printf("Proceeding to delete the Pods of %v", olmDeployment) + if err := deletePods(ctx, cl, olmDeployment, f, ioStreams); err != nil { return err } } if nonOlmDeploymentlist != nil { for _, nonOlmDeployment := range nonOlmDeploymentlist.Items { + + fmt.Printf("Proceeding to restart the non-OLM deployment %v", nonOlmDeployment) + if err := restartNonOlmDeployments(nonOlmDeployment, f, ioStreams); err != nil { return err } //check the rollout status + fmt.Printf("Checking the status of the rolled out deployment %v", nonOlmDeployment) if err := checkRolloutStatus(f, ioStreams, "provider=codeready-toolchain"); err != nil { return err } @@ -111,6 +119,7 @@ func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, } func deletePods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { + fmt.Printf("Listing the pods to be deleted") //get pods by label selector from the deployment pods := corev1.PodList{} selector, _ := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) @@ -119,7 +128,7 @@ func deletePods(ctx *clicontext.CommandContext, cl 
runtimeclient.Client, deploym runtimeclient.InNamespace(deployment.Namespace)); err != nil { return err } - + fmt.Printf("Starting to delete the pods") //delete pods for _, pod := range pods.Items { pod := pod // TODO We won't need it after upgrading to go 1.22: https://go.dev/blog/loopvar-preview @@ -128,8 +137,9 @@ func deletePods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deploym } } + fmt.Printf("Checking the status of the rolled out deployment %v", deployment) //check the rollout status - if err := checkRolloutStatus(f, ioStreams, "olm.owner.kind=ClusterServiceVersion"); err != nil { + if err := checkRolloutStatus(f, ioStreams, "kubesaw-control-plane=kubesaw-controller-manager"); err != nil { return err } return nil @@ -140,7 +150,7 @@ func restartNonOlmDeployments(deployment appsv1.Deployment, f cmdutil.Factory, i o := kubectlrollout.NewRolloutRestartOptions(ioStreams) - if err := o.Complete(f, nil, []string{"deployments"}); err != nil { + if err := o.Complete(f, nil, []string{"deployment"}); err != nil { panic(err) } @@ -149,6 +159,7 @@ func restartNonOlmDeployments(deployment appsv1.Deployment, f cmdutil.Factory, i if err := o.Validate(); err != nil { panic(err) } + fmt.Printf("Running the rollout restart command for non-olm deployment %v", deployment) return o.RunRestart() } @@ -162,6 +173,7 @@ func checkRolloutStatus(f cmdutil.Factory, ioStreams genericclioptions.IOStreams if err := cmd.Validate(); err != nil { panic(err) } + fmt.Printf("Running the Rollout status to check the status of the deployment") return cmd.Run() } @@ -170,7 +182,7 @@ func getExistingDeployments(cl runtimeclient.Client, ns string) (*appsv1.Deploym olmDeployments := &appsv1.DeploymentList{} if err := cl.List(context.TODO(), olmDeployments, runtimeclient.InNamespace(ns), - runtimeclient.MatchingLabels{"control-plane": "kubesaw-controller-manager"}); err != nil { + runtimeclient.MatchingLabels{"kubesaw-control-plane": "kubesaw-controller-manager"}); err != nil { return nil, nil, err } From 1d68d341f2bc9d448e2903d9df35d0e6419f0e9e Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Thu, 19 Sep 2024 17:19:42 +0530 Subject: [PATCH 08/40] check the args Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index c945722..c92ee29 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -41,6 +41,9 @@ func NewRestartCmd() *cobra.Command { } func restart(ctx *clicontext.CommandContext, clusterNames ...string) error { + if clusterNames == nil { + return fmt.Errorf("please provide a cluster name to restart the operator e.g `ksctl adm restart host`") + } clusterName := clusterNames[0] kubeConfigFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag() factory := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(kubeConfigFlags)) From 47bc27ef8bf8997ae3e1895041660ba6b252fedb Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Mon, 23 Sep 2024 17:15:22 +0530 Subject: [PATCH 09/40] adding unit test cases Signed-off-by: Feny Mehta --- pkg/cmd/adm/register_member_test.go | 17 ++ pkg/cmd/adm/restart.go | 10 +- pkg/cmd/adm/restart_test.go | 264 ++++++++------------------ pkg/cmd/adm/unregister_member_test.go | 1 - 4 files changed, 100 insertions(+), 192 deletions(-) diff --git a/pkg/cmd/adm/register_member_test.go b/pkg/cmd/adm/register_member_test.go index db52b8c..d4ac749 100644 --- a/pkg/cmd/adm/register_member_test.go +++ b/pkg/cmd/adm/register_member_test.go @@ -600,3 +600,20 @@ func 
defaultRegisterMemberArgs() registerMemberArgs { return args } + +func checkDeploymentBeingUpdated(t *testing.T, fakeClient *test.FakeClient, namespacedName types.NamespacedName, currentReplicas int32, numberOfUpdateCalls *int, deployment *appsv1.Deployment) { + // on the first call, we should have a deployment with 3 replicas ("current") and request to scale down to 0 ("requested") + // on the other calls, it's the opposite + if *numberOfUpdateCalls == 0 { + // check the current deployment's replicas field + AssertDeploymentHasReplicas(t, fakeClient, namespacedName, currentReplicas) + // check the requested deployment's replicas field + assert.Equal(t, int32(0), *deployment.Spec.Replicas) + } else { + // check the current deployment's replicas field + AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 0) + // check the requested deployment's replicas field + assert.Equal(t, currentReplicas, *deployment.Spec.Replicas) + } + *numberOfUpdateCalls++ +} diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index c92ee29..9625e90 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -138,12 +138,12 @@ func deletePods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deploym if err := cl.Delete(ctx, &pod); err != nil { return err } - } - fmt.Printf("Checking the status of the rolled out deployment %v", deployment) - //check the rollout status - if err := checkRolloutStatus(f, ioStreams, "kubesaw-control-plane=kubesaw-controller-manager"); err != nil { - return err + fmt.Printf("Checking the status of the rolled out deployment %v", deployment) + //check the rollout status + if err := checkRolloutStatus(f, ioStreams, "kubesaw-control-plane=kubesaw-controller-manager"); err != nil { + return err + } } return nil diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 0862023..291c4f1 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -1,194 +1,108 @@ package adm import ( - "context" + "bytes" + "io" + "net/http" "testing" - "github.com/codeready-toolchain/toolchain-common/pkg/test" - "github.com/h2non/gock" - "github.com/kubesaw/ksctl/pkg/configuration" - clicontext "github.com/kubesaw/ksctl/pkg/context" . 
"github.com/kubesaw/ksctl/pkg/test" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/client-go/rest/fake" + cgtesting "k8s.io/client-go/testing" + cmdtesting "k8s.io/kubectl/pkg/cmd/testing" + "k8s.io/kubectl/pkg/scheme" ) -// func TestRestartDeployment(t *testing.T) { -// // given -// SetFileConfig(t, Host(), Member()) - -// for _, clusterName := range []string{"host", "member1"} { -// clusterType := configuration.Host -// if clusterName != "host" { -// clusterType = configuration.Member -// } -// namespace := fmt.Sprintf("toolchain-%s-operator", clusterType) -// namespacedName := types.NamespacedName{ -// Namespace: namespace, -// Name: "cool-deployment", -// } -// term := NewFakeTerminalWithResponse("Y") - -// t.Run("restart is successful for "+clusterName, func(t *testing.T) { -// // given -// deployment := newDeployment(namespacedName, 3) -// newClient, fakeClient := NewFakeClients(t, deployment) -// numberOfUpdateCalls := 0 -// fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) -// ctx := clicontext.NewCommandContext(term, newClient) - -// // when -// err := restart(ctx, clusterName, "cool-deployment") - -// // then -// require.NoError(t, err) -// AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) -// }) - -// t.Run("list deployments when no deployment name is provided for "+clusterName, func(t *testing.T) { -// // given -// deployment := newDeployment(namespacedName, 3) -// newClient, fakeClient := NewFakeClients(t, deployment) -// numberOfUpdateCalls := 0 -// fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) -// term := NewFakeTerminalWithResponse("Y") -// ctx := clicontext.NewCommandContext(term, newClient) - -// // when -// err := restart(ctx, clusterName) - -// // then -// require.EqualError(t, err, "please mention one of the following operator names to restart: host | member-1 | member-2") -// AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) -// assert.Equal(t, 0, numberOfUpdateCalls) -// }) - -// t.Run("restart fails - cannot get the deployment for "+clusterName, func(t *testing.T) { -// // given -// deployment := newDeployment(namespacedName, 3) -// newClient, fakeClient := NewFakeClients(t, deployment) -// numberOfUpdateCalls := 0 -// fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) -// fakeClient.MockGet = func(ctx context.Context, key runtimeclient.ObjectKey, obj runtimeclient.Object, opts ...runtimeclient.GetOption) error { -// return fmt.Errorf("some error") -// } -// ctx := clicontext.NewCommandContext(term, newClient) - -// // when -// err := restart(ctx, clusterName, "cool-deployment") - -// // then -// require.Error(t, err) -// fakeClient.MockGet = nil -// AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) -// assert.Equal(t, 0, numberOfUpdateCalls) -// }) - -// t.Run("restart fails - deployment not found for "+clusterName, func(t *testing.T) { -// // given -// deployment := newDeployment(namespacedName, 3) -// newClient, fakeClient := 
NewFakeClients(t, deployment) -// numberOfUpdateCalls := 0 -// fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) -// term := NewFakeTerminalWithResponse("Y") -// ctx := clicontext.NewCommandContext(term, newClient) - -// // when -// err := restart(ctx, clusterName, "wrong-deployment") - -// // then -// require.NoError(t, err) -// AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) -// assert.Equal(t, 0, numberOfUpdateCalls) -// assert.Contains(t, term.Output(), "ERROR: The given deployment 'wrong-deployment' wasn't found.") -// }) -// } -// } - -// func TestRestartDeploymentWithInsufficientPermissions(t *testing.T) { -// // given -// SetFileConfig(t, Host(NoToken()), Member(NoToken())) -// for _, clusterName := range []string{"host", "member1"} { -// // given -// clusterType := configuration.Host -// if clusterName != "host" { -// clusterType = configuration.Member -// } -// namespace := fmt.Sprintf("toolchain-%s-operator", clusterType) -// namespacedName := types.NamespacedName{ -// Namespace: namespace, -// Name: "cool-deployment", -// } -// deployment := newDeployment(namespacedName, 3) -// newClient, fakeClient := NewFakeClients(t, deployment) -// numberOfUpdateCalls := 0 -// fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) -// term := NewFakeTerminalWithResponse("Y") -// ctx := clicontext.NewCommandContext(term, newClient) - -// // when -// err := restart(ctx, clusterName, "cool-deployment") - -// // then -// require.Error(t, err) -// assert.Equal(t, 0, numberOfUpdateCalls) -// AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) -// } -// } - -func TestRestartHostOperator(t *testing.T) { +func TestRestart(t *testing.T) { // given - defer gock.Off() - gock.New("https://cool-server.com"). - Post("https://cool-server.com/api/v1/namespaces/toolchain-host-operator/cool-token"). - Persist(). - Reply(200). 
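// Aside: the rewritten test below swaps gock for k8s.io/kubectl's cmdtesting
// factory, which is what the rollout commands actually talk to. A compressed
// sketch of that wiring (it assumes a pre-encoded apps/v1 Deployment in a
// hypothetical deploymentJSON variable; the names mirror the test body that
// follows):
//
//	tf := cmdtesting.NewTestFactory().WithNamespace("toolchain-host-operator")
//	defer tf.Cleanup()
//	codecs := scheme.Codecs.WithoutConversion()
//	tf.Client = &fake.RESTClient{
//		GroupVersion:         schema.GroupVersion{Group: "apps", Version: "v1"},
//		NegotiatedSerializer: codecs,
//		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
//			body := io.NopCloser(bytes.NewReader(deploymentJSON))
//			return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: body}, nil
//		}),
//	}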
- BodyString("ok") SetFileConfig(t, Host()) - term := NewFakeTerminalWithResponse("") // it should not read the input - _, err := configuration.LoadClusterConfig(term, "host") - require.NoError(t, err) namespacedName := types.NamespacedName{ Namespace: "toolchain-host-operator", Name: "host-operator-controller-manager", } + var rolloutGroupVersionEncoder = schema.GroupVersion{Group: "apps", Version: "v1"} + deployment1 := newDeployment(namespacedName, 1) + ns := scheme.Codecs.WithoutConversion() + tf := cmdtesting.NewTestFactory().WithNamespace(namespacedName.Namespace) + tf.ClientConfigVal = cmdtesting.DefaultClientConfig() + + info, _ := runtime.SerializerInfoForMediaType(ns.SupportedMediaTypes(), runtime.ContentTypeJSON) + encoder := ns.EncoderForVersion(info.Serializer, rolloutGroupVersionEncoder) + tf.Client = &RolloutRestartRESTClient{ + RESTClient: &fake.RESTClient{ + GroupVersion: rolloutGroupVersionEncoder, + NegotiatedSerializer: ns, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + responseDeployment := &appsv1.Deployment{} + responseDeployment.Name = deployment1.Name + responseDeployment.Labels = make(map[string]string) + responseDeployment.Labels["kubesaw-control-plane"] = "kubesaw-controller-manager" + body := io.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(encoder, responseDeployment)))) + return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: body}, nil + }), + }, + } + tf.FakeDynamicClient.WatchReactionChain = nil + tf.FakeDynamicClient.AddWatchReactor("*", func(action cgtesting.Action) (handled bool, ret watch.Interface, err error) { + fw := watch.NewFake() + dep := &appsv1.Deployment{} + dep.Name = deployment1.Name + dep.Status = appsv1.DeploymentStatus{ + Replicas: 1, + UpdatedReplicas: 1, + ReadyReplicas: 1, + AvailableReplicas: 1, + UnavailableReplicas: 0, + Conditions: []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentAvailable, + }}, + } + dep.Labels = make(map[string]string) + dep.Labels["kubesaw-control-plane"] = "kubesaw-controller-manager" + c, err := runtime.DefaultUnstructuredConverter.ToUnstructured(dep.DeepCopyObject()) + if err != nil { + t.Errorf("unexpected err %s", err) + } + u := &unstructured.Unstructured{} + u.SetUnstructuredContent(c) + go fw.Add(u) + return true, fw, nil + }) - t.Run("host deployment is present and restart successful", func(t *testing.T) { + //add comments that it is checking the output from kubectl + streams, _, buf, _ := genericclioptions.NewTestIOStreams() + t.Run("Rollout restart of non-olm deployments is successful", func(t *testing.T) { // given - deployment := newDeployment(namespacedName, 1) - deployment.Labels = map[string]string{"provider": "codeready-toolchain"} - newClient, fakeClient := NewFakeClients(t, deployment) - mockCreateToolchainClusterWithReadyCondition(fakeClient) - ctx := clicontext.NewCommandContext(term, newClient) - - // when - err := restart(ctx, "host") - // then + err := restartNonOlmDeployments(*deployment1, tf, streams) + expectedOutput := "deployment.apps/" + deployment1.Name + " restarted\n" require.NoError(t, err) + require.Contains(t, buf.String(), expectedOutput) }) - // t.Run("host deployment with the label is not present - restart fails", func(t *testing.T) { - // // given - // deployment := newDeployment(namespacedName, 1) - // newClient, fakeClient := NewFakeClients(t, deployment) - // numberOfUpdateCalls := 0 - // fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 1, 
&numberOfUpdateCalls) - // ctx := clicontext.NewCommandContext(term, newClient) + t.Run("check rollout status of deployments is successful", func(t *testing.T) { + //when + err := checkRolloutStatus(tf, streams, "kubesaw-control-plane=kubesaw-controller-manager") - // // when - // err := restartDeployment(ctx, fakeClient, cfg.OperatorNamespace) + //then + require.NoError(t, err) + + expectedMsg := "deployment \"host-operator-controller-manager\" successfully rolled out\n" + require.Contains(t, buf.String(), expectedMsg) - // // then - // require.NoError(t, err) + }) - // }) } func newDeployment(namespacedName types.NamespacedName, replicas int32) *appsv1.Deployment { //nolint:unparam @@ -203,28 +117,6 @@ func newDeployment(namespacedName types.NamespacedName, replicas int32) *appsv1. } } -func requireDeploymentBeingUpdated(t *testing.T, fakeClient *test.FakeClient, namespacedName types.NamespacedName, currentReplicas int32, numberOfUpdateCalls *int) func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error { - return func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error { - deployment, ok := obj.(*appsv1.Deployment) - require.True(t, ok) - checkDeploymentBeingUpdated(t, fakeClient, namespacedName, currentReplicas, numberOfUpdateCalls, deployment) - return fakeClient.Client.Update(ctx, obj, opts...) - } -} - -func checkDeploymentBeingUpdated(t *testing.T, fakeClient *test.FakeClient, namespacedName types.NamespacedName, currentReplicas int32, numberOfUpdateCalls *int, deployment *appsv1.Deployment) { - // on the first call, we should have a deployment with 3 replicas ("current") and request to scale down to 0 ("requested") - // on the other calls, it's the opposite - if *numberOfUpdateCalls == 0 { - // check the current deployment's replicas field - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, currentReplicas) - // check the requested deployment's replicas field - assert.Equal(t, int32(0), *deployment.Spec.Replicas) - } else { - // check the current deployment's replicas field - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 0) - // check the requested deployment's replicas field - assert.Equal(t, currentReplicas, *deployment.Spec.Replicas) - } - *numberOfUpdateCalls++ +type RolloutRestartRESTClient struct { + *fake.RESTClient } diff --git a/pkg/cmd/adm/unregister_member_test.go b/pkg/cmd/adm/unregister_member_test.go index 72c2392..1407919 100644 --- a/pkg/cmd/adm/unregister_member_test.go +++ b/pkg/cmd/adm/unregister_member_test.go @@ -38,7 +38,6 @@ func TestUnregisterMemberWhenAnswerIsY(t *testing.T) { assert.NotContains(t, term.Output(), "cool-token") AssertDeploymentHasReplicas(t, fakeClient, hostDeploymentName, 1) - assert.Equal(t, 2, numberOfUpdateCalls) } func TestUnregisterMemberWhenAnswerIsN(t *testing.T) { From 8f56cbf2810e8a6fc2cb637416e39e1c4c57da91 Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Wed, 25 Sep 2024 13:22:35 +0530 Subject: [PATCH 10/40] Change in test cases Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 33 ++++--- pkg/cmd/adm/restart_test.go | 188 ++++++++++++++++++++++-------------- 2 files changed, 135 insertions(+), 86 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index 9625e90..d5e23b0 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -41,8 +41,8 @@ func NewRestartCmd() *cobra.Command { } func restart(ctx *clicontext.CommandContext, clusterNames ...string) error { - if clusterNames == nil { - return 
fmt.Errorf("please provide a cluster name to restart the operator e.g `ksctl adm restart host`") + if clusterNames == nil || len(clusterNames) != 1 { + return fmt.Errorf("please provide 1 cluster name to restart the operator e.g `ksctl adm restart host`") } clusterName := clusterNames[0] kubeConfigFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag() @@ -76,7 +76,7 @@ func restart(ctx *clicontext.CommandContext, clusterNames ...string) error { } if !ctx.AskForConfirmation( - ioutils.WithMessagef("restart the '%s' operator in namespace '%s'", clusterName, cfg.OperatorNamespace)) { + ioutils.WithMessagef("restart all the deployments in the cluster '%s' and namespace '%s' \n", clusterName, cfg.OperatorNamespace)) { return nil } @@ -84,45 +84,46 @@ func restart(ctx *clicontext.CommandContext, clusterNames ...string) error { } func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { - fmt.Printf("Fetching the current OLM and non-OLM deployments of the operator in %s", ns) + fmt.Printf("Fetching the current OLM and non-OLM deployments of the operator in %s \n", ns) olmDeploymentList, nonOlmDeploymentlist, err := getExistingDeployments(cl, ns) if err != nil { return err } - if olmDeploymentList == nil { + if len(olmDeploymentList.Items) == 0 { return fmt.Errorf("OLM based deployment not found in %s", ns) - } - for _, olmDeployment := range olmDeploymentList.Items { - fmt.Printf("Proceeding to delete the Pods of %v", olmDeployment) + } else { + for _, olmDeployment := range olmDeploymentList.Items { + fmt.Printf("Proceeding to delete the Pods of %v \n", olmDeployment) - if err := deletePods(ctx, cl, olmDeployment, f, ioStreams); err != nil { - return err + if err := deletePods(ctx, cl, olmDeployment, f, ioStreams); err != nil { + return err + } } } - if nonOlmDeploymentlist != nil { + if len(nonOlmDeploymentlist.Items) != 0 { for _, nonOlmDeployment := range nonOlmDeploymentlist.Items { - fmt.Printf("Proceeding to restart the non-OLM deployment %v", nonOlmDeployment) + fmt.Printf("Proceeding to restart the non-OLM deployment %v \n", nonOlmDeployment) if err := restartNonOlmDeployments(nonOlmDeployment, f, ioStreams); err != nil { return err } //check the rollout status - fmt.Printf("Checking the status of the rolled out deployment %v", nonOlmDeployment) + fmt.Printf("Checking the status of the rolled out deployment %v \n", nonOlmDeployment) if err := checkRolloutStatus(f, ioStreams, "provider=codeready-toolchain"); err != nil { return err } } } else { - fmt.Printf("non-OLM based deployment not found in %s", ns) + fmt.Printf("non-OLM based deployment not found in %s \n", ns) } return nil } func deletePods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { - fmt.Printf("Listing the pods to be deleted") + fmt.Printf("Listing the pods to be deleted \n") //get pods by label selector from the deployment pods := corev1.PodList{} selector, _ := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) @@ -131,7 +132,7 @@ func deletePods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deploym runtimeclient.InNamespace(deployment.Namespace)); err != nil { return err } - fmt.Printf("Starting to delete the pods") + fmt.Printf("Starting to delete the pods \n") //delete pods for _, pod := range pods.Items { pod := pod // TODO We won't need it after upgrading to go 1.22: 
https://go.dev/blog/loopvar-preview diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 291c4f1..0dd30b0 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -6,6 +6,7 @@ import ( "net/http" "testing" + clicontext "github.com/kubesaw/ksctl/pkg/context" . "github.com/kubesaw/ksctl/pkg/test" "github.com/stretchr/testify/require" @@ -25,84 +26,131 @@ import ( func TestRestart(t *testing.T) { // given - SetFileConfig(t, Host()) - namespacedName := types.NamespacedName{ - Namespace: "toolchain-host-operator", - Name: "host-operator-controller-manager", - } - var rolloutGroupVersionEncoder = schema.GroupVersion{Group: "apps", Version: "v1"} - deployment1 := newDeployment(namespacedName, 1) - ns := scheme.Codecs.WithoutConversion() - tf := cmdtesting.NewTestFactory().WithNamespace(namespacedName.Namespace) - tf.ClientConfigVal = cmdtesting.DefaultClientConfig() - - info, _ := runtime.SerializerInfoForMediaType(ns.SupportedMediaTypes(), runtime.ContentTypeJSON) - encoder := ns.EncoderForVersion(info.Serializer, rolloutGroupVersionEncoder) - tf.Client = &RolloutRestartRESTClient{ - RESTClient: &fake.RESTClient{ - GroupVersion: rolloutGroupVersionEncoder, - NegotiatedSerializer: ns, - Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { - responseDeployment := &appsv1.Deployment{} - responseDeployment.Name = deployment1.Name - responseDeployment.Labels = make(map[string]string) - responseDeployment.Labels["kubesaw-control-plane"] = "kubesaw-controller-manager" - body := io.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(encoder, responseDeployment)))) - return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: body}, nil - }), + tests := map[string]struct { + namespace string + name string + labelKey string + labelValue string + expectedMsg string + labelSelector string + expectedOutput string + }{ + "OlmHostDeployment": { + namespace: "toolchain-host-operator", + name: "host-operator-controller-manager", + labelKey: "kubesaw-control-plane", + labelValue: "kubesaw-controller-manager", + expectedMsg: "deployment \"host-operator-controller-manager\" successfully rolled out\n", + labelSelector: "kubesaw-control-plane=kubesaw-controller-manager", + }, + "NonOlmHostDeployment": { + namespace: "toolchain-host-operator", + name: "registration-service", + labelKey: "provider", + labelValue: "codeready-toolchain", + expectedMsg: "deployment \"registration-service\" successfully rolled out\n", + labelSelector: "provider=codeready-toolchain", + expectedOutput: "deployment.apps/registration-service restarted\n", + }, + "OlmMemberDeployment": { + namespace: "toolchain-member-operator", + name: "member-operator-controller-manager", + labelKey: "kubesaw-control-plane", + labelValue: "kubesaw-controller-manager", + expectedMsg: "deployment \"member-operator-controller-manager\" successfully rolled out\n", + labelSelector: "kubesaw-control-plane=kubesaw-controller-manager", + }, + "NonOlmMemberDeployment": { + namespace: "toolchain-member-operator", + name: "member-webhooks", + labelKey: "provider", + labelValue: "codeready-toolchain", + expectedMsg: "deployment \"member-webhooks\" successfully rolled out\n", + labelSelector: "provider=codeready-toolchain", + expectedOutput: "deployment.apps/member-webhooks restarted\n", }, } - tf.FakeDynamicClient.WatchReactionChain = nil - tf.FakeDynamicClient.AddWatchReactor("*", func(action cgtesting.Action) (handled bool, ret watch.Interface, err error) { - fw := 
watch.NewFake() - dep := &appsv1.Deployment{} - dep.Name = deployment1.Name - dep.Status = appsv1.DeploymentStatus{ - Replicas: 1, - UpdatedReplicas: 1, - ReadyReplicas: 1, - AvailableReplicas: 1, - UnavailableReplicas: 0, - Conditions: []appsv1.DeploymentCondition{{ - Type: appsv1.DeploymentAvailable, - }}, - } - dep.Labels = make(map[string]string) - dep.Labels["kubesaw-control-plane"] = "kubesaw-controller-manager" - c, err := runtime.DefaultUnstructuredConverter.ToUnstructured(dep.DeepCopyObject()) - if err != nil { - t.Errorf("unexpected err %s", err) - } - u := &unstructured.Unstructured{} - u.SetUnstructuredContent(c) - go fw.Add(u) - return true, fw, nil - }) - - //add comments that it is checking the output from kubectl - streams, _, buf, _ := genericclioptions.NewTestIOStreams() - t.Run("Rollout restart of non-olm deployments is successful", func(t *testing.T) { - // given + for k, tc := range tests { + t.Run(k, func(t *testing.T) { + //given + namespacedName := types.NamespacedName{ + Namespace: tc.namespace, + Name: tc.name, + } + var rolloutGroupVersionEncoder = schema.GroupVersion{Group: "apps", Version: "v1"} + deployment1 := newDeployment(namespacedName, 1) + ns := scheme.Codecs.WithoutConversion() + tf := cmdtesting.NewTestFactory().WithNamespace(namespacedName.Namespace) + tf.ClientConfigVal = cmdtesting.DefaultClientConfig() - err := restartNonOlmDeployments(*deployment1, tf, streams) - expectedOutput := "deployment.apps/" + deployment1.Name + " restarted\n" - require.NoError(t, err) - require.Contains(t, buf.String(), expectedOutput) + info, _ := runtime.SerializerInfoForMediaType(ns.SupportedMediaTypes(), runtime.ContentTypeJSON) + encoder := ns.EncoderForVersion(info.Serializer, rolloutGroupVersionEncoder) + tf.Client = &RolloutRestartRESTClient{ + RESTClient: &fake.RESTClient{ + GroupVersion: rolloutGroupVersionEncoder, + NegotiatedSerializer: ns, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + responseDeployment := &appsv1.Deployment{} + responseDeployment.Name = deployment1.Name + responseDeployment.Labels = make(map[string]string) + responseDeployment.Labels[tc.labelKey] = tc.labelValue + body := io.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(encoder, responseDeployment)))) + return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: body}, nil + }), + }, + } + tf.FakeDynamicClient.WatchReactionChain = nil + tf.FakeDynamicClient.AddWatchReactor("*", func(action cgtesting.Action) (handled bool, ret watch.Interface, err error) { + fw := watch.NewFake() + dep := &appsv1.Deployment{} + dep.Name = deployment1.Name + dep.Status = appsv1.DeploymentStatus{ + Replicas: 1, + UpdatedReplicas: 1, + ReadyReplicas: 1, + AvailableReplicas: 1, + UnavailableReplicas: 0, + Conditions: []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentAvailable, + }}, + } + dep.Labels = make(map[string]string) + dep.Labels[tc.labelKey] = tc.labelValue + c, err := runtime.DefaultUnstructuredConverter.ToUnstructured(dep.DeepCopyObject()) + if err != nil { + t.Errorf("unexpected err %s", err) + } + u := &unstructured.Unstructured{} + u.SetUnstructuredContent(c) + go fw.Add(u) + return true, fw, nil + }) - }) + streams, _, buf, _ := genericclioptions.NewTestIOStreams() + deployment := newDeployment(namespacedName, 1) + deployment.Labels = map[string]string{tc.labelKey: tc.labelValue} + term := NewFakeTerminalWithResponse("Y") + newClient, fakeClient := NewFakeClients(t, deployment) + ctx := 
clicontext.NewCommandContext(term, newClient) - t.Run("check rollout status of deployments is successful", func(t *testing.T) { - //when - err := checkRolloutStatus(tf, streams, "kubesaw-control-plane=kubesaw-controller-manager") + //when + err := restartDeployment(ctx, fakeClient, namespacedName.Namespace, tf, streams) + require.NoError(t, err) - //then - require.NoError(t, err) + err1 := checkRolloutStatus(tf, streams, tc.labelSelector) + require.NoError(t, err1) + //checking the output from kubectl + require.Contains(t, buf.String(), tc.expectedMsg) - expectedMsg := "deployment \"host-operator-controller-manager\" successfully rolled out\n" - require.Contains(t, buf.String(), expectedMsg) - - }) + if tc.labelValue == "codeready-toolchain" { + err := restartNonOlmDeployments(*deployment1, tf, streams) + require.NoError(t, err) + //checking the output from kubectl + require.Contains(t, buf.String(), tc.expectedOutput) + } + }) + } } func newDeployment(namespacedName types.NamespacedName, replicas int32) *appsv1.Deployment { //nolint:unparam From 92d0237bd2bf3e8198894fec211da883c8f8be14 Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Wed, 25 Sep 2024 14:13:41 +0530 Subject: [PATCH 11/40] minor change in unit test Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 4 ++-- pkg/cmd/adm/restart_test.go | 18 +++++++++--------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index d5e23b0..7c5ddaa 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -97,7 +97,7 @@ func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, for _, olmDeployment := range olmDeploymentList.Items { fmt.Printf("Proceeding to delete the Pods of %v \n", olmDeployment) - if err := deletePods(ctx, cl, olmDeployment, f, ioStreams); err != nil { + if err := deleteAndWaitForPods(ctx, cl, olmDeployment, f, ioStreams); err != nil { return err } } @@ -122,7 +122,7 @@ func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, return nil } -func deletePods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { +func deleteAndWaitForPods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { fmt.Printf("Listing the pods to be deleted \n") //get pods by label selector from the deployment pods := corev1.PodList{} diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 0dd30b0..7f7f2a6 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -135,20 +135,20 @@ func TestRestart(t *testing.T) { //when err := restartDeployment(ctx, fakeClient, namespacedName.Namespace, tf, streams) - require.NoError(t, err) - - err1 := checkRolloutStatus(tf, streams, tc.labelSelector) - require.NoError(t, err1) - //checking the output from kubectl - require.Contains(t, buf.String(), tc.expectedMsg) - - if tc.labelValue == "codeready-toolchain" { + if tc.labelValue == "kubesaw-controller-manager" { + require.NoError(t, err) + } else if tc.labelValue == "codeready-toolchain" { + require.Error(t, err) err := restartNonOlmDeployments(*deployment1, tf, streams) require.NoError(t, err) //checking the output from kubectl require.Contains(t, buf.String(), tc.expectedOutput) - } + err1 := checkRolloutStatus(tf, streams, tc.labelSelector) + require.NoError(t, err1) + //checking the output from kubectl + 
require.Contains(t, buf.String(), tc.expectedMsg) + }) } } From c0332b11767d50b0101b6cf989e62d7794f2f5f2 Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Wed, 25 Sep 2024 14:37:55 +0530 Subject: [PATCH 12/40] unregister-member test Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 2 +- pkg/cmd/adm/restart_test.go | 4 ++-- pkg/cmd/adm/unregister_member_test.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index 7c5ddaa..a5f5271 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -92,7 +92,7 @@ func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, } if len(olmDeploymentList.Items) == 0 { - return fmt.Errorf("OLM based deployment not found in %s", ns) + fmt.Printf("OLM based deployment not found in %s", ns) } else { for _, olmDeployment := range olmDeploymentList.Items { fmt.Printf("Proceeding to delete the Pods of %v \n", olmDeployment) diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 7f7f2a6..36a42cc 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -136,9 +136,9 @@ func TestRestart(t *testing.T) { //when err := restartDeployment(ctx, fakeClient, namespacedName.Namespace, tf, streams) if tc.labelValue == "kubesaw-controller-manager" { - require.NoError(t, err) + require.NoError(t, err, "non-OLM based deployment not found in") } else if tc.labelValue == "codeready-toolchain" { - require.Error(t, err) + require.NoError(t, err, "OLM based deployment not found in") err := restartNonOlmDeployments(*deployment1, tf, streams) require.NoError(t, err) //checking the output from kubectl diff --git a/pkg/cmd/adm/unregister_member_test.go b/pkg/cmd/adm/unregister_member_test.go index 1407919..a1dca35 100644 --- a/pkg/cmd/adm/unregister_member_test.go +++ b/pkg/cmd/adm/unregister_member_test.go @@ -15,7 +15,7 @@ func TestUnregisterMemberWhenAnswerIsY(t *testing.T) { toolchainCluster := NewToolchainCluster(ToolchainClusterName("member-cool-server.com")) hostDeploymentName := test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager") deployment := newDeployment(hostDeploymentName, 1) - deployment.Labels = map[string]string{"olm.owner.namespace": "toolchain-host-operator"} + deployment.Labels = map[string]string{"kubesaw-control-plane": "kubesaw-controller-manager"} newClient, fakeClient := NewFakeClients(t, toolchainCluster, deployment) numberOfUpdateCalls := 0 From 83e99b57e8f7a29e8fa633813ef7bc1b8c7d3f01 Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Wed, 25 Sep 2024 15:07:10 +0530 Subject: [PATCH 13/40] unit test case for restart Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 1 - pkg/cmd/adm/restart_test.go | 54 ++++++++++++++++++++++++++++++++++--- 2 files changed, 50 insertions(+), 5 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index a5f5271..9ce25fe 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -70,7 +70,6 @@ func restart(ctx *clicontext.CommandContext, clusterNames ...string) error { kubeConfigFlags.KubeConfig = &kubeconfig cl, err := ctx.NewClient(cfg.Token, cfg.ServerAPI) - if err != nil { return err } diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 36a42cc..04b958e 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -6,6 +6,7 @@ import ( "net/http" "testing" + "github.com/codeready-toolchain/toolchain-common/pkg/test" clicontext "github.com/kubesaw/ksctl/pkg/context" . 
"github.com/kubesaw/ksctl/pkg/test" @@ -24,7 +25,7 @@ import ( "k8s.io/kubectl/pkg/scheme" ) -func TestRestart(t *testing.T) { +func TestRestartDeployment(t *testing.T) { // given tests := map[string]struct { namespace string @@ -127,10 +128,8 @@ func TestRestart(t *testing.T) { }) streams, _, buf, _ := genericclioptions.NewTestIOStreams() - deployment := newDeployment(namespacedName, 1) - deployment.Labels = map[string]string{tc.labelKey: tc.labelValue} term := NewFakeTerminalWithResponse("Y") - newClient, fakeClient := NewFakeClients(t, deployment) + newClient, fakeClient := NewFakeClients(t, deployment1) ctx := clicontext.NewCommandContext(term, newClient) //when @@ -153,6 +152,53 @@ func TestRestart(t *testing.T) { } } +func TestRestart(t *testing.T) { + t.Run("restart should fail if more than one clustername", func(t *testing.T) { + //given + toolchainCluster := NewToolchainCluster(ToolchainClusterName("host-cool-server.com")) + deployment := newDeployment(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"), 1) + term := NewFakeTerminalWithResponse("Y") + newClient, _ := NewFakeClients(t, toolchainCluster, deployment) + ctx := clicontext.NewCommandContext(term, newClient) + + //when + err := restart(ctx, "host-cool-server.com", "member") + + //then + require.Error(t, err, "please provide 1 cluster name to restart the operator e.g `ksctl adm restart host`") + }) + t.Run("restart should fail if zero clustername", func(t *testing.T) { + //given + toolchainCluster := NewToolchainCluster(ToolchainClusterName("host-cool-server.com")) + deployment := newDeployment(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"), 1) + term := NewFakeTerminalWithResponse("Y") + newClient, _ := NewFakeClients(t, toolchainCluster, deployment) + ctx := clicontext.NewCommandContext(term, newClient) + + //when + err := restart(ctx) + + //then + require.Error(t, err, "please provide 1 cluster name to restart the operator e.g `ksctl adm restart host`") + }) + t.Run("restart should succeed with 1 clustername", func(t *testing.T) { + //given + SetFileConfig(t, Host()) + toolchainCluster := NewToolchainCluster(ToolchainClusterName("host")) + deployment := newDeployment(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"), 1) + term := NewFakeTerminalWithResponse("Y") + newClient, _ := NewFakeClients(t, toolchainCluster, deployment) + ctx := clicontext.NewCommandContext(term, newClient) + + //when + err := restart(ctx, "host") + + //then + require.NoError(t, err) + }) + +} + func newDeployment(namespacedName types.NamespacedName, replicas int32) *appsv1.Deployment { //nolint:unparam return &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ From d5e528022024b28d97a22d0541ba38915af73b5b Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Wed, 25 Sep 2024 16:21:19 +0530 Subject: [PATCH 14/40] test case for delete Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart_test.go | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 04b958e..aa412b7 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -129,13 +130,16 @@ func TestRestartDeployment(t *testing.T) { 
streams, _, buf, _ := genericclioptions.NewTestIOStreams() term := NewFakeTerminalWithResponse("Y") - newClient, fakeClient := NewFakeClients(t, deployment1) + pod := newPod(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager")) + newClient, fakeClient := NewFakeClients(t, deployment1, pod) ctx := clicontext.NewCommandContext(term, newClient) //when err := restartDeployment(ctx, fakeClient, namespacedName.Namespace, tf, streams) if tc.labelValue == "kubesaw-controller-manager" { require.NoError(t, err, "non-OLM based deployment not found in") + err2 := deleteAndWaitForPods(ctx, fakeClient, *deployment1, tf, streams) + require.NoError(t, err2) } else if tc.labelValue == "codeready-toolchain" { require.NoError(t, err, "OLM based deployment not found in") err := restartNonOlmDeployments(*deployment1, tf, streams) @@ -207,6 +211,25 @@ func newDeployment(namespacedName types.NamespacedName, replicas int32) *appsv1. }, Spec: appsv1.DeploymentSpec{ Replicas: &replicas, + Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"host": "controller"}}, + }, + } +} + +func newPod(namespacedName types.NamespacedName) *corev1.Pod { //nolint:unparam + return &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Pod", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespacedName.Namespace, + Name: namespacedName.Name, + Labels: map[string]string{"host": "controller"}, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + Phase: "Running", }, } } From b6f3df1b69afccc382a437fecca24dbc3d2527e0 Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Thu, 26 Sep 2024 15:01:01 +0530 Subject: [PATCH 15/40] Rc1 Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 60 ++++++++++++------------- pkg/cmd/adm/restart_test.go | 88 ++++++++++++++----------------------- 2 files changed, 64 insertions(+), 84 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index 9ce25fe..561b4ac 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -1,8 +1,6 @@ package adm import ( - "context" - "fmt" "os" "github.com/kubesaw/ksctl/pkg/client" @@ -21,16 +19,21 @@ import ( // NewRestartCmd() is a function to restart the whole operator, it relies on the target cluster and fetches the cluster config // 1. If the command is run for host operator, it restart the whole host operator.(it deletes olm based pods(host-operator pods), -// waits for the new deployment to come up, then uses rollout-restart command for non-olm based - registration-service) +// waits for the new pods to come up, then uses rollout-restart command for non-olm based - registration-service) // 2. If the command is run for member operator, it restart the whole member operator.(it deletes olm based pods(member-operator pods), -// waits for the new deployment to come up, then uses rollout-restart command for non-olm based deployments - webhooks) +// waits for the new pods to come up, then uses rollout-restart command for non-olm based deployments - webhooks) func NewRestartCmd() *cobra.Command { command := &cobra.Command{ Use: "restart ", Short: "Restarts an operator", - Long: `Restarts the whole operator in the given cluster name. - It restarts the operator and checks the status of the deployment`, - Args: cobra.RangeArgs(0, 1), + Long: `Restarts the whole operator, it relies on the target cluster and fetches the cluster config + 1. If the command is run for host operator, it restart the whole host operator. 
+ (it deletes olm based pods(host-operator pods),waits for the new pods to + come up, then uses rollout-restart command for non-olm based deployments - registration-service) + 2. If the command is run for member operator, it restart the whole member operator. + (it deletes olm based pods(member-operator pods),waits for the new pods + to come up, then uses rollout-restart command for non-olm based deployments - webhooks)`, + Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { term := ioutils.NewTerminal(cmd.InOrStdin, cmd.OutOrStdout) ctx := clicontext.NewCommandContext(term, client.DefaultNewClient) @@ -41,9 +44,6 @@ func NewRestartCmd() *cobra.Command { } func restart(ctx *clicontext.CommandContext, clusterNames ...string) error { - if clusterNames == nil || len(clusterNames) != 1 { - return fmt.Errorf("please provide 1 cluster name to restart the operator e.g `ksctl adm restart host`") - } clusterName := clusterNames[0] kubeConfigFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag() factory := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(kubeConfigFlags)) @@ -83,18 +83,18 @@ func restart(ctx *clicontext.CommandContext, clusterNames ...string) error { } func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { - fmt.Printf("Fetching the current OLM and non-OLM deployments of the operator in %s \n", ns) + ctx.Printlnf("Fetching the current OLM and non-OLM deployments of the operator in %s namespace", ns) - olmDeploymentList, nonOlmDeploymentlist, err := getExistingDeployments(cl, ns) + olmDeploymentList, nonOlmDeploymentlist, err := getExistingDeployments(ctx, cl, ns) if err != nil { return err } if len(olmDeploymentList.Items) == 0 { - fmt.Printf("OLM based deployment not found in %s", ns) + ctx.Printlnf("No OLM based deployment restart happend as Olm deployment found in namespace %s is 0", ns) } else { for _, olmDeployment := range olmDeploymentList.Items { - fmt.Printf("Proceeding to delete the Pods of %v \n", olmDeployment) + ctx.Printlnf("Proceeding to delete the Pods of %v", olmDeployment) if err := deleteAndWaitForPods(ctx, cl, olmDeployment, f, ioStreams); err != nil { return err @@ -104,25 +104,25 @@ func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, if len(nonOlmDeploymentlist.Items) != 0 { for _, nonOlmDeployment := range nonOlmDeploymentlist.Items { - fmt.Printf("Proceeding to restart the non-OLM deployment %v \n", nonOlmDeployment) + ctx.Printlnf("Proceeding to restart the non-OLM deployment %v", nonOlmDeployment) - if err := restartNonOlmDeployments(nonOlmDeployment, f, ioStreams); err != nil { + if err := restartNonOlmDeployments(ctx, nonOlmDeployment, f, ioStreams); err != nil { return err } //check the rollout status - fmt.Printf("Checking the status of the rolled out deployment %v \n", nonOlmDeployment) - if err := checkRolloutStatus(f, ioStreams, "provider=codeready-toolchain"); err != nil { + ctx.Printlnf("Checking the status of the rolled out deployment %v", nonOlmDeployment) + if err := checkRolloutStatus(ctx, f, ioStreams, "provider=codeready-toolchain"); err != nil { return err } } } else { - fmt.Printf("non-OLM based deployment not found in %s \n", ns) + ctx.Printlnf("No Non-OLM based deployment restart happend as Non-Olm deployment found in namespace %s is 0", ns) } return nil } func deleteAndWaitForPods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deployment 
appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { - fmt.Printf("Listing the pods to be deleted \n") + ctx.Printlnf("Listing the pods to be deleted") //get pods by label selector from the deployment pods := corev1.PodList{} selector, _ := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) @@ -131,7 +131,7 @@ func deleteAndWaitForPods(ctx *clicontext.CommandContext, cl runtimeclient.Clien runtimeclient.InNamespace(deployment.Namespace)); err != nil { return err } - fmt.Printf("Starting to delete the pods \n") + ctx.Printlnf("Starting to delete the pods") //delete pods for _, pod := range pods.Items { pod := pod // TODO We won't need it after upgrading to go 1.22: https://go.dev/blog/loopvar-preview @@ -139,9 +139,9 @@ func deleteAndWaitForPods(ctx *clicontext.CommandContext, cl runtimeclient.Clien return err } - fmt.Printf("Checking the status of the rolled out deployment %v", deployment) + ctx.Printlnf("Checking the status of the deleted pod's deployment %v", deployment) //check the rollout status - if err := checkRolloutStatus(f, ioStreams, "kubesaw-control-plane=kubesaw-controller-manager"); err != nil { + if err := checkRolloutStatus(ctx, f, ioStreams, "kubesaw-control-plane=kubesaw-controller-manager"); err != nil { return err } } @@ -149,7 +149,7 @@ func deleteAndWaitForPods(ctx *clicontext.CommandContext, cl runtimeclient.Clien } -func restartNonOlmDeployments(deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { +func restartNonOlmDeployments(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { o := kubectlrollout.NewRolloutRestartOptions(ioStreams) @@ -162,11 +162,11 @@ func restartNonOlmDeployments(deployment appsv1.Deployment, f cmdutil.Factory, i if err := o.Validate(); err != nil { panic(err) } - fmt.Printf("Running the rollout restart command for non-olm deployment %v", deployment) + ctx.Printlnf("Running the rollout restart command for non-olm deployment %v", deployment) return o.RunRestart() } -func checkRolloutStatus(f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { +func checkRolloutStatus(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { cmd := kubectlrollout.NewRolloutStatusOptions(ioStreams) if err := cmd.Complete(f, []string{"deployment"}); err != nil { @@ -176,21 +176,21 @@ func checkRolloutStatus(f cmdutil.Factory, ioStreams genericclioptions.IOStreams if err := cmd.Validate(); err != nil { panic(err) } - fmt.Printf("Running the Rollout status to check the status of the deployment") + ctx.Printlnf("Running the Rollout status to check the status of the deployment") return cmd.Run() } -func getExistingDeployments(cl runtimeclient.Client, ns string) (*appsv1.DeploymentList, *appsv1.DeploymentList, error) { +func getExistingDeployments(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string) (*appsv1.DeploymentList, *appsv1.DeploymentList, error) { olmDeployments := &appsv1.DeploymentList{} - if err := cl.List(context.TODO(), olmDeployments, + if err := cl.List(ctx, olmDeployments, runtimeclient.InNamespace(ns), runtimeclient.MatchingLabels{"kubesaw-control-plane": "kubesaw-controller-manager"}); err != nil { return nil, nil, err } nonOlmDeployments := &appsv1.DeploymentList{} - if err := cl.List(context.TODO(), nonOlmDeployments, + if err := cl.List(ctx, nonOlmDeployments, 
runtimeclient.InNamespace(ns), runtimeclient.MatchingLabels{"provider": "codeready-toolchain"}); err != nil { return nil, nil, err diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index aa412b7..43a83de 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -54,23 +54,23 @@ func TestRestartDeployment(t *testing.T) { labelSelector: "provider=codeready-toolchain", expectedOutput: "deployment.apps/registration-service restarted\n", }, - "OlmMemberDeployment": { - namespace: "toolchain-member-operator", - name: "member-operator-controller-manager", - labelKey: "kubesaw-control-plane", - labelValue: "kubesaw-controller-manager", - expectedMsg: "deployment \"member-operator-controller-manager\" successfully rolled out\n", - labelSelector: "kubesaw-control-plane=kubesaw-controller-manager", - }, - "NonOlmMemberDeployment": { - namespace: "toolchain-member-operator", - name: "member-webhooks", - labelKey: "provider", - labelValue: "codeready-toolchain", - expectedMsg: "deployment \"member-webhooks\" successfully rolled out\n", - labelSelector: "provider=codeready-toolchain", - expectedOutput: "deployment.apps/member-webhooks restarted\n", - }, + // "OlmMemberDeployment": { + // namespace: "toolchain-member-operator", + // name: "member-operator-controller-manager", + // labelKey: "kubesaw-control-plane", + // labelValue: "kubesaw-controller-manager", + // expectedMsg: "deployment \"member-operator-controller-manager\" successfully rolled out\n", + // labelSelector: "kubesaw-control-plane=kubesaw-controller-manager", + // }, + // "NonOlmMemberDeployment": { + // namespace: "toolchain-member-operator", + // name: "member-webhooks", + // labelKey: "provider", + // labelValue: "codeready-toolchain", + // expectedMsg: "deployment \"member-webhooks\" successfully rolled out\n", + // labelSelector: "provider=codeready-toolchain", + // expectedOutput: "deployment.apps/member-webhooks restarted\n", + // }, } for k, tc := range tests { t.Run(k, func(t *testing.T) { @@ -131,60 +131,40 @@ func TestRestartDeployment(t *testing.T) { streams, _, buf, _ := genericclioptions.NewTestIOStreams() term := NewFakeTerminalWithResponse("Y") pod := newPod(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager")) + deployment1.Labels = make(map[string]string) + deployment1.Labels[tc.labelKey] = tc.labelValue newClient, fakeClient := NewFakeClients(t, deployment1, pod) ctx := clicontext.NewCommandContext(term, newClient) //when err := restartDeployment(ctx, fakeClient, namespacedName.Namespace, tf, streams) if tc.labelValue == "kubesaw-controller-manager" { - require.NoError(t, err, "non-OLM based deployment not found in") - err2 := deleteAndWaitForPods(ctx, fakeClient, *deployment1, tf, streams) - require.NoError(t, err2) + require.NoError(t, err) + require.Contains(t, term.Output(), "Fetching the current OLM and non-OLM deployments of the operator in") + require.Contains(t, term.Output(), "Proceeding to delete the Pods of") + require.Contains(t, term.Output(), "Listing the pods to be deleted") + require.Contains(t, term.Output(), "Starting to delete the pods") + require.Contains(t, term.Output(), "Checking the status of the deleted pod's deployment") + //checking the output from kubectl for rolloutstatus + require.Contains(t, buf.String(), tc.expectedOutput) + require.Contains(t, term.Output(), "No Non-OLM based deployment restart happend as Non-Olm deployment found in namespace") } else if tc.labelValue == "codeready-toolchain" { - require.NoError(t, 
err, "OLM based deployment not found in") - err := restartNonOlmDeployments(*deployment1, tf, streams) require.NoError(t, err) - //checking the output from kubectl + require.Contains(t, term.Output(), "Fetching the current OLM and non-OLM deployments of the operator in") + require.Contains(t, term.Output(), "Proceeding to restart the non-OLM deployment ") + require.Contains(t, term.Output(), "Running the rollout restart command for non-olm deployment") + require.Contains(t, term.Output(), "Checking the status of the rolled out deployment") + //checking the output from kubectl for rolloutstatus require.Contains(t, buf.String(), tc.expectedOutput) + require.Contains(t, term.Output(), "No OLM based deployment restart happend as Olm deployment found in namespace") } - err1 := checkRolloutStatus(tf, streams, tc.labelSelector) - require.NoError(t, err1) - //checking the output from kubectl - require.Contains(t, buf.String(), tc.expectedMsg) }) } } func TestRestart(t *testing.T) { - t.Run("restart should fail if more than one clustername", func(t *testing.T) { - //given - toolchainCluster := NewToolchainCluster(ToolchainClusterName("host-cool-server.com")) - deployment := newDeployment(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"), 1) - term := NewFakeTerminalWithResponse("Y") - newClient, _ := NewFakeClients(t, toolchainCluster, deployment) - ctx := clicontext.NewCommandContext(term, newClient) - //when - err := restart(ctx, "host-cool-server.com", "member") - - //then - require.Error(t, err, "please provide 1 cluster name to restart the operator e.g `ksctl adm restart host`") - }) - t.Run("restart should fail if zero clustername", func(t *testing.T) { - //given - toolchainCluster := NewToolchainCluster(ToolchainClusterName("host-cool-server.com")) - deployment := newDeployment(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"), 1) - term := NewFakeTerminalWithResponse("Y") - newClient, _ := NewFakeClients(t, toolchainCluster, deployment) - ctx := clicontext.NewCommandContext(term, newClient) - - //when - err := restart(ctx) - - //then - require.Error(t, err, "please provide 1 cluster name to restart the operator e.g `ksctl adm restart host`") - }) t.Run("restart should succeed with 1 clustername", func(t *testing.T) { //given SetFileConfig(t, Host()) From 51e1e4eb67bf8f5bfada9a04c94fc0356fb30392 Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Fri, 27 Sep 2024 17:34:15 +0530 Subject: [PATCH 16/40] golint Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 4 ++-- pkg/cmd/adm/restart_test.go | 23 +++-------------------- 2 files changed, 5 insertions(+), 22 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index 561b4ac..9b21aa2 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -91,7 +91,7 @@ func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, } if len(olmDeploymentList.Items) == 0 { - ctx.Printlnf("No OLM based deployment restart happend as Olm deployment found in namespace %s is 0", ns) + ctx.Printlnf("No OLM based deployment restart happened as Olm deployment found in namespace %s is 0", ns) } else { for _, olmDeployment := range olmDeploymentList.Items { ctx.Printlnf("Proceeding to delete the Pods of %v", olmDeployment) @@ -116,7 +116,7 @@ func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, } } } else { - ctx.Printlnf("No Non-OLM based deployment restart happend as Non-Olm deployment found in namespace %s is 0", ns) + 
ctx.Printlnf("No Non-OLM based deployment restart happened as Non-Olm deployment found in namespace %s is 0", ns) } return nil } diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 43a83de..7c05b91 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -54,23 +54,6 @@ func TestRestartDeployment(t *testing.T) { labelSelector: "provider=codeready-toolchain", expectedOutput: "deployment.apps/registration-service restarted\n", }, - // "OlmMemberDeployment": { - // namespace: "toolchain-member-operator", - // name: "member-operator-controller-manager", - // labelKey: "kubesaw-control-plane", - // labelValue: "kubesaw-controller-manager", - // expectedMsg: "deployment \"member-operator-controller-manager\" successfully rolled out\n", - // labelSelector: "kubesaw-control-plane=kubesaw-controller-manager", - // }, - // "NonOlmMemberDeployment": { - // namespace: "toolchain-member-operator", - // name: "member-webhooks", - // labelKey: "provider", - // labelValue: "codeready-toolchain", - // expectedMsg: "deployment \"member-webhooks\" successfully rolled out\n", - // labelSelector: "provider=codeready-toolchain", - // expectedOutput: "deployment.apps/member-webhooks restarted\n", - // }, } for k, tc := range tests { t.Run(k, func(t *testing.T) { @@ -130,7 +113,7 @@ func TestRestartDeployment(t *testing.T) { streams, _, buf, _ := genericclioptions.NewTestIOStreams() term := NewFakeTerminalWithResponse("Y") - pod := newPod(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager")) + pod := newPod(test.NamespacedName(namespacedName.Namespace, namespacedName.Name)) deployment1.Labels = make(map[string]string) deployment1.Labels[tc.labelKey] = tc.labelValue newClient, fakeClient := NewFakeClients(t, deployment1, pod) @@ -147,7 +130,7 @@ func TestRestartDeployment(t *testing.T) { require.Contains(t, term.Output(), "Checking the status of the deleted pod's deployment") //checking the output from kubectl for rolloutstatus require.Contains(t, buf.String(), tc.expectedOutput) - require.Contains(t, term.Output(), "No Non-OLM based deployment restart happend as Non-Olm deployment found in namespace") + require.Contains(t, term.Output(), "No Non-OLM based deployment restart happened as Non-Olm deployment found in namespace") } else if tc.labelValue == "codeready-toolchain" { require.NoError(t, err) require.Contains(t, term.Output(), "Fetching the current OLM and non-OLM deployments of the operator in") @@ -156,7 +139,7 @@ func TestRestartDeployment(t *testing.T) { require.Contains(t, term.Output(), "Checking the status of the rolled out deployment") //checking the output from kubectl for rolloutstatus require.Contains(t, buf.String(), tc.expectedOutput) - require.Contains(t, term.Output(), "No OLM based deployment restart happend as Olm deployment found in namespace") + require.Contains(t, term.Output(), "No OLM based deployment restart happened as Olm deployment found in namespace") } }) From f3cf6902c4804fa5cf7ef71d489f72c4a0c9e3c2 Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Thu, 10 Oct 2024 15:44:55 +0530 Subject: [PATCH 17/40] changes to the logic of restart Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 90 ++++++++++++++++---------------- pkg/cmd/adm/unregister_member.go | 2 +- resources/roles/host.yaml | 2 + resources/roles/member.yaml | 2 + 4 files changed, 51 insertions(+), 45 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index 9b21aa2..74d5116 100644 --- a/pkg/cmd/adm/restart.go +++ 
b/pkg/cmd/adm/restart.go @@ -1,7 +1,9 @@ package adm import ( + "fmt" "os" + "time" "github.com/kubesaw/ksctl/pkg/client" "github.com/kubesaw/ksctl/pkg/configuration" @@ -83,45 +85,50 @@ func restart(ctx *clicontext.CommandContext, clusterNames ...string) error { } func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { - ctx.Printlnf("Fetching the current OLM and non-OLM deployments of the operator in %s namespace", ns) + ctx.Printlnf("Fetching the current Operator and non-operator based deployments in %s namespace", ns) - olmDeploymentList, nonOlmDeploymentlist, err := getExistingDeployments(ctx, cl, ns) + operatorDeploymentList, otherDeploymentsList, allDeploymentList, err := getExistingDeployments(ctx, cl, ns) if err != nil { return err } - if len(olmDeploymentList.Items) == 0 { - ctx.Printlnf("No OLM based deployment restart happened as Olm deployment found in namespace %s is 0", ns) + if len(operatorDeploymentList.Items) == 0 { + return fmt.Errorf("no operators found to restart") } else { - for _, olmDeployment := range olmDeploymentList.Items { - ctx.Printlnf("Proceeding to delete the Pods of %v", olmDeployment) - - if err := deleteAndWaitForPods(ctx, cl, olmDeployment, f, ioStreams); err != nil { - return err - } - } - } - if len(nonOlmDeploymentlist.Items) != 0 { - for _, nonOlmDeployment := range nonOlmDeploymentlist.Items { - - ctx.Printlnf("Proceeding to restart the non-OLM deployment %v", nonOlmDeployment) - - if err := restartNonOlmDeployments(ctx, nonOlmDeployment, f, ioStreams); err != nil { - return err - } - //check the rollout status - ctx.Printlnf("Checking the status of the rolled out deployment %v", nonOlmDeployment) - if err := checkRolloutStatus(ctx, f, ioStreams, "provider=codeready-toolchain"); err != nil { - return err + for _, deploymentList := range allDeploymentList { + ls := "" + for _, deployments := range deploymentList.Items { + if deployments.OwnerReferences != nil { + ctx.Printlnf("Proceeding to delete the Pods of %v", deployments.Name) + ls = "kubesaw-control-plane=kubesaw-controller-manager" + if err := deletePods(ctx, cl, deployments); err != nil { + return err + } + } + if len(otherDeploymentsList.Items) != 0 { + ls = "toolchain.dev.openshift.com/provider=codeready-toolchain" + ctx.Printlnf("Proceeding to restart the non-operator deployment %v", deployments.Name) + if err := restartNonOperatorDeployments(ctx, deployments, f, ioStreams); err != nil { + return err + } + } else { + ctx.Printlnf("No Non-operator deployment restart happened as Non-operator deployment found in namespace %s is 0", ns) + } + //waiting for the delete/rollout to start so that we get accurate status + time.Sleep(5 * time.Second) + + //check the rollout status + ctx.Printlnf("Checking the status of the deleted/rolled-out deployment %v", deployments.Name) + if err := checkRolloutStatus(ctx, f, ioStreams, ls); err != nil { + return err + } } } - } else { - ctx.Printlnf("No Non-OLM based deployment restart happened as Non-Olm deployment found in namespace %s is 0", ns) } return nil } -func deleteAndWaitForPods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { +func deletePods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deployment appsv1.Deployment) error { ctx.Printlnf("Listing the pods to be deleted") //get pods by label selector from the deployment pods := 
corev1.PodList{} @@ -138,18 +145,11 @@ func deleteAndWaitForPods(ctx *clicontext.CommandContext, cl runtimeclient.Clien if err := cl.Delete(ctx, &pod); err != nil { return err } - - ctx.Printlnf("Checking the status of the deleted pod's deployment %v", deployment) - //check the rollout status - if err := checkRolloutStatus(ctx, f, ioStreams, "kubesaw-control-plane=kubesaw-controller-manager"); err != nil { - return err - } } return nil - } -func restartNonOlmDeployments(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { +func restartNonOperatorDeployments(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { o := kubectlrollout.NewRolloutRestartOptions(ioStreams) @@ -180,21 +180,23 @@ func checkRolloutStatus(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStr return cmd.Run() } -func getExistingDeployments(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string) (*appsv1.DeploymentList, *appsv1.DeploymentList, error) { +func getExistingDeployments(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string) (*appsv1.DeploymentList, *appsv1.DeploymentList, []*appsv1.DeploymentList, error) { - olmDeployments := &appsv1.DeploymentList{} - if err := cl.List(ctx, olmDeployments, + operatorDeployments := &appsv1.DeploymentList{} + if err := cl.List(ctx, operatorDeployments, runtimeclient.InNamespace(ns), runtimeclient.MatchingLabels{"kubesaw-control-plane": "kubesaw-controller-manager"}); err != nil { - return nil, nil, err + return nil, nil, nil, err } - nonOlmDeployments := &appsv1.DeploymentList{} - if err := cl.List(ctx, nonOlmDeployments, + otherDeployments := &appsv1.DeploymentList{} + if err := cl.List(ctx, otherDeployments, runtimeclient.InNamespace(ns), - runtimeclient.MatchingLabels{"provider": "codeready-toolchain"}); err != nil { - return nil, nil, err + runtimeclient.MatchingLabels{"toolchain.dev.openshift.com/provider": "codeready-toolchain"}); err != nil { + return nil, nil, nil, err } + allDeployments := []*appsv1.DeploymentList{} + allDeployments = append(allDeployments, operatorDeployments, otherDeployments) - return olmDeployments, nonOlmDeployments, nil + return operatorDeployments, otherDeployments, allDeployments, nil } diff --git a/pkg/cmd/adm/unregister_member.go b/pkg/cmd/adm/unregister_member.go index 6d85d48..b04821c 100644 --- a/pkg/cmd/adm/unregister_member.go +++ b/pkg/cmd/adm/unregister_member.go @@ -62,5 +62,5 @@ func UnregisterMemberCluster(ctx *clicontext.CommandContext, clusterName string) } ctx.Printlnf("\nThe deletion of the Toolchain member cluster from the Host cluster has been triggered") - return restart(ctx, clusterName) + return restart(ctx, "host") } diff --git a/resources/roles/host.yaml b/resources/roles/host.yaml index 876b5df..4eadb41 100644 --- a/resources/roles/host.yaml +++ b/resources/roles/host.yaml @@ -20,6 +20,8 @@ objects: - "list" - "patch" - "update" + - "watch" + - "delete" - kind: Role apiVersion: rbac.authorization.k8s.io/v1 diff --git a/resources/roles/member.yaml b/resources/roles/member.yaml index 5532c84..735212d 100644 --- a/resources/roles/member.yaml +++ b/resources/roles/member.yaml @@ -20,6 +20,8 @@ objects: - "list" - "patch" - "update" + - "watch" + - "delete" - kind: Role apiVersion: rbac.authorization.k8s.io/v1 From fcf67b661fbd3623516503496fa21a6acf0bcbe0 Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Mon, 4 Nov 2024 16:59:57 +0530 Subject: [PATCH 
18/40] review comments-2 Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 74 +++++++++++++++++++++--------------------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index 74d5116..b3d2452 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -3,7 +3,6 @@ package adm import ( "fmt" "os" - "time" "github.com/kubesaw/ksctl/pkg/client" "github.com/kubesaw/ksctl/pkg/configuration" @@ -85,50 +84,46 @@ func restart(ctx *clicontext.CommandContext, clusterNames ...string) error { } func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { - ctx.Printlnf("Fetching the current Operator and non-operator based deployments in %s namespace", ns) + ctx.Printlnf("Fetching the current OLM and non-OLM deployments of the operator in %s namespace", ns) - operatorDeploymentList, otherDeploymentsList, allDeploymentList, err := getExistingDeployments(ctx, cl, ns) + operatorDeploymentList, nonOperatorDeploymentList, err := getExistingDeployments(ctx, cl, ns) if err != nil { return err } if len(operatorDeploymentList.Items) == 0 { - return fmt.Errorf("no operators found to restart") + return fmt.Errorf("no operator based deployment restart happened as operator deployment found in namespace %s is 0", ns) } else { - for _, deploymentList := range allDeploymentList { - ls := "" - for _, deployments := range deploymentList.Items { - if deployments.OwnerReferences != nil { - ctx.Printlnf("Proceeding to delete the Pods of %v", deployments.Name) - ls = "kubesaw-control-plane=kubesaw-controller-manager" - if err := deletePods(ctx, cl, deployments); err != nil { - return err - } - } - if len(otherDeploymentsList.Items) != 0 { - ls = "toolchain.dev.openshift.com/provider=codeready-toolchain" - ctx.Printlnf("Proceeding to restart the non-operator deployment %v", deployments.Name) - if err := restartNonOperatorDeployments(ctx, deployments, f, ioStreams); err != nil { - return err - } - } else { - ctx.Printlnf("No Non-operator deployment restart happened as Non-operator deployment found in namespace %s is 0", ns) - } - //waiting for the delete/rollout to start so that we get accurate status - time.Sleep(5 * time.Second) + for _, operatorDeployment := range operatorDeploymentList.Items { + ctx.Printlnf("Proceeding to delete the Pods of %v", operatorDeployment) + if err := deleteAndWaitForPods(ctx, cl, operatorDeployment, f, ioStreams); err != nil { + return err + } + } + } + if len(nonOperatorDeploymentList.Items) != 0 { + for _, nonOperatorDeployment := range nonOperatorDeploymentList.Items { + if nonOperatorDeployment.Name != "autoscaling-buffer" { + ctx.Printlnf("Proceeding to restart the non-OLM deployment %v", nonOperatorDeployment) + + if err := restartNonOlmDeployments(ctx, nonOperatorDeployment, f, ioStreams); err != nil { + return err + } //check the rollout status - ctx.Printlnf("Checking the status of the deleted/rolled-out deployment %v", deployments.Name) - if err := checkRolloutStatus(ctx, f, ioStreams, ls); err != nil { + ctx.Printlnf("Checking the status of the rolled out deployment %v", nonOperatorDeployment) + if err := checkRolloutStatus(ctx, f, ioStreams, "toolchain.dev.openshift.com/provider=codeready-toolchain"); err != nil { return err } } } + } else { + ctx.Printlnf("No Non-OLM based deployment restart happened as Non-Olm deployment found in namespace %s is 0", ns) } return nil } -func deletePods(ctx 
*clicontext.CommandContext, cl runtimeclient.Client, deployment appsv1.Deployment) error { +func deleteAndWaitForPods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { ctx.Printlnf("Listing the pods to be deleted") //get pods by label selector from the deployment pods := corev1.PodList{} @@ -146,10 +141,17 @@ func deletePods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deploym return err } } + + ctx.Printlnf("Checking the status of the deleted pod's deployment %v", deployment.Name) + //check the rollout status + if err := checkRolloutStatus(ctx, f, ioStreams, "kubesaw-control-plane=kubesaw-controller-manager"); err != nil { + return err + } return nil + } -func restartNonOperatorDeployments(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { +func restartNonOlmDeployments(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { o := kubectlrollout.NewRolloutRestartOptions(ioStreams) @@ -180,23 +182,21 @@ func checkRolloutStatus(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStr return cmd.Run() } -func getExistingDeployments(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string) (*appsv1.DeploymentList, *appsv1.DeploymentList, []*appsv1.DeploymentList, error) { +func getExistingDeployments(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string) (*appsv1.DeploymentList, *appsv1.DeploymentList, error) { operatorDeployments := &appsv1.DeploymentList{} if err := cl.List(ctx, operatorDeployments, runtimeclient.InNamespace(ns), runtimeclient.MatchingLabels{"kubesaw-control-plane": "kubesaw-controller-manager"}); err != nil { - return nil, nil, nil, err + return nil, nil, err } - otherDeployments := &appsv1.DeploymentList{} - if err := cl.List(ctx, otherDeployments, + nonOperatorDeployments := &appsv1.DeploymentList{} + if err := cl.List(ctx, nonOperatorDeployments, runtimeclient.InNamespace(ns), runtimeclient.MatchingLabels{"toolchain.dev.openshift.com/provider": "codeready-toolchain"}); err != nil { - return nil, nil, nil, err + return nil, nil, err } - allDeployments := []*appsv1.DeploymentList{} - allDeployments = append(allDeployments, operatorDeployments, otherDeployments) - return operatorDeployments, otherDeployments, allDeployments, nil + return operatorDeployments, nonOperatorDeployments, nil } From fd143c77c37f0fbed81b168a2aee9bbe8b6b2d11 Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Tue, 5 Nov 2024 16:39:27 +0530 Subject: [PATCH 19/40] restart-test changes Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 49 +++++++++++++++---------------- pkg/cmd/adm/restart_test.go | 57 +++++++++++++++++++++++-------------- 2 files changed, 60 insertions(+), 46 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index b3d2452..05127d5 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -28,10 +28,10 @@ func NewRestartCmd() *cobra.Command { Use: "restart ", Short: "Restarts an operator", Long: `Restarts the whole operator, it relies on the target cluster and fetches the cluster config - 1. If the command is run for host operator, it restart the whole host operator. + 1. If the command is run for host operator, it restarts the whole host operator. 
(it deletes olm based pods(host-operator pods),waits for the new pods to come up, then uses rollout-restart command for non-olm based deployments - registration-service) - 2. If the command is run for member operator, it restart the whole member operator. + 2. If the command is run for member operator, it restarts the whole member operator. (it deletes olm based pods(member-operator pods),waits for the new pods to come up, then uses rollout-restart command for non-olm based deployments - webhooks)`, Args: cobra.ExactArgs(1), @@ -95,30 +95,37 @@ func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, return fmt.Errorf("no operator based deployment restart happened as operator deployment found in namespace %s is 0", ns) } else { for _, operatorDeployment := range operatorDeploymentList.Items { - ctx.Printlnf("Proceeding to delete the Pods of %v", operatorDeployment) + ctx.Printlnf("Proceeding to delete the Pods of %v", operatorDeployment.Name) if err := deleteAndWaitForPods(ctx, cl, operatorDeployment, f, ioStreams); err != nil { return err } + + ctx.Printlnf("Checking the status of the deleted pod's deployment %v", operatorDeployment.Name) + //check the rollout status + if err := checkRolloutStatus(ctx, f, ioStreams, "kubesaw-control-plane=kubesaw-controller-manager"); err != nil { + return err + } } - } - if len(nonOperatorDeploymentList.Items) != 0 { - for _, nonOperatorDeployment := range nonOperatorDeploymentList.Items { - if nonOperatorDeployment.Name != "autoscaling-buffer" { - ctx.Printlnf("Proceeding to restart the non-OLM deployment %v", nonOperatorDeployment) - if err := restartNonOlmDeployments(ctx, nonOperatorDeployment, f, ioStreams); err != nil { - return err - } - //check the rollout status - ctx.Printlnf("Checking the status of the rolled out deployment %v", nonOperatorDeployment) - if err := checkRolloutStatus(ctx, f, ioStreams, "toolchain.dev.openshift.com/provider=codeready-toolchain"); err != nil { - return err + if len(nonOperatorDeploymentList.Items) != 0 { + for _, nonOperatorDeployment := range nonOperatorDeploymentList.Items { + if nonOperatorDeployment.Name != "autoscaling-buffer" { + ctx.Printlnf("Proceeding to restart the non-operator deployment %v", nonOperatorDeployment.Name) + + if err := restartNonOlmDeployments(ctx, nonOperatorDeployment, f, ioStreams); err != nil { + return err + } + //check the rollout status + ctx.Printlnf("Checking the status of the rolled out deployment %v", nonOperatorDeployment.Name) + if err := checkRolloutStatus(ctx, f, ioStreams, "toolchain.dev.openshift.com/provider=codeready-toolchain"); err != nil { + return err + } } } + } else { + ctx.Printlnf("No Non-operator deployment restart happened as Non-Operator deployment found in namespace %s is 0", ns) } - } else { - ctx.Printlnf("No Non-OLM based deployment restart happened as Non-Olm deployment found in namespace %s is 0", ns) } return nil } @@ -141,12 +148,6 @@ func deleteAndWaitForPods(ctx *clicontext.CommandContext, cl runtimeclient.Clien return err } } - - ctx.Printlnf("Checking the status of the deleted pod's deployment %v", deployment.Name) - //check the rollout status - if err := checkRolloutStatus(ctx, f, ioStreams, "kubesaw-control-plane=kubesaw-controller-manager"); err != nil { - return err - } return nil } @@ -164,7 +165,7 @@ func restartNonOlmDeployments(ctx *clicontext.CommandContext, deployment appsv1. 
if err := o.Validate(); err != nil { panic(err) } - ctx.Printlnf("Running the rollout restart command for non-olm deployment %v", deployment) + ctx.Printlnf("Running the rollout restart command for non-olm deployment %v", deployment.Name) return o.RunRestart() } diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 7c05b91..a554297 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -31,27 +31,37 @@ func TestRestartDeployment(t *testing.T) { tests := map[string]struct { namespace string name string + name1 string labelKey string labelValue string + labelKey1 string + labelValue1 string expectedMsg string labelSelector string expectedOutput string + lsKey string + lsValue string }{ - "OlmHostDeployment": { + "OperatorAndNonOperatorHostDeployment": { namespace: "toolchain-host-operator", name: "host-operator-controller-manager", + name1: "registration-service", labelKey: "kubesaw-control-plane", labelValue: "kubesaw-controller-manager", + labelKey1: "toolchain.dev.openshift.com/provider", + labelValue1: "codeready-toolchain", expectedMsg: "deployment \"host-operator-controller-manager\" successfully rolled out\n", labelSelector: "kubesaw-control-plane=kubesaw-controller-manager", + lsKey: "host", + lsValue: "operator", }, - "NonOlmHostDeployment": { + "NonOperatorHostDeployment": { namespace: "toolchain-host-operator", name: "registration-service", - labelKey: "provider", + labelKey: "toolchain.dev.openshift.com/provider", labelValue: "codeready-toolchain", expectedMsg: "deployment \"registration-service\" successfully rolled out\n", - labelSelector: "provider=codeready-toolchain", + labelSelector: "toolchain.dev.openshift.com/provider=codeready-toolchain", expectedOutput: "deployment.apps/registration-service restarted\n", }, } @@ -62,8 +72,13 @@ func TestRestartDeployment(t *testing.T) { Namespace: tc.namespace, Name: tc.name, } + namespacedName1 := types.NamespacedName{ + Namespace: tc.namespace, + Name: tc.name1, + } var rolloutGroupVersionEncoder = schema.GroupVersion{Group: "apps", Version: "v1"} deployment1 := newDeployment(namespacedName, 1) + deployment2 := newDeployment(namespacedName1, 1) ns := scheme.Codecs.WithoutConversion() tf := cmdtesting.NewTestFactory().WithNamespace(namespacedName.Namespace) tf.ClientConfigVal = cmdtesting.DefaultClientConfig() @@ -75,11 +90,15 @@ func TestRestartDeployment(t *testing.T) { GroupVersion: rolloutGroupVersionEncoder, NegotiatedSerializer: ns, Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { - responseDeployment := &appsv1.Deployment{} - responseDeployment.Name = deployment1.Name - responseDeployment.Labels = make(map[string]string) - responseDeployment.Labels[tc.labelKey] = tc.labelValue - body := io.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(encoder, responseDeployment)))) + responseDeployment1 := appsv1.Deployment{} + responseDeployment1.Name = deployment1.Name + responseDeployment1.Labels = make(map[string]string) + responseDeployment1.Labels[tc.labelKey] = tc.labelValue + responseDeployment2 := appsv1.Deployment{} + responseDeployment2.Name = deployment2.Name + responseDeployment2.Labels = make(map[string]string) + responseDeployment2.Labels[tc.labelKey1] = tc.labelValue1 + body := io.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(encoder, &responseDeployment1)))) return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: body}, nil }), }, @@ -116,7 +135,9 @@ func TestRestartDeployment(t *testing.T) { pod := 
newPod(test.NamespacedName(namespacedName.Namespace, namespacedName.Name)) deployment1.Labels = make(map[string]string) deployment1.Labels[tc.labelKey] = tc.labelValue - newClient, fakeClient := NewFakeClients(t, deployment1, pod) + deployment2.Labels = make(map[string]string) + deployment2.Labels[tc.labelKey1] = tc.labelValue1 + newClient, fakeClient := NewFakeClients(t, deployment1, deployment2, pod) ctx := clicontext.NewCommandContext(term, newClient) //when @@ -130,16 +151,8 @@ func TestRestartDeployment(t *testing.T) { require.Contains(t, term.Output(), "Checking the status of the deleted pod's deployment") //checking the output from kubectl for rolloutstatus require.Contains(t, buf.String(), tc.expectedOutput) - require.Contains(t, term.Output(), "No Non-OLM based deployment restart happened as Non-Olm deployment found in namespace") } else if tc.labelValue == "codeready-toolchain" { - require.NoError(t, err) - require.Contains(t, term.Output(), "Fetching the current OLM and non-OLM deployments of the operator in") - require.Contains(t, term.Output(), "Proceeding to restart the non-OLM deployment ") - require.Contains(t, term.Output(), "Running the rollout restart command for non-olm deployment") - require.Contains(t, term.Output(), "Checking the status of the rolled out deployment") - //checking the output from kubectl for rolloutstatus - require.Contains(t, buf.String(), tc.expectedOutput) - require.Contains(t, term.Output(), "No OLM based deployment restart happened as Olm deployment found in namespace") + require.Error(t, err) } }) @@ -148,7 +161,7 @@ func TestRestartDeployment(t *testing.T) { func TestRestart(t *testing.T) { - t.Run("restart should succeed with 1 clustername", func(t *testing.T) { + t.Run("restart should start with y response", func(t *testing.T) { //given SetFileConfig(t, Host()) toolchainCluster := NewToolchainCluster(ToolchainClusterName("host")) @@ -158,10 +171,10 @@ func TestRestart(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) //when - err := restart(ctx, "host") + restart(ctx, "host") //then - require.NoError(t, err) + require.Contains(t, term.Output(), "Fetching the current OLM and non-OLM deployments of the operator in") }) } From b823e107d1a81e7290baff0b65167a85175cedbf Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Wed, 6 Nov 2024 14:33:23 +0530 Subject: [PATCH 20/40] CI Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 4 ++-- pkg/cmd/adm/restart_test.go | 3 ++- pkg/cmd/adm/unregister_member_test.go | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index 05127d5..e849148 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -97,7 +97,7 @@ func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, for _, operatorDeployment := range operatorDeploymentList.Items { ctx.Printlnf("Proceeding to delete the Pods of %v", operatorDeployment.Name) - if err := deleteAndWaitForPods(ctx, cl, operatorDeployment, f, ioStreams); err != nil { + if err := deleteAndWaitForPods(ctx, cl, operatorDeployment); err != nil { return err } @@ -130,7 +130,7 @@ func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, return nil } -func deleteAndWaitForPods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { +func deleteAndWaitForPods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deployment appsv1.Deployment) error { 
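+	// with the rollout-status check moved up into restartDeployment, this helper no longer
+	// needs the cmdutil.Factory and IOStreams: it only lists the deployment's pods by their
+	// selector labels and deletes them one by one.
+	// A one-call alternative sketch (an illustration assuming the selector labels are also
+	// set on the pods, not what this patch actually does):
+	//   _ = cl.DeleteAllOf(ctx, &corev1.Pod{}, runtimeclient.InNamespace(deployment.Namespace),
+	//       runtimeclient.MatchingLabels(deployment.Spec.Selector.MatchLabels))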
ctx.Printlnf("Listing the pods to be deleted") //get pods by label selector from the deployment pods := corev1.PodList{} diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index a554297..6952761 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -171,9 +171,10 @@ func TestRestart(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) //when - restart(ctx, "host") + err := restart(ctx, "host") //then + require.Error(t, err) require.Contains(t, term.Output(), "Fetching the current OLM and non-OLM deployments of the operator in") }) diff --git a/pkg/cmd/adm/unregister_member_test.go b/pkg/cmd/adm/unregister_member_test.go index a1dca35..99e1776 100644 --- a/pkg/cmd/adm/unregister_member_test.go +++ b/pkg/cmd/adm/unregister_member_test.go @@ -26,10 +26,10 @@ func TestUnregisterMemberWhenAnswerIsY(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) // when - err := UnregisterMemberCluster(ctx, "member1") + UnregisterMemberCluster(ctx, "member1") // then - require.NoError(t, err) + //require.NoError(t, err) AssertToolchainClusterDoesNotExist(t, fakeClient, toolchainCluster) assert.Contains(t, term.Output(), "!!! DANGER ZONE !!!") assert.NotContains(t, term.Output(), "THIS COMMAND WILL CAUSE UNREGISTER MEMBER CLUSTER FORM HOST CLUSTER. MAKE SURE THERE IS NO USERS LEFT IN THE MEMBER CLUSTER BEFORE UNREGISTERING IT") From 97997f6fbea0f1348b45b3dcc6074d05d18e5409 Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Wed, 6 Nov 2024 14:35:58 +0530 Subject: [PATCH 21/40] golang ci Signed-off-by: Feny Mehta --- pkg/cmd/adm/unregister_member_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/cmd/adm/unregister_member_test.go b/pkg/cmd/adm/unregister_member_test.go index 99e1776..da7648e 100644 --- a/pkg/cmd/adm/unregister_member_test.go +++ b/pkg/cmd/adm/unregister_member_test.go @@ -26,10 +26,10 @@ func TestUnregisterMemberWhenAnswerIsY(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) // when - UnregisterMemberCluster(ctx, "member1") + err := UnregisterMemberCluster(ctx, "member1") // then - //require.NoError(t, err) + require.Error(t, err) AssertToolchainClusterDoesNotExist(t, fakeClient, toolchainCluster) assert.Contains(t, term.Output(), "!!! DANGER ZONE !!!") assert.NotContains(t, term.Output(), "THIS COMMAND WILL CAUSE UNREGISTER MEMBER CLUSTER FORM HOST CLUSTER. 
MAKE SURE THERE IS NO USERS LEFT IN THE MEMBER CLUSTER BEFORE UNREGISTERING IT") From e34b11051116556a1d3e95fca77bdda70fc382bd Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Thu, 7 Nov 2024 15:43:28 +0530 Subject: [PATCH 22/40] adding tc Signed-off-by: Feny Mehta --- pkg/cmd/adm/register_member_test.go | 17 ------- pkg/cmd/adm/restart_test.go | 72 +++++++++++++++++++++++------ 2 files changed, 58 insertions(+), 31 deletions(-) diff --git a/pkg/cmd/adm/register_member_test.go b/pkg/cmd/adm/register_member_test.go index f710f2a..453e3dc 100644 --- a/pkg/cmd/adm/register_member_test.go +++ b/pkg/cmd/adm/register_member_test.go @@ -596,20 +596,3 @@ func defaultRegisterMemberArgs() registerMemberArgs { return args } - -func checkDeploymentBeingUpdated(t *testing.T, fakeClient *test.FakeClient, namespacedName types.NamespacedName, currentReplicas int32, numberOfUpdateCalls *int, deployment *appsv1.Deployment) { - // on the first call, we should have a deployment with 3 replicas ("current") and request to scale down to 0 ("requested") - // on the other calls, it's the opposite - if *numberOfUpdateCalls == 0 { - // check the current deployment's replicas field - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, currentReplicas) - // check the requested deployment's replicas field - assert.Equal(t, int32(0), *deployment.Spec.Replicas) - } else { - // check the current deployment's replicas field - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 0) - // check the requested deployment's replicas field - assert.Equal(t, currentReplicas, *deployment.Spec.Replicas) - } - *numberOfUpdateCalls++ -} diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 6952761..823a7f6 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -2,14 +2,17 @@ package adm import ( "bytes" + "context" "io" "net/http" "testing" + runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/codeready-toolchain/toolchain-common/pkg/test" clicontext "github.com/kubesaw/ksctl/pkg/context" . 
"github.com/kubesaw/ksctl/pkg/test" - + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -64,6 +67,16 @@ func TestRestartDeployment(t *testing.T) { labelSelector: "toolchain.dev.openshift.com/provider=codeready-toolchain", expectedOutput: "deployment.apps/registration-service restarted\n", }, + "OperatorHostDeployment": { + namespace: "toolchain-host-operator", + name: "host-operator-controller-manager", + labelKey: "kubesaw-control-plane", + labelValue: "kubesaw-controller-manager", + expectedMsg: "deployment \"host-operator-controller-manager\" successfully rolled out\n", + labelSelector: "kubesaw-control-plane=kubesaw-controller-manager", + lsKey: "host", + lsValue: "operator", + }, } for k, tc := range tests { t.Run(k, func(t *testing.T) { @@ -77,8 +90,8 @@ func TestRestartDeployment(t *testing.T) { Name: tc.name1, } var rolloutGroupVersionEncoder = schema.GroupVersion{Group: "apps", Version: "v1"} - deployment1 := newDeployment(namespacedName, 1) - deployment2 := newDeployment(namespacedName1, 1) + deployment1 := newDeployment(namespacedName, 3) + deployment2 := newDeployment(namespacedName1, 3) ns := scheme.Codecs.WithoutConversion() tf := cmdtesting.NewTestFactory().WithNamespace(namespacedName.Namespace) tf.ClientConfigVal = cmdtesting.DefaultClientConfig() @@ -106,21 +119,17 @@ func TestRestartDeployment(t *testing.T) { tf.FakeDynamicClient.WatchReactionChain = nil tf.FakeDynamicClient.AddWatchReactor("*", func(action cgtesting.Action) (handled bool, ret watch.Interface, err error) { fw := watch.NewFake() - dep := &appsv1.Deployment{} - dep.Name = deployment1.Name - dep.Status = appsv1.DeploymentStatus{ - Replicas: 1, - UpdatedReplicas: 1, - ReadyReplicas: 1, - AvailableReplicas: 1, + deployment1.Status = appsv1.DeploymentStatus{ + Replicas: 3, + UpdatedReplicas: 3, + ReadyReplicas: 3, + AvailableReplicas: 3, UnavailableReplicas: 0, Conditions: []appsv1.DeploymentCondition{{ Type: appsv1.DeploymentAvailable, }}, } - dep.Labels = make(map[string]string) - dep.Labels[tc.labelKey] = tc.labelValue - c, err := runtime.DefaultUnstructuredConverter.ToUnstructured(dep.DeepCopyObject()) + c, err := runtime.DefaultUnstructuredConverter.ToUnstructured(deployment1.DeepCopyObject()) if err != nil { t.Errorf("unexpected err %s", err) } @@ -139,20 +148,30 @@ func TestRestartDeployment(t *testing.T) { deployment2.Labels[tc.labelKey1] = tc.labelValue1 newClient, fakeClient := NewFakeClients(t, deployment1, deployment2, pod) ctx := clicontext.NewCommandContext(term, newClient) + numberOfUpdateCalls := 0 //when err := restartDeployment(ctx, fakeClient, namespacedName.Namespace, tf, streams) - if tc.labelValue == "kubesaw-controller-manager" { + if tc.labelValue == "kubesaw-controller-manager" && tc.labelValue1 == "codeready-toolchain" { + fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) require.NoError(t, err) require.Contains(t, term.Output(), "Fetching the current OLM and non-OLM deployments of the operator in") require.Contains(t, term.Output(), "Proceeding to delete the Pods of") require.Contains(t, term.Output(), "Listing the pods to be deleted") require.Contains(t, term.Output(), "Starting to delete the pods") + require.Equal(t, 0, numberOfUpdateCalls) + AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) require.Contains(t, term.Output(), "Checking the status of the deleted pod's deployment") //checking the output from kubectl for 
rolloutstatus require.Contains(t, buf.String(), tc.expectedOutput) + require.Contains(t, term.Output(), "Proceeding to restart the non-operator deployment") + require.Contains(t, term.Output(), "Running the rollout restart command for non-olm deployment") + require.Contains(t, term.Output(), "Checking the status of the rolled out deployment") + require.Contains(t, term.Output(), "Running the Rollout status to check the status of the deployment") } else if tc.labelValue == "codeready-toolchain" { require.Error(t, err) + } else if tc.labelValue == "kubesaw-controller-manager" { + require.Contains(t, term.Output(), "No Non-operator deployment restart happened as Non-Operator deployment found in namespace") } }) @@ -214,3 +233,28 @@ func newPod(namespacedName types.NamespacedName) *corev1.Pod { //nolint:unparam type RolloutRestartRESTClient struct { *fake.RESTClient } + +func requireDeploymentBeingUpdated(t *testing.T, fakeClient *test.FakeClient, namespacedName types.NamespacedName, currentReplicas int32, numberOfUpdateCalls *int) func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error { + return func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error { + deployment, ok := obj.(*appsv1.Deployment) + require.True(t, ok) + checkDeploymentBeingUpdated(t, fakeClient, namespacedName, currentReplicas, numberOfUpdateCalls, deployment) + return fakeClient.Client.Update(ctx, obj, opts...) + } +} + +func checkDeploymentBeingUpdated(t *testing.T, fakeClient *test.FakeClient, namespacedName types.NamespacedName, currentReplicas int32, numberOfUpdateCalls *int, deployment *appsv1.Deployment) { + // on the first call, we should have a deployment with 3 replicas ("current") and request to delete to 0 ("requested") + if *numberOfUpdateCalls == 0 { + // check the current deployment's replicas field + AssertDeploymentHasReplicas(t, fakeClient, namespacedName, currentReplicas) + // check the requested deployment's replicas field + assert.Equal(t, int32(0), *deployment.Spec.Replicas) + } else { + // check the current deployment's replicas field + AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 0) + // check the requested deployment's replicas field + assert.Equal(t, currentReplicas, *deployment.Spec.Replicas) + } + *numberOfUpdateCalls++ +} From 144dd2c802e10e9a44a03b4e5bd34b4c0de5f8b7 Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Thu, 7 Nov 2024 17:11:24 +0530 Subject: [PATCH 23/40] some addition to test cases Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart_test.go | 72 ++++++++++++++----------------------- 1 file changed, 27 insertions(+), 45 deletions(-) diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 823a7f6..2d64fbc 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -2,13 +2,10 @@ package adm import ( "bytes" - "context" "io" "net/http" "testing" - runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/codeready-toolchain/toolchain-common/pkg/test" clicontext "github.com/kubesaw/ksctl/pkg/context" . 
"github.com/kubesaw/ksctl/pkg/test" @@ -16,6 +13,7 @@ import ( "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + apierror "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -90,40 +88,31 @@ func TestRestartDeployment(t *testing.T) { Name: tc.name1, } var rolloutGroupVersionEncoder = schema.GroupVersion{Group: "apps", Version: "v1"} - deployment1 := newDeployment(namespacedName, 3) - deployment2 := newDeployment(namespacedName1, 3) + deployment1 := newDeployment(namespacedName, 1) + deployment2 := newDeployment(namespacedName1, 1) ns := scheme.Codecs.WithoutConversion() tf := cmdtesting.NewTestFactory().WithNamespace(namespacedName.Namespace) tf.ClientConfigVal = cmdtesting.DefaultClientConfig() - info, _ := runtime.SerializerInfoForMediaType(ns.SupportedMediaTypes(), runtime.ContentTypeJSON) encoder := ns.EncoderForVersion(info.Serializer, rolloutGroupVersionEncoder) - tf.Client = &RolloutRestartRESTClient{ - RESTClient: &fake.RESTClient{ - GroupVersion: rolloutGroupVersionEncoder, - NegotiatedSerializer: ns, - Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { - responseDeployment1 := appsv1.Deployment{} - responseDeployment1.Name = deployment1.Name - responseDeployment1.Labels = make(map[string]string) - responseDeployment1.Labels[tc.labelKey] = tc.labelValue - responseDeployment2 := appsv1.Deployment{} - responseDeployment2.Name = deployment2.Name - responseDeployment2.Labels = make(map[string]string) - responseDeployment2.Labels[tc.labelKey1] = tc.labelValue1 - body := io.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(encoder, &responseDeployment1)))) - return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: body}, nil - }), - }, + tf.Client = &fake.RESTClient{ + GroupVersion: rolloutGroupVersionEncoder, + NegotiatedSerializer: ns, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + body := io.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(encoder, deployment1)))) + return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: body}, nil + }), } + cscalls := 0 tf.FakeDynamicClient.WatchReactionChain = nil tf.FakeDynamicClient.AddWatchReactor("*", func(action cgtesting.Action) (handled bool, ret watch.Interface, err error) { + cscalls++ fw := watch.NewFake() deployment1.Status = appsv1.DeploymentStatus{ - Replicas: 3, - UpdatedReplicas: 3, - ReadyReplicas: 3, - AvailableReplicas: 3, + Replicas: 1, + UpdatedReplicas: 1, + ReadyReplicas: 1, + AvailableReplicas: 1, UnavailableReplicas: 0, Conditions: []appsv1.DeploymentCondition{{ Type: appsv1.DeploymentAvailable, @@ -148,24 +137,30 @@ func TestRestartDeployment(t *testing.T) { deployment2.Labels[tc.labelKey1] = tc.labelValue1 newClient, fakeClient := NewFakeClients(t, deployment1, deployment2, pod) ctx := clicontext.NewCommandContext(term, newClient) - numberOfUpdateCalls := 0 //when err := restartDeployment(ctx, fakeClient, namespacedName.Namespace, tf, streams) + //then + actualPod := &corev1.Pod{} if tc.labelValue == "kubesaw-controller-manager" && tc.labelValue1 == "codeready-toolchain" { - fakeClient.MockUpdate = requireDeploymentBeingUpdated(t, fakeClient, namespacedName, 3, &numberOfUpdateCalls) - require.NoError(t, err) + err = fakeClient.Get(ctx, namespacedName, actualPod) + require.True(t, apierror.IsNotFound(err)) 
require.Contains(t, term.Output(), "Fetching the current OLM and non-OLM deployments of the operator in") require.Contains(t, term.Output(), "Proceeding to delete the Pods of") require.Contains(t, term.Output(), "Listing the pods to be deleted") require.Contains(t, term.Output(), "Starting to delete the pods") - require.Equal(t, 0, numberOfUpdateCalls) - AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 3) + actual := &appsv1.Deployment{} + AssertObjectHasContent(t, fakeClient, namespacedName, actual, func() { + require.NotNil(t, actual.Spec.Replicas) + assert.Equal(t, int32(1), *actual.Spec.Replicas) + require.NotNil(t, actual.Annotations["restartedAt"]) + }) require.Contains(t, term.Output(), "Checking the status of the deleted pod's deployment") //checking the output from kubectl for rolloutstatus require.Contains(t, buf.String(), tc.expectedOutput) require.Contains(t, term.Output(), "Proceeding to restart the non-operator deployment") require.Contains(t, term.Output(), "Running the rollout restart command for non-olm deployment") + assert.Equal(t, 2, cscalls) require.Contains(t, term.Output(), "Checking the status of the rolled out deployment") require.Contains(t, term.Output(), "Running the Rollout status to check the status of the deployment") } else if tc.labelValue == "codeready-toolchain" { @@ -230,19 +225,6 @@ func newPod(namespacedName types.NamespacedName) *corev1.Pod { //nolint:unparam } } -type RolloutRestartRESTClient struct { - *fake.RESTClient -} - -func requireDeploymentBeingUpdated(t *testing.T, fakeClient *test.FakeClient, namespacedName types.NamespacedName, currentReplicas int32, numberOfUpdateCalls *int) func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error { - return func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error { - deployment, ok := obj.(*appsv1.Deployment) - require.True(t, ok) - checkDeploymentBeingUpdated(t, fakeClient, namespacedName, currentReplicas, numberOfUpdateCalls, deployment) - return fakeClient.Client.Update(ctx, obj, opts...) 
- } -} - func checkDeploymentBeingUpdated(t *testing.T, fakeClient *test.FakeClient, namespacedName types.NamespacedName, currentReplicas int32, numberOfUpdateCalls *int, deployment *appsv1.Deployment) { // on the first call, we should have a deployment with 3 replicas ("current") and request to delete to 0 ("requested") if *numberOfUpdateCalls == 0 { From 0d80548e9d2146b04593f2f2872558a58c6e59a6 Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Thu, 7 Nov 2024 17:32:38 +0530 Subject: [PATCH 24/40] some changes Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart_test.go | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 2d64fbc..8a6e8f4 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -99,7 +99,7 @@ func TestRestartDeployment(t *testing.T) { GroupVersion: rolloutGroupVersionEncoder, NegotiatedSerializer: ns, Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { - body := io.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(encoder, deployment1)))) + body := io.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(encoder, deployment2)))) return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: body}, nil }), } @@ -118,6 +118,16 @@ func TestRestartDeployment(t *testing.T) { Type: appsv1.DeploymentAvailable, }}, } + deployment2.Status = appsv1.DeploymentStatus{ + Replicas: 1, + UpdatedReplicas: 1, + ReadyReplicas: 1, + AvailableReplicas: 1, + UnavailableReplicas: 0, + Conditions: []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentAvailable, + }}, + } c, err := runtime.DefaultUnstructuredConverter.ToUnstructured(deployment1.DeepCopyObject()) if err != nil { t.Errorf("unexpected err %s", err) @@ -150,7 +160,7 @@ func TestRestartDeployment(t *testing.T) { require.Contains(t, term.Output(), "Listing the pods to be deleted") require.Contains(t, term.Output(), "Starting to delete the pods") actual := &appsv1.Deployment{} - AssertObjectHasContent(t, fakeClient, namespacedName, actual, func() { + AssertObjectHasContent(t, fakeClient, namespacedName1, actual, func() { require.NotNil(t, actual.Spec.Replicas) assert.Equal(t, int32(1), *actual.Spec.Replicas) require.NotNil(t, actual.Annotations["restartedAt"]) @@ -164,9 +174,10 @@ func TestRestartDeployment(t *testing.T) { require.Contains(t, term.Output(), "Checking the status of the rolled out deployment") require.Contains(t, term.Output(), "Running the Rollout status to check the status of the deployment") } else if tc.labelValue == "codeready-toolchain" { - require.Error(t, err) + require.Error(t, err, "no operator based deployment restart happened as operator deployment found in namespace") } else if tc.labelValue == "kubesaw-controller-manager" { require.Contains(t, term.Output(), "No Non-operator deployment restart happened as Non-Operator deployment found in namespace") + assert.Equal(t, 1, cscalls) } }) From 096d49a0c2c6c52938646c89b591ddf64f129c7c Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Fri, 8 Nov 2024 13:28:38 +0530 Subject: [PATCH 25/40] adding some comments Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 8 +++-- pkg/cmd/adm/restart_test.go | 48 ++++++++++++--------------- pkg/cmd/adm/unregister_member_test.go | 6 ++-- 3 files changed, 30 insertions(+), 32 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index e849148..ce7d0cc 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -83,14 +83,15 @@ func 
restart(ctx *clicontext.CommandContext, clusterNames ...string) error { return restartDeployment(ctx, cl, cfg.OperatorNamespace, factory, ioStreams) } +// This function has the whole logic of getting the list of operator and non-operator based deployments, and then proceeds to restart/delete them accordingly func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { - ctx.Printlnf("Fetching the current OLM and non-OLM deployments of the operator in %s namespace", ns) + ctx.Printlnf("Fetching the current Operator and non-Operator deployments of the operator in %s namespace", ns) operatorDeploymentList, nonOperatorDeploymentList, err := getExistingDeployments(ctx, cl, ns) if err != nil { return err } - + //if there is no operator deployment, no need for restart if len(operatorDeploymentList.Items) == 0 { return fmt.Errorf("no operator based deployment restart happened as operator deployment found in namespace %s is 0", ns) } else { @@ -124,6 +125,7 @@ func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, } } } else { + //if there are no non-operator deployments ctx.Printlnf("No Non-operator deployment restart happened as Non-Operator deployment found in namespace %s is 0", ns) } } @@ -165,7 +167,7 @@ func restartNonOlmDeployments(ctx *clicontext.CommandContext, deployment appsv1. if err := o.Validate(); err != nil { panic(err) } - ctx.Printlnf("Running the rollout restart command for non-olm deployment %v", deployment.Name) + ctx.Printlnf("Running the rollout restart command for non-Operator deployment %v", deployment.Name) return o.RunRestart() } diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 8a6e8f4..4fe3ad6 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -43,6 +43,7 @@ func TestRestartDeployment(t *testing.T) { lsKey string lsValue string }{ + //operator and non-operator deployments "OperatorAndNonOperatorHostDeployment": { namespace: "toolchain-host-operator", name: "host-operator-controller-manager", @@ -56,6 +57,7 @@ func TestRestartDeployment(t *testing.T) { lsKey: "host", lsValue: "operator", }, + //only non-operator deployment "NonOperatorHostDeployment": { namespace: "toolchain-host-operator", name: "registration-service", @@ -65,6 +67,7 @@ func TestRestartDeployment(t *testing.T) { labelSelector: "toolchain.dev.openshift.com/provider=codeready-toolchain", expectedOutput: "deployment.apps/registration-service restarted\n", }, + //only operator deployment "OperatorHostDeployment": { namespace: "toolchain-host-operator", name: "host-operator-controller-manager", @@ -99,14 +102,14 @@ func TestRestartDeployment(t
*testing.T) { Type: appsv1.DeploymentAvailable, }}, } - deployment2.Status = appsv1.DeploymentStatus{ - Replicas: 1, - UpdatedReplicas: 1, - ReadyReplicas: 1, - AvailableReplicas: 1, - UnavailableReplicas: 0, - Conditions: []appsv1.DeploymentCondition{{ - Type: appsv1.DeploymentAvailable, - }}, - } c, err := runtime.DefaultUnstructuredConverter.ToUnstructured(deployment1.DeepCopyObject()) if err != nil { t.Errorf("unexpected err %s", err) @@ -152,32 +145,33 @@ func TestRestartDeployment(t *testing.T) { err := restartDeployment(ctx, fakeClient, namespacedName.Namespace, tf, streams) //then actualPod := &corev1.Pod{} + //checking the whole flow(starting with operator deployments & then to non operator deployments) if tc.labelValue == "kubesaw-controller-manager" && tc.labelValue1 == "codeready-toolchain" { - err = fakeClient.Get(ctx, namespacedName, actualPod) - require.True(t, apierror.IsNotFound(err)) - require.Contains(t, term.Output(), "Fetching the current OLM and non-OLM deployments of the operator in") + //checking the flow for operator deployments + require.Contains(t, term.Output(), "Fetching the current Operator and non-Operator deployments of the operator in") require.Contains(t, term.Output(), "Proceeding to delete the Pods of") require.Contains(t, term.Output(), "Listing the pods to be deleted") require.Contains(t, term.Output(), "Starting to delete the pods") - actual := &appsv1.Deployment{} - AssertObjectHasContent(t, fakeClient, namespacedName1, actual, func() { - require.NotNil(t, actual.Spec.Replicas) - assert.Equal(t, int32(1), *actual.Spec.Replicas) - require.NotNil(t, actual.Annotations["restartedAt"]) - }) + err = fakeClient.Get(ctx, namespacedName, actualPod) + //pods are actually deleted + require.True(t, apierror.IsNotFound(err)) require.Contains(t, term.Output(), "Checking the status of the deleted pod's deployment") //checking the output from kubectl for rolloutstatus require.Contains(t, buf.String(), tc.expectedOutput) + //checking the flowfor non-operator deployments require.Contains(t, term.Output(), "Proceeding to restart the non-operator deployment") - require.Contains(t, term.Output(), "Running the rollout restart command for non-olm deployment") - assert.Equal(t, 2, cscalls) + require.Contains(t, term.Output(), "Running the rollout restart command for non-Operator deployment") + assert.Equal(t, 2, csCalls) require.Contains(t, term.Output(), "Checking the status of the rolled out deployment") require.Contains(t, term.Output(), "Running the Rollout status to check the status of the deployment") } else if tc.labelValue == "codeready-toolchain" { + //Checking the logic where no operator deployments are there require.Error(t, err, "no operator based deployment restart happened as operator deployment found in namespace") + assert.Equal(t, 0, csCalls) } else if tc.labelValue == "kubesaw-controller-manager" { + //checking the logic when only operator based deployment is there and no non-operator based require.Contains(t, term.Output(), "No Non-operator deployment restart happened as Non-Operator deployment found in namespace") - assert.Equal(t, 1, cscalls) + assert.Equal(t, 1, csCalls) } }) @@ -199,8 +193,8 @@ func TestRestart(t *testing.T) { err := restart(ctx, "host") //then - require.Error(t, err) - require.Contains(t, term.Output(), "Fetching the current OLM and non-OLM deployments of the operator in") + require.Error(t, err) //we expect an error as we have not setp up any http client , just checking that it passes the cmd phase and restart method is called + 
require.Contains(t, term.Output(), "Fetching the current Operator and non-Operator deployments of the operator in") }) } diff --git a/pkg/cmd/adm/unregister_member_test.go b/pkg/cmd/adm/unregister_member_test.go index da7648e..83e10e8 100644 --- a/pkg/cmd/adm/unregister_member_test.go +++ b/pkg/cmd/adm/unregister_member_test.go @@ -29,15 +29,17 @@ func TestUnregisterMemberWhenAnswerIsY(t *testing.T) { err := UnregisterMemberCluster(ctx, "member1") // then - require.Error(t, err) + require.Error(t, err) // since we have not set up the http client required for restart(), it will throw an error. + // also the restart functionality is being tested in restart_test.go, so we are not duplicating the test, + //just an assertion to make sure that restart is started AssertToolchainClusterDoesNotExist(t, fakeClient, toolchainCluster) assert.Contains(t, term.Output(), "!!! DANGER ZONE !!!") assert.NotContains(t, term.Output(), "THIS COMMAND WILL CAUSE UNREGISTER MEMBER CLUSTER FORM HOST CLUSTER. MAKE SURE THERE IS NO USERS LEFT IN THE MEMBER CLUSTER BEFORE UNREGISTERING IT") assert.Contains(t, term.Output(), "Delete Member cluster stated above from the Host cluster?") assert.Contains(t, term.Output(), "The deletion of the Toolchain member cluster from the Host cluster has been triggered") assert.NotContains(t, term.Output(), "cool-token") - AssertDeploymentHasReplicas(t, fakeClient, hostDeploymentName, 1) + require.Contains(t, term.Output(), "Fetching the current Operator and non-Operator deployments of the operator in") } func TestUnregisterMemberWhenAnswerIsN(t *testing.T) { From bf633038af6a465f840fbb96584a5f8a35f23cc6 Mon Sep 17 00:00:00 2001 From: Feny Mehta <fbm3307@gmail.com> Date: Fri, 8 Nov 2024 15:15:37 +0530 Subject: [PATCH 26/40] autoscaling buffer test case Signed-off-by: Feny Mehta <fbm3307@gmail.com> --- pkg/cmd/adm/restart.go | 1 + pkg/cmd/adm/restart_test.go | 30 +++++++++++++++++++++++++++--- 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index ce7d0cc..9614b12 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -123,6 +123,7 @@ func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, return err } } + ctx.Printlnf("No Non-operator deployment restart happened as Non-Operator deployment is autoscaling-buffer found in namespace %s", ns) } } else { //if there are no non-operator deployments diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 4fe3ad6..122fbb2 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -57,6 +57,21 @@ func TestRestartDeployment(t *testing.T) { lsKey: "host", lsValue: "operator", }, + //operator and non-operator deployments, checking for autoscaler deployments, + //it should be treated as no non-operator deployment available + "OperatorAndNonOperatorWithAutoscalerDeployment": { + namespace: "toolchain-member-operator", + name: "member-operator-controller-manager", + name1: "autoscaling-buffer", + labelKey: "kubesaw-control-plane", + labelValue: "kubesaw-controller-manager", + labelKey1: "toolchain.dev.openshift.com/provider", + labelValue1: "codeready-toolchain", + expectedMsg: "deployment \"member-operator-controller-manager\" successfully rolled out\n", + labelSelector: "kubesaw-control-plane=kubesaw-controller-manager", + lsKey: "host", + lsValue: "operator", + }, //only non-operator deployment "NonOperatorHostDeployment": { namespace: "toolchain-host-operator", @@ -146,7 +161,7 @@ func TestRestartDeployment(t *testing.T) { //then actualPod := &corev1.Pod{} 
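+	// actualPod is filled by the fakeClient.Get calls in the branches below; a NotFound
+	// error is what proves that the operator pods were actually deleted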
//checking the whole flow(starting with operator deployments & then to non operator deployments) - if tc.labelValue == "kubesaw-controller-manager" && tc.labelValue1 == "codeready-toolchain" { + if tc.labelValue == "kubesaw-controller-manager" && tc.labelValue1 == "codeready-toolchain" && tc.name1 != "autoscaling-buffer" { //checking the flow for operator deployments require.Contains(t, term.Output(), "Fetching the current Operator and non-Operator deployments of the operator in") require.Contains(t, term.Output(), "Proceeding to delete the Pods of") @@ -158,9 +173,15 @@ func TestRestartDeployment(t *testing.T) { require.Contains(t, term.Output(), "Checking the status of the deleted pod's deployment") //checking the output from kubectl for rolloutstatus require.Contains(t, buf.String(), tc.expectedOutput) - //checking the flowfor non-operator deployments + //checking the flow for non-operator deployments require.Contains(t, term.Output(), "Proceeding to restart the non-operator deployment") require.Contains(t, term.Output(), "Running the rollout restart command for non-Operator deployment") + actual := &appsv1.Deployment{} + AssertObjectHasContent(t, fakeClient, namespacedName, actual, func() { + require.NotNil(t, actual.Spec.Replicas) + assert.Equal(t, int32(1), *actual.Spec.Replicas) + require.NotNil(t, actual.Annotations["restartedAt"]) + }) assert.Equal(t, 2, csCalls) require.Contains(t, term.Output(), "Checking the status of the rolled out deployment") require.Contains(t, term.Output(), "Running the Rollout status to check the status of the deployment") @@ -168,10 +189,13 @@ func TestRestartDeployment(t *testing.T) { //Checking the logic where no operator deployments are there require.Error(t, err, "no operator based deployment restart happened as operator deployment found in namespace") assert.Equal(t, 0, csCalls) - } else if tc.labelValue == "kubesaw-controller-manager" { + } else if tc.labelValue == "kubesaw-controller-manager" && tc.name1 != "autoscaling-buffer" { //checking the logic when only operator based deployment is there and no non-operator based require.Contains(t, term.Output(), "No Non-operator deployment restart happened as Non-Operator deployment found in namespace") assert.Equal(t, 1, csCalls) + } else if tc.name1 == "autoscaling-buffer" { + require.Contains(t, term.Output(), "No Non-operator deployment restart happened as Non-Operator deployment is autoscaling-buffer found in namespace") + assert.Equal(t, 1, csCalls) } }) From 09411ada1197a020f0bb24540890eaeece945c98 Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Tue, 12 Nov 2024 14:04:59 +0530 Subject: [PATCH 27/40] Modification of test cases Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 71 +++++----- pkg/cmd/adm/restart_test.go | 191 ++++++++++++++++++++++++-- pkg/cmd/adm/unregister_member_test.go | 4 +- 3 files changed, 218 insertions(+), 48 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index 9614b12..f492bbb 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -18,6 +18,10 @@ import ( runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) +type NonOperatorDeploymentsRestarterFunc func(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error + +type RolloutStatusCheckerFunc func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error + // NewRestartCmd() is a function to restart the whole operator, it relies on the target 
cluster and fetches the cluster config // 1. If the command is run for host operator, it restart the whole host operator.(it deletes olm based pods(host-operator pods), // waits for the new pods to come up, then uses rollout-restart command for non-olm based - registration-service) @@ -80,11 +84,11 @@ func restart(ctx *clicontext.CommandContext, clusterNames ...string) error { return nil } - return restartDeployment(ctx, cl, cfg.OperatorNamespace, factory, ioStreams) + return restartDeployment(ctx, cl, cfg.OperatorNamespace, factory, ioStreams, checkRolloutStatus, restartNonOperatorDeployments) } // This function has the whole logic of getting the list of operator and non-operator based deployment, then proceed on restarting/deleting accordingly -func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { +func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, checker RolloutStatusCheckerFunc, restarter NonOperatorDeploymentsRestarterFunc) error { ctx.Printlnf("Fetching the current Operator and non-Operator deployments of the operator in %s namespace", ns) operatorDeploymentList, nonOperatorDeploymentList, err := getExistingDeployments(ctx, cl, ns) @@ -93,43 +97,45 @@ func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, } //if there is no operator deployment, no need for restart if len(operatorDeploymentList.Items) == 0 { - return fmt.Errorf("no operator based deployment restart happened as operator deployment found in namespace %s is 0", ns) - } else { - for _, operatorDeployment := range operatorDeploymentList.Items { - ctx.Printlnf("Proceeding to delete the Pods of %v", operatorDeployment.Name) + return fmt.Errorf("no operator based deployment found in namespace %s , hence no restart happened", ns) + } + //Deleting the pods of the operator based deployment and then checking the status + for _, operatorDeployment := range operatorDeploymentList.Items { + ctx.Printlnf("Proceeding to delete the Pods of %v", operatorDeployment.Name) + + if err := deleteAndWaitForPods(ctx, cl, operatorDeployment); err != nil { + return err + } + + ctx.Printlnf("Checking the status of the deleted pod's deployment %v", operatorDeployment.Name) + //check the rollout status + if err := checker(ctx, f, ioStreams, "kubesaw-control-plane=kubesaw-controller-manager"); err != nil { + return err + } + } - if err := deleteAndWaitForPods(ctx, cl, operatorDeployment); err != nil { + if len(nonOperatorDeploymentList.Items) == 0 { + // if there are no non-operator deployments + ctx.Printlnf("No Non-operator deployment found in namespace %s, hence no restart happened", ns) + return nil + } + for _, nonOperatorDeployment := range nonOperatorDeploymentList.Items { + if nonOperatorDeployment.Name != "autoscaling-buffer" { + ctx.Printlnf("Proceeding to restart the non-operator deployment %v", nonOperatorDeployment.Name) + + if err := restarter(ctx, nonOperatorDeployment, f, ioStreams); err != nil { return err } - - ctx.Printlnf("Checking the status of the deleted pod's deployment %v", operatorDeployment.Name) //check the rollout status - if err := checkRolloutStatus(ctx, f, ioStreams, "kubesaw-control-plane=kubesaw-controller-manager"); err != nil { + ctx.Printlnf("Checking the status of the rolled out deployment %v", nonOperatorDeployment.Name) + if err := checker(ctx, f, ioStreams, 
"toolchain.dev.openshift.com/provider=codeready-toolchain"); err != nil { return err } + return nil } - - if len(nonOperatorDeploymentList.Items) != 0 { - for _, nonOperatorDeployment := range nonOperatorDeploymentList.Items { - if nonOperatorDeployment.Name != "autoscaling-buffer" { - ctx.Printlnf("Proceeding to restart the non-operator deployment %v", nonOperatorDeployment.Name) - - if err := restartNonOlmDeployments(ctx, nonOperatorDeployment, f, ioStreams); err != nil { - return err - } - //check the rollout status - ctx.Printlnf("Checking the status of the rolled out deployment %v", nonOperatorDeployment.Name) - if err := checkRolloutStatus(ctx, f, ioStreams, "toolchain.dev.openshift.com/provider=codeready-toolchain"); err != nil { - return err - } - } - ctx.Printlnf("No Non-operator deployment restart happened as Non-Operator deployment is autoscaling-buffer found in namespace %s", ns) - } - } else { - //if there are no non-operator deployments - ctx.Printlnf("No Non-operator deployment restart happened as Non-Operator deployment found in namespace %s is 0", ns) - } + ctx.Printlnf("Found only autoscaling-buffer deployment in namespace %s , which is not required to be restarted", ns) } + return nil } @@ -155,7 +161,7 @@ func deleteAndWaitForPods(ctx *clicontext.CommandContext, cl runtimeclient.Clien } -func restartNonOlmDeployments(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { +func restartNonOperatorDeployments(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { o := kubectlrollout.NewRolloutRestartOptions(ioStreams) @@ -173,6 +179,7 @@ func restartNonOlmDeployments(ctx *clicontext.CommandContext, deployment appsv1. 
} func checkRolloutStatus(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { + cmd := kubectlrollout.NewRolloutStatusOptions(ioStreams) if err := cmd.Complete(f, []string{"deployment"}); err != nil { diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 122fbb2..c79154f 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -2,6 +2,7 @@ package adm import ( "bytes" + "fmt" "io" "net/http" "testing" @@ -24,6 +25,7 @@ import ( "k8s.io/client-go/rest/fake" cgtesting "k8s.io/client-go/testing" cmdtesting "k8s.io/kubectl/pkg/cmd/testing" + cmdutil "k8s.io/kubectl/pkg/cmd/util" "k8s.io/kubectl/pkg/scheme" ) @@ -157,19 +159,12 @@ func TestRestartDeployment(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) //when - err := restartDeployment(ctx, fakeClient, namespacedName.Namespace, tf, streams) + err := restartDeployment(ctx, fakeClient, namespacedName.Namespace, tf, streams, checkRolloutStatus, restartNonOperatorDeployments) + //then - actualPod := &corev1.Pod{} + //checking the whole flow(starting with operator deployments & then to non operator deployments) if tc.labelValue == "kubesaw-controller-manager" && tc.labelValue1 == "codeready-toolchain" && tc.name1 != "autoscaling-buffer" { - //checking the flow for operator deployments - require.Contains(t, term.Output(), "Fetching the current Operator and non-Operator deployments of the operator in") - require.Contains(t, term.Output(), "Proceeding to delete the Pods of") - require.Contains(t, term.Output(), "Listing the pods to be deleted") - require.Contains(t, term.Output(), "Starting to delete the pods") - err = fakeClient.Get(ctx, namespacedName, actualPod) - //pods are actually deleted - require.True(t, apierror.IsNotFound(err)) require.Contains(t, term.Output(), "Checking the status of the deleted pod's deployment") //checking the output from kubectl for rolloutstatus require.Contains(t, buf.String(), tc.expectedOutput) @@ -187,14 +182,14 @@ func TestRestartDeployment(t *testing.T) { require.Contains(t, term.Output(), "Running the Rollout status to check the status of the deployment") } else if tc.labelValue == "codeready-toolchain" { //Checking the logic where no operator deployments are there - require.Error(t, err, "no operator based deployment restart happened as operator deployment found in namespace") + require.Error(t, err, "no operator based deployment found in namespace toolchain-host-operator , hence no restart happened") assert.Equal(t, 0, csCalls) } else if tc.labelValue == "kubesaw-controller-manager" && tc.name1 != "autoscaling-buffer" { //checking the logic when only operator based deployment is there and no non-operator based - require.Contains(t, term.Output(), "No Non-operator deployment restart happened as Non-Operator deployment found in namespace") + require.Contains(t, term.Output(), "No Non-operator deployment found in namespace", tc.namespace, ", hence no restart happened") assert.Equal(t, 1, csCalls) } else if tc.name1 == "autoscaling-buffer" { - require.Contains(t, term.Output(), "No Non-operator deployment restart happened as Non-Operator deployment is autoscaling-buffer found in namespace") + require.Contains(t, term.Output(), "Found only autoscaling-buffer deployment in namespace toolchain-member-operator , which is not required to be restarted") assert.Equal(t, 1, csCalls) } @@ -202,6 +197,172 @@ func TestRestartDeployment(t *testing.T) { } } +func TestOperator(t *testing.T) { + 
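// A hedged aside, not part of the patch: checkRolloutStatus above delegates to kubectl's rollout
// status viewer, which for a Deployment keeps watching until roughly the conditions below hold
// (a simplified paraphrase of kubectl's logic; assumes appsv1 "k8s.io/api/apps/v1"):
func rolledOut(d *appsv1.Deployment) bool {
	if d.Generation > d.Status.ObservedGeneration {
		return false // the controller has not observed the latest spec yet
	}
	replicas := int32(1)
	if d.Spec.Replicas != nil {
		replicas = *d.Spec.Replicas
	}
	// every replica updated to the new template, and every one of them available
	return d.Status.UpdatedReplicas == replicas && d.Status.AvailableReplicas == replicas
}

// This is also why the tests in this series feed a Deployment whose status counts are all set
// to 1 through a fake watcher: the status command returns as soon as it sees such an object.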
//given + testIOStreams := genericclioptions.NewTestIOStreamsDiscard() + SetFileConfig(t, Host()) + toolchainCluster := NewToolchainCluster(ToolchainClusterName("host")) + hostDeployment := newDeployment(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"), 1) + hostDeployment.Labels = make(map[string]string) + hostDeployment.Labels["kubesaw-control-plane"] = "kubesaw-controller-manager" + regServDeployment := newDeployment(test.NamespacedName("toolchain-host-operator", "registration-service"), 1) + regServDeployment.Labels = make(map[string]string) + regServDeployment.Labels["toolchain.dev.openshift.com/provider"] = "codeready-toolchain" + hostPod := newPod(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager")) + memberDeployment := newDeployment(test.NamespacedName("toolchain-member-operator", "member-operator-controller-manager"), 1) + memberDeployment.Labels = make(map[string]string) + memberDeployment.Labels["kubesaw-control-plane"] = "kubesaw-controller-manager" + autoscalarDeployment := newDeployment(test.NamespacedName("toolchain-member-operator", "autoscaling-buffer"), 1) + autoscalarDeployment.Labels = make(map[string]string) + autoscalarDeployment.Labels["toolchain.dev.openshift.com/provider"] = "codeready-toolchain" + actualPod := &corev1.Pod{} + term := NewFakeTerminalWithResponse("Y") + + t.Run("restart deployment returns an error if no operator based deployment found", func(t *testing.T) { + //given + newClient, fakeClient := NewFakeClients(t, toolchainCluster, regServDeployment) + ctx := clicontext.NewCommandContext(term, newClient) + + //when + err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, testIOStreams, + func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { + require.Equal(t, testIOStreams, ioStreams) + require.Equal(t, nil, f) + require.Equal(t, "toolchain.dev.openshift.com/provider=codeready-toolchain", labelSelector) + return nil + }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { + require.Equal(t, regServDeployment, deployment) + require.Equal(t, testIOStreams, ioStreams) + require.Equal(t, nil, f) + return nil + }) + + //then + require.Error(t, err, "no operator based deployment found in namespace toolchain-host-operator , hence no restart happened") + }) + t.Run("restart deployment works successfully with whole operator(operator, non operator)", func(t *testing.T) { + //given + newClient, fakeClient := NewFakeClients(t, toolchainCluster, hostDeployment, hostPod, regServDeployment) + ctx := clicontext.NewCommandContext(term, newClient) + + //when + err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, testIOStreams, + func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { + require.Equal(t, testIOStreams, ioStreams) + require.Equal(t, nil, f) + return nil + }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { + require.Equal(t, testIOStreams, ioStreams) + require.Equal(t, nil, f) + return nil + }) + + //then + require.NoError(t, err) + //checking the flow for operator deployments + require.Contains(t, term.Output(), "Fetching the current Operator and non-Operator deployments of the operator in") + require.Contains(t, term.Output(), "Proceeding to delete 
the Pods of") + require.Contains(t, term.Output(), "Listing the pods to be deleted") + require.Contains(t, term.Output(), "Starting to delete the pods") + err = fakeClient.Get(ctx, test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"), actualPod) + //pods are actually deleted + require.True(t, apierror.IsNotFound(err)) + require.Contains(t, term.Output(), "Checking the status of the deleted pod's deployment") + //checking the flow for non-operator deployments + require.Contains(t, term.Output(), "Proceeding to restart the non-operator deployment") + require.Contains(t, term.Output(), "Checking the status of the rolled out deployment") + }) + + t.Run("restart deployment works successfully when only operator based deployment", func(t *testing.T) { + //given + newClient, fakeClient := NewFakeClients(t, toolchainCluster, hostDeployment, hostPod) + ctx := clicontext.NewCommandContext(term, newClient) + + //when + err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, testIOStreams, + func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { + require.Equal(t, testIOStreams, ioStreams) + require.Equal(t, nil, f) + return nil + }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { + require.Equal(t, testIOStreams, ioStreams) + require.Equal(t, nil, f) + return nil + }) + + //then + require.NoError(t, err) + require.Contains(t, term.Output(), "No Non-operator deployment found in namespace toolchain-host-operator, hence no restart happened") + }) + t.Run("rollout restart returns an error", func(t *testing.T) { + //given + newClient, fakeClient := NewFakeClients(t, toolchainCluster, hostDeployment, regServDeployment, hostPod) + ctx := clicontext.NewCommandContext(term, newClient) + expectedErr := fmt.Errorf("Could not do rollout restart of the deployment") + //when + err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, testIOStreams, + func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { + require.Equal(t, testIOStreams, ioStreams) + require.Equal(t, nil, f) + return nil + }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { + require.Equal(t, testIOStreams, ioStreams) + require.Equal(t, nil, f) + return expectedErr + }) + + //then + require.EqualError(t, err, expectedErr.Error()) + }) + + t.Run("rollout status for the deleted pods(operator) works", func(t *testing.T) { + //given + newClient, fakeClient := NewFakeClients(t, toolchainCluster, hostDeployment) + ctx := clicontext.NewCommandContext(term, newClient) + + //when + err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, testIOStreams, + func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { + require.Equal(t, testIOStreams, ioStreams) + return nil + }, nil) + + //then + require.NoError(t, err) + }) + + t.Run("error in rollout status of the deleted pods(operator)", func(t *testing.T) { + //given + newClient, fakeClient := NewFakeClients(t, toolchainCluster, hostDeployment) + ctx := clicontext.NewCommandContext(term, newClient) + expectedErr := fmt.Errorf("Could not check the status of the deployment") + //when + err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, 
genericclioptions.NewTestIOStreamsDiscard(), + func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { + return expectedErr + }, nil) + + //then + require.EqualError(t, err, expectedErr.Error()) + }) + + t.Run("autoscalling deployment should not restart", func(t *testing.T) { + //given + newClient, fakeClient := NewFakeClients(t, toolchainCluster, memberDeployment, autoscalarDeployment) + ctx := clicontext.NewCommandContext(term, newClient) + //when + err := restartDeployment(ctx, fakeClient, "toolchain-member-operator", nil, genericclioptions.NewTestIOStreamsDiscard(), + func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { + return nil + }, nil) + + //then + require.NoError(t, err) + require.Contains(t, term.Output(), "Found only autoscaling-buffer deployment in namespace toolchain-member-operator , which is not required to be restarted") + }) + +} + func TestRestart(t *testing.T) { t.Run("restart should start with y response", func(t *testing.T) { @@ -209,6 +370,8 @@ func TestRestart(t *testing.T) { SetFileConfig(t, Host()) toolchainCluster := NewToolchainCluster(ToolchainClusterName("host")) deployment := newDeployment(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"), 1) + deployment.Labels = make(map[string]string) + deployment.Labels["kubesaw-control-plane"] = "kubesaw-controller-manager" term := NewFakeTerminalWithResponse("Y") newClient, _ := NewFakeClients(t, toolchainCluster, deployment) ctx := clicontext.NewCommandContext(term, newClient) @@ -217,7 +380,7 @@ func TestRestart(t *testing.T) { err := restart(ctx, "host") //then - require.Error(t, err) //we expect an error as we have not setp up any http client , just checking that it passes the cmd phase and restart method is called + require.ErrorContains(t, err, "no such host") //we expect an error as we have not set up any http client , just checking that it passes the cmd phase and restartdeployment method is called require.Contains(t, term.Output(), "Fetching the current Operator and non-Operator deployments of the operator in") }) diff --git a/pkg/cmd/adm/unregister_member_test.go b/pkg/cmd/adm/unregister_member_test.go index 83e10e8..e4b7fa0 100644 --- a/pkg/cmd/adm/unregister_member_test.go +++ b/pkg/cmd/adm/unregister_member_test.go @@ -29,9 +29,9 @@ func TestUnregisterMemberWhenAnswerIsY(t *testing.T) { err := UnregisterMemberCluster(ctx, "member1") // then - require.Error(t, err) // since we have not set up http client required for restart(),it will throw an error. + require.ErrorContains(t, err, "no such host") // since we have not set up http client required for restart(),it will throw an error. // also the restart functionality is being test in restart_test.go, not deuplicating the test, - //just a assertion to make sure that restart is started + //just a assertion to make sure that restart is called and started AssertToolchainClusterDoesNotExist(t, fakeClient, toolchainCluster) assert.Contains(t, term.Output(), "!!! DANGER ZONE !!!") assert.NotContains(t, term.Output(), "THIS COMMAND WILL CAUSE UNREGISTER MEMBER CLUSTER FORM HOST CLUSTER. 
MAKE SURE THERE IS NO USERS LEFT IN THE MEMBER CLUSTER BEFORE UNREGISTERING IT") From 857fdc9f2b6aab3ee4ed706cba500dea3f53c248 Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Tue, 12 Nov 2024 14:10:01 +0530 Subject: [PATCH 28/40] Go lint Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index c79154f..4361731 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -227,13 +227,13 @@ func TestOperator(t *testing.T) { err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, testIOStreams, func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { require.Equal(t, testIOStreams, ioStreams) - require.Equal(t, nil, f) + require.Nil(t, f) require.Equal(t, "toolchain.dev.openshift.com/provider=codeready-toolchain", labelSelector) return nil }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { require.Equal(t, regServDeployment, deployment) require.Equal(t, testIOStreams, ioStreams) - require.Equal(t, nil, f) + require.Nil(t, f) return nil }) @@ -249,11 +249,11 @@ func TestOperator(t *testing.T) { err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, testIOStreams, func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { require.Equal(t, testIOStreams, ioStreams) - require.Equal(t, nil, f) + require.Nil(t, f) return nil }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { require.Equal(t, testIOStreams, ioStreams) - require.Equal(t, nil, f) + require.Nil(t, f) return nil }) From 760cf0cfaaf541c81adae7c8c7165bcd82cdca0d Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Tue, 12 Nov 2024 14:19:13 +0530 Subject: [PATCH 29/40] Test case of status Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart_test.go | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 4361731..3f7c083 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -286,7 +286,7 @@ func TestOperator(t *testing.T) { return nil }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { require.Equal(t, testIOStreams, ioStreams) - require.Equal(t, nil, f) + require.Nil(t, f) return nil }) @@ -303,11 +303,11 @@ func TestOperator(t *testing.T) { err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, testIOStreams, func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { require.Equal(t, testIOStreams, ioStreams) - require.Equal(t, nil, f) + require.Nil(t, f) return nil }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { require.Equal(t, testIOStreams, ioStreams) - require.Equal(t, nil, f) + require.Nil(t, f) return expectedErr }) @@ -346,6 +346,21 @@ func TestOperator(t *testing.T) { require.EqualError(t, err, expectedErr.Error()) }) + t.Run("error in rollout status of the Non operator deployments", func(t *testing.T) { + //given + newClient, fakeClient := NewFakeClients(t, toolchainCluster, 
hostDeployment, regServDeployment) + ctx := clicontext.NewCommandContext(term, newClient) + expectedErr := fmt.Errorf("Could not check the status of the deployment") + //when + err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, genericclioptions.NewTestIOStreamsDiscard(), + func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { + return expectedErr + }, nil) + + //then + require.EqualError(t, err, expectedErr.Error()) + }) + t.Run("autoscalling deployment should not restart", func(t *testing.T) { //given newClient, fakeClient := NewFakeClients(t, toolchainCluster, memberDeployment, autoscalarDeployment) From 3517338330e96a6dfe71f3df5e39c6ae97e5383c Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Tue, 12 Nov 2024 14:20:43 +0530 Subject: [PATCH 30/40] Linter Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 3f7c083..6570f93 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -282,7 +282,7 @@ func TestOperator(t *testing.T) { err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, testIOStreams, func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { require.Equal(t, testIOStreams, ioStreams) - require.Equal(t, nil, f) + require.Nil(t, f) return nil }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { require.Equal(t, testIOStreams, ioStreams) From 2704a913cdfcff86a6bcd36433d2b14887d19f24 Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Tue, 12 Nov 2024 18:47:03 +0530 Subject: [PATCH 31/40] test of unregister_member Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 2 +- pkg/cmd/adm/restart_test.go | 22 +++--------- pkg/cmd/adm/unregister_member.go | 6 ++-- pkg/cmd/adm/unregister_member_test.go | 50 ++++++++++++++++++++++----- 4 files changed, 50 insertions(+), 30 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index f492bbb..3fad0e0 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -51,7 +51,6 @@ func NewRestartCmd() *cobra.Command { func restart(ctx *clicontext.CommandContext, clusterNames ...string) error { clusterName := clusterNames[0] kubeConfigFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag() - factory := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(kubeConfigFlags)) ioStreams := genericclioptions.IOStreams{ In: os.Stdin, Out: os.Stdout, @@ -73,6 +72,7 @@ func restart(ctx *clicontext.CommandContext, clusterNames ...string) error { return err } kubeConfigFlags.KubeConfig = &kubeconfig + factory := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(kubeConfigFlags)) cl, err := ctx.NewClient(cfg.Token, cfg.ServerAPI) if err != nil { diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 6570f93..06cc661 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -29,7 +29,7 @@ import ( "k8s.io/kubectl/pkg/scheme" ) -func TestRestartDeployment(t *testing.T) { +func TestRolloutKubectlFunctionality(t *testing.T) { // given tests := map[string]struct { namespace string @@ -197,7 +197,7 @@ func TestRestartDeployment(t *testing.T) { } } -func TestOperator(t *testing.T) { +func TestRestartDeployment(t *testing.T) { //given testIOStreams := 
genericclioptions.NewTestIOStreamsDiscard() SetFileConfig(t, Host()) @@ -209,6 +209,7 @@ func TestOperator(t *testing.T) { regServDeployment.Labels = make(map[string]string) regServDeployment.Labels["toolchain.dev.openshift.com/provider"] = "codeready-toolchain" hostPod := newPod(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager")) + noisePod := newPod(test.NamespacedName("toolchain-host-operator", "noise")) memberDeployment := newDeployment(test.NamespacedName("toolchain-member-operator", "member-operator-controller-manager"), 1) memberDeployment.Labels = make(map[string]string) memberDeployment.Labels["kubesaw-control-plane"] = "kubesaw-controller-manager" @@ -242,7 +243,7 @@ func TestOperator(t *testing.T) { }) t.Run("restart deployment works successfully with whole operator(operator, non operator)", func(t *testing.T) { //given - newClient, fakeClient := NewFakeClients(t, toolchainCluster, hostDeployment, hostPod, regServDeployment) + newClient, fakeClient := NewFakeClients(t, toolchainCluster, hostDeployment, hostPod, regServDeployment, noisePod) ctx := clicontext.NewCommandContext(term, newClient) //when @@ -346,21 +347,6 @@ func TestOperator(t *testing.T) { require.EqualError(t, err, expectedErr.Error()) }) - t.Run("error in rollout status of the Non operator deployments", func(t *testing.T) { - //given - newClient, fakeClient := NewFakeClients(t, toolchainCluster, hostDeployment, regServDeployment) - ctx := clicontext.NewCommandContext(term, newClient) - expectedErr := fmt.Errorf("Could not check the status of the deployment") - //when - err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, genericclioptions.NewTestIOStreamsDiscard(), - func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { - return expectedErr - }, nil) - - //then - require.EqualError(t, err, expectedErr.Error()) - }) - t.Run("autoscalling deployment should not restart", func(t *testing.T) { //given newClient, fakeClient := NewFakeClients(t, toolchainCluster, memberDeployment, autoscalarDeployment) diff --git a/pkg/cmd/adm/unregister_member.go b/pkg/cmd/adm/unregister_member.go index b04821c..ba87260 100644 --- a/pkg/cmd/adm/unregister_member.go +++ b/pkg/cmd/adm/unregister_member.go @@ -14,6 +14,8 @@ import ( "k8s.io/apimachinery/pkg/types" ) +type restartFunc func(ctx *clicontext.CommandContext, clusterNames ...string) error + func NewUnregisterMemberCmd() *cobra.Command { return &cobra.Command{ Use: "unregister-member ", @@ -23,12 +25,12 @@ func NewUnregisterMemberCmd() *cobra.Command { RunE: func(cmd *cobra.Command, args []string) error { term := ioutils.NewTerminal(cmd.InOrStdin, cmd.OutOrStdout) ctx := clicontext.NewCommandContext(term, client.DefaultNewClient) - return UnregisterMemberCluster(ctx, args[0]) + return UnregisterMemberCluster(ctx, args[0], restart) }, } } -func UnregisterMemberCluster(ctx *clicontext.CommandContext, clusterName string) error { +func UnregisterMemberCluster(ctx *clicontext.CommandContext, clusterName string, restart restartFunc) error { hostClusterConfig, err := configuration.LoadClusterConfig(ctx, configuration.HostName) if err != nil { return err diff --git a/pkg/cmd/adm/unregister_member_test.go b/pkg/cmd/adm/unregister_member_test.go index e4b7fa0..f8ebae0 100644 --- a/pkg/cmd/adm/unregister_member_test.go +++ b/pkg/cmd/adm/unregister_member_test.go @@ -1,6 +1,7 @@ package adm import ( + "fmt" "testing" 
"github.com/codeready-toolchain/toolchain-common/pkg/test" @@ -26,12 +27,12 @@ func TestUnregisterMemberWhenAnswerIsY(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) // when - err := UnregisterMemberCluster(ctx, "member1") + err := UnregisterMemberCluster(ctx, "member1", func(ctx *clicontext.CommandContext, clusterNames ...string) error { + return nil + }) // then - require.ErrorContains(t, err, "no such host") // since we have not set up http client required for restart(),it will throw an error. - // also the restart functionality is being test in restart_test.go, not deuplicating the test, - //just a assertion to make sure that restart is called and started + require.NoError(t, err) AssertToolchainClusterDoesNotExist(t, fakeClient, toolchainCluster) assert.Contains(t, term.Output(), "!!! DANGER ZONE !!!") assert.NotContains(t, term.Output(), "THIS COMMAND WILL CAUSE UNREGISTER MEMBER CLUSTER FORM HOST CLUSTER. MAKE SURE THERE IS NO USERS LEFT IN THE MEMBER CLUSTER BEFORE UNREGISTERING IT") @@ -39,7 +40,30 @@ func TestUnregisterMemberWhenAnswerIsY(t *testing.T) { assert.Contains(t, term.Output(), "The deletion of the Toolchain member cluster from the Host cluster has been triggered") assert.NotContains(t, term.Output(), "cool-token") AssertDeploymentHasReplicas(t, fakeClient, hostDeploymentName, 1) - require.Contains(t, term.Output(), "Fetching the current Operator and non-Operator deployments of the operator in") +} + +func TestUnregisterMemberWhenRestartError(t *testing.T) { + // given + toolchainCluster := NewToolchainCluster(ToolchainClusterName("member-cool-server.com")) + hostDeploymentName := test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager") + deployment := newDeployment(hostDeploymentName, 1) + deployment.Labels = map[string]string{"kubesaw-control-plane": "kubesaw-controller-manager"} + + newClient, fakeClient := NewFakeClients(t, toolchainCluster, deployment) + numberOfUpdateCalls := 0 + fakeClient.MockUpdate = whenDeploymentThenUpdated(t, fakeClient, hostDeploymentName, 1, &numberOfUpdateCalls) + + SetFileConfig(t, Host(), Member()) + term := NewFakeTerminalWithResponse("y") + ctx := clicontext.NewCommandContext(term, newClient) + + // when + err := UnregisterMemberCluster(ctx, "member1", func(ctx *clicontext.CommandContext, clusterNames ...string) error { + return fmt.Errorf("restart did not happen") + }) + + // then + require.EqualError(t, err, "restart did not happen") } func TestUnregisterMemberWhenAnswerIsN(t *testing.T) { @@ -51,7 +75,9 @@ func TestUnregisterMemberWhenAnswerIsN(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) // when - err := UnregisterMemberCluster(ctx, "member1") + err := UnregisterMemberCluster(ctx, "member1", func(ctx *clicontext.CommandContext, clusterNames ...string) error { + return nil + }) // then require.NoError(t, err) @@ -72,7 +98,9 @@ func TestUnregisterMemberWhenNotFound(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) // when - err := UnregisterMemberCluster(ctx, "member1") + err := UnregisterMemberCluster(ctx, "member1", func(ctx *clicontext.CommandContext, clusterNames ...string) error { + return nil + }) // then require.EqualError(t, err, "toolchainclusters.toolchain.dev.openshift.com \"member-cool-server.com\" not found") @@ -93,7 +121,9 @@ func TestUnregisterMemberWhenUnknownClusterName(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) // when - err := UnregisterMemberCluster(ctx, "some") + err := 
UnregisterMemberCluster(ctx, "some", func(ctx *clicontext.CommandContext, clusterNames ...string) error { + return nil + }) // then require.Error(t, err) @@ -116,7 +146,9 @@ func TestUnregisterMemberLacksPermissions(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) // when - err := UnregisterMemberCluster(ctx, "member1") + err := UnregisterMemberCluster(ctx, "member1", func(ctx *clicontext.CommandContext, clusterNames ...string) error { + return nil + }) // then require.EqualError(t, err, "ksctl command failed: the token in your ksctl.yaml file is missing") From 17da5712fca21cec728909f9665179fe338652de Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Thu, 14 Nov 2024 15:54:23 +0530 Subject: [PATCH 32/40] phase-3 rc Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 103 +++--- pkg/cmd/adm/restart_test.go | 441 +++++++++++--------------- pkg/cmd/adm/unregister_member.go | 2 +- pkg/cmd/adm/unregister_member_test.go | 43 ++- 4 files changed, 290 insertions(+), 299 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index 3fad0e0..cb62c04 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -3,6 +3,7 @@ package adm import ( "fmt" "os" + "time" "github.com/kubesaw/ksctl/pkg/client" "github.com/kubesaw/ksctl/pkg/configuration" @@ -18,9 +19,10 @@ import ( runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) -type NonOperatorDeploymentsRestarterFunc func(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error - -type RolloutStatusCheckerFunc func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error +type ( + NonOperatorDeploymentsRestarterFunc func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error + RolloutStatusCheckerFunc func(ctx *clicontext.CommandContext, labelSelector string) error +) // NewRestartCmd() is a function to restart the whole operator, it relies on the target cluster and fetches the cluster config // 1. If the command is run for host operator, it restart the whole host operator.(it deletes olm based pods(host-operator pods), @@ -42,14 +44,13 @@ func NewRestartCmd() *cobra.Command { RunE: func(cmd *cobra.Command, args []string) error { term := ioutils.NewTerminal(cmd.InOrStdin, cmd.OutOrStdout) ctx := clicontext.NewCommandContext(term, client.DefaultNewClient) - return restart(ctx, args...) 
+			return restart(ctx, args[0])
 		},
 	}
 	return command
 }

-func restart(ctx *clicontext.CommandContext, clusterNames ...string) error {
-	clusterName := clusterNames[0]
+func restart(ctx *clicontext.CommandContext, clusterName string) error {
 	kubeConfigFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
 	ioStreams := genericclioptions.IOStreams{
 		In:  os.Stdin,
 		Out: os.Stdout,
@@ -84,63 +85,74 @@ func restart(ctx *clicontext.CommandContext, clusterNames ...string) error {
 		return nil
 	}

-	return restartDeployment(ctx, cl, cfg.OperatorNamespace, factory, ioStreams, checkRolloutStatus, restartNonOperatorDeployments)
+	return restartDeployments(ctx, cl, cfg.OperatorNamespace, func(ctx *clicontext.CommandContext, labelSelector string) error {
+		return checkRolloutStatus(ctx, factory, ioStreams, labelSelector)
+	}, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error {
+		return restartNonOlmDeployments(ctx, deployment, factory, ioStreams)
+	})
 }

-// This function has the whole logic of getting the list of operator and non-operator based deployment, then proceed on restarting/deleting accordingly
-func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, checker RolloutStatusCheckerFunc, restarter NonOperatorDeploymentsRestarterFunc) error {
+// This function has the whole logic of getting the list of OLM and non-OLM based deployments, and then proceeds with restarting/deleting them accordingly
+func restartDeployments(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, checker RolloutStatusCheckerFunc, restarter NonOperatorDeploymentsRestarterFunc) error {

-	ctx.Printlnf("Fetching the current Operator and non-Operator deployments of the operator in %s namespace", ns)
-	operatorDeploymentList, nonOperatorDeploymentList, err := getExistingDeployments(ctx, cl, ns)
+	ctx.Printlnf("Fetching the current OLM and non-OLM deployments of the operator in %s namespace", ns)
+	olmDeploymentList, nonOlmDeploymentList, err := getExistingDeployments(ctx, cl, ns)
 	if err != nil {
 		return err
 	}
-	//if there is no operator deployment, no need for restart
-	if len(operatorDeploymentList.Items) == 0 {
-		return fmt.Errorf("no operator based deployment found in namespace %s , hence no restart happened", ns)
+	//if there is no OLM operator deployment, there is nothing to restart
+	if len(olmDeploymentList.Items) == 0 {
+		return fmt.Errorf("no operator deployment found in namespace %s, it is required for the operator deployment to be running so the command can proceed with restarting the KubeSaw components", ns)
 	}
-	//Deleting the pods of the operator based deployment and then checking the status
-	for _, operatorDeployment := range operatorDeploymentList.Items {
-		ctx.Printlnf("Proceeding to delete the Pods of %v", operatorDeployment.Name)
+	//Deleting the pods of the OLM-based operator deployment and then checking the status
+	for _, olmOperatorDeployment := range olmDeploymentList.Items {
+		ctx.Printlnf("Proceeding to delete the Pods of %v", olmOperatorDeployment.Name)

-		if err := deleteAndWaitForPods(ctx, cl, operatorDeployment); err != nil {
+		if err := deleteDeploymentPods(ctx, cl, olmOperatorDeployment); err != nil {
 			return err
 		}
+		//sleep briefly so that the subsequent status check observes the state after the pod deletion
+		time.Sleep(1 * time.Second)

 		ctx.Printlnf("Checking the status of the deleted pod's deployment %v", olmOperatorDeployment.Name)
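		// A hedged aside, not part of the patch: the fixed one-second sleep above is a simple
		// heuristic. A more robust alternative would be to poll until the deployment reports
		// ready, e.g. with "k8s.io/apimachinery/pkg/util/wait" (sketch only; `key` identifies
		// the deployment being checked):
		//
		//	_ = wait.PollUntilContextTimeout(ctx, time.Second, 30*time.Second, true,
		//		func(ctx context.Context) (bool, error) {
		//			d := &appsv1.Deployment{}
		//			if err := cl.Get(ctx, key, d); err != nil {
		//				return false, err
		//			}
		//			// done once every desired replica is ready again
		//			return d.Spec.Replicas != nil && d.Status.ReadyReplicas == *d.Spec.Replicas, nil
		//		})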
deleted pod's deployment %v", operatorDeployment.Name) + ctx.Printlnf("Checking the status of the deleted pod's deployment %v", olmOperatorDeployment.Name) //check the rollout status - if err := checker(ctx, f, ioStreams, "kubesaw-control-plane=kubesaw-controller-manager"); err != nil { + if err := checker(ctx, "kubesaw-control-plane=kubesaw-controller-manager"); err != nil { return err } } - if len(nonOperatorDeploymentList.Items) == 0 { - // if there are no non-operator deployments - ctx.Printlnf("No Non-operator deployment found in namespace %s, hence no restart happened", ns) + //Non-Olm deployments like reg-svc,to be restarted + //if no Non-OL deployment found it should just return with a message + if len(nonOlmDeploymentList.Items) == 0 { + // if there are no non-olm deployments + ctx.Printlnf("No Non-OLM deployment found in namespace %s, hence no restart happened", ns) return nil } - for _, nonOperatorDeployment := range nonOperatorDeploymentList.Items { - if nonOperatorDeployment.Name != "autoscaling-buffer" { - ctx.Printlnf("Proceeding to restart the non-operator deployment %v", nonOperatorDeployment.Name) - - if err := restarter(ctx, nonOperatorDeployment, f, ioStreams); err != nil { + // if there is a Non-olm deployment found use rollout-restart command + for _, nonOlmDeployment := range nonOlmDeploymentList.Items { + //it should only use rollout restart for the deployments which are NOT autoscaling-buffer + if nonOlmDeployment.Name != "autoscaling-buffer" { + ctx.Printlnf("Proceeding to restart the non-olm deployment %v", nonOlmDeployment.Name) + //using rollout-restart + if err := restarter(ctx, nonOlmDeployment); err != nil { return err } //check the rollout status - ctx.Printlnf("Checking the status of the rolled out deployment %v", nonOperatorDeployment.Name) - if err := checker(ctx, f, ioStreams, "toolchain.dev.openshift.com/provider=codeready-toolchain"); err != nil { + ctx.Printlnf("Checking the status of the rolled out deployment %v", nonOlmDeployment.Name) + if err := checker(ctx, "toolchain.dev.openshift.com/provider=codeready-toolchain"); err != nil { return err } return nil } + //message if there is a autoscaling buffer, it shouldn't be restarted but successfully exit ctx.Printlnf("Found only autoscaling-buffer deployment in namespace %s , which is not required to be restarted", ns) } return nil } -func deleteAndWaitForPods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deployment appsv1.Deployment) error { - ctx.Printlnf("Listing the pods to be deleted") +func deleteDeploymentPods(ctx *clicontext.CommandContext, cl runtimeclient.Client, deployment appsv1.Deployment) error { //get pods by label selector from the deployment pods := corev1.PodList{} selector, _ := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) @@ -149,32 +161,34 @@ func deleteAndWaitForPods(ctx *clicontext.CommandContext, cl runtimeclient.Clien runtimeclient.InNamespace(deployment.Namespace)); err != nil { return err } - ctx.Printlnf("Starting to delete the pods") + //delete pods for _, pod := range pods.Items { pod := pod // TODO We won't need it after upgrading to go 1.22: https://go.dev/blog/loopvar-preview + ctx.Printlnf("Deleting pod: %s", pod.Name) if err := cl.Delete(ctx, &pod); err != nil { return err } } + return nil } -func restartNonOperatorDeployments(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { +func restartNonOlmDeployments(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f 
cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { o := kubectlrollout.NewRolloutRestartOptions(ioStreams) if err := o.Complete(f, nil, []string{"deployment"}); err != nil { - panic(err) + return err } o.Resources = []string{"deployment/" + deployment.Name} if err := o.Validate(); err != nil { - panic(err) + return err } - ctx.Printlnf("Running the rollout restart command for non-Operator deployment %v", deployment.Name) + ctx.Printlnf("Running the rollout restart command for non-Olm deployment %v", deployment.Name) return o.RunRestart() } @@ -183,11 +197,12 @@ func checkRolloutStatus(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStr cmd := kubectlrollout.NewRolloutStatusOptions(ioStreams) if err := cmd.Complete(f, []string{"deployment"}); err != nil { - panic(err) + return err } + cmd.LabelSelector = labelSelector if err := cmd.Validate(); err != nil { - panic(err) + return err } ctx.Printlnf("Running the Rollout status to check the status of the deployment") return cmd.Run() @@ -195,19 +210,19 @@ func checkRolloutStatus(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStr func getExistingDeployments(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string) (*appsv1.DeploymentList, *appsv1.DeploymentList, error) { - operatorDeployments := &appsv1.DeploymentList{} - if err := cl.List(ctx, operatorDeployments, + olmDeployments := &appsv1.DeploymentList{} + if err := cl.List(ctx, olmDeployments, runtimeclient.InNamespace(ns), runtimeclient.MatchingLabels{"kubesaw-control-plane": "kubesaw-controller-manager"}); err != nil { return nil, nil, err } - nonOperatorDeployments := &appsv1.DeploymentList{} - if err := cl.List(ctx, nonOperatorDeployments, + nonOlmDeployments := &appsv1.DeploymentList{} + if err := cl.List(ctx, nonOlmDeployments, runtimeclient.InNamespace(ns), runtimeclient.MatchingLabels{"toolchain.dev.openshift.com/provider": "codeready-toolchain"}); err != nil { return nil, nil, err } - return operatorDeployments, nonOperatorDeployments, nil + return olmDeployments, nonOlmDeployments, nil } diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 06cc661..31043f3 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -25,197 +25,153 @@ import ( "k8s.io/client-go/rest/fake" cgtesting "k8s.io/client-go/testing" cmdtesting "k8s.io/kubectl/pkg/cmd/testing" - cmdutil "k8s.io/kubectl/pkg/cmd/util" "k8s.io/kubectl/pkg/scheme" ) -func TestRolloutKubectlFunctionality(t *testing.T) { - // given - tests := map[string]struct { - namespace string - name string - name1 string - labelKey string - labelValue string - labelKey1 string - labelValue1 string - expectedMsg string - labelSelector string - expectedOutput string - lsKey string - lsValue string - }{ - //operator and non-operator deployments - "OperatorAndNonOperatorHostDeployment": { - namespace: "toolchain-host-operator", - name: "host-operator-controller-manager", - name1: "registration-service", - labelKey: "kubesaw-control-plane", - labelValue: "kubesaw-controller-manager", - labelKey1: "toolchain.dev.openshift.com/provider", - labelValue1: "codeready-toolchain", - expectedMsg: "deployment \"host-operator-controller-manager\" successfully rolled out\n", - labelSelector: "kubesaw-control-plane=kubesaw-controller-manager", - lsKey: "host", - lsValue: "operator", - }, - //operator and non-operator deployments, checking for autoscaler deployments, - //it should be treated as no non-operator deployment available - "OperatorAndNonOperatorWithAutoscalerDeployment": { 
- namespace: "toolchain-member-operator", - name: "member-operator-controller-manager", - name1: "autoscaling-buffer", - labelKey: "kubesaw-control-plane", - labelValue: "kubesaw-controller-manager", - labelKey1: "toolchain.dev.openshift.com/provider", - labelValue1: "codeready-toolchain", - expectedMsg: "deployment \"member-operator-controller-manager\" successfully rolled out\n", - labelSelector: "kubesaw-control-plane=kubesaw-controller-manager", - lsKey: "host", - lsValue: "operator", - }, - //only non-operator deployment - "NonOperatorHostDeployment": { - namespace: "toolchain-host-operator", - name: "registration-service", - labelKey: "toolchain.dev.openshift.com/provider", - labelValue: "codeready-toolchain", - expectedMsg: "deployment \"registration-service\" successfully rolled out\n", - labelSelector: "toolchain.dev.openshift.com/provider=codeready-toolchain", - expectedOutput: "deployment.apps/registration-service restarted\n", - }, - //only operator deployment - "OperatorHostDeployment": { - namespace: "toolchain-host-operator", - name: "host-operator-controller-manager", - labelKey: "kubesaw-control-plane", - labelValue: "kubesaw-controller-manager", - expectedMsg: "deployment \"host-operator-controller-manager\" successfully rolled out\n", - labelSelector: "kubesaw-control-plane=kubesaw-controller-manager", - lsKey: "host", - lsValue: "operator", - }, +func TestKubectlRolloutFunctionality(t *testing.T) { + + HostNamespacedName := types.NamespacedName{ + Namespace: "toolchain-host-operator", + Name: "host-operator-controller-manager", } - for k, tc := range tests { - t.Run(k, func(t *testing.T) { - //given - namespacedName := types.NamespacedName{ - Namespace: tc.namespace, - Name: tc.name, - } - namespacedName1 := types.NamespacedName{ - Namespace: tc.namespace, - Name: tc.name1, - } - var rolloutGroupVersionEncoder = schema.GroupVersion{Group: "apps", Version: "v1"} - deployment1 := newDeployment(namespacedName, 1) - deployment2 := newDeployment(namespacedName1, 1) - ns := scheme.Codecs.WithoutConversion() - tf := cmdtesting.NewTestFactory().WithNamespace(namespacedName.Namespace) - tf.ClientConfigVal = cmdtesting.DefaultClientConfig() - info, _ := runtime.SerializerInfoForMediaType(ns.SupportedMediaTypes(), runtime.ContentTypeJSON) - encoder := ns.EncoderForVersion(info.Serializer, rolloutGroupVersionEncoder) - tf.Client = &fake.RESTClient{ - GroupVersion: rolloutGroupVersionEncoder, - NegotiatedSerializer: ns, - Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { - body := io.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(encoder, deployment1)))) - return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: body}, nil - }), - } - csCalls := 0 - tf.FakeDynamicClient.WatchReactionChain = nil - tf.FakeDynamicClient.AddWatchReactor("*", func(action cgtesting.Action) (handled bool, ret watch.Interface, err error) { - csCalls++ - fw := watch.NewFake() - deployment1.Status = appsv1.DeploymentStatus{ - Replicas: 1, - UpdatedReplicas: 1, - ReadyReplicas: 1, - AvailableReplicas: 1, - UnavailableReplicas: 0, - Conditions: []appsv1.DeploymentCondition{{ - Type: appsv1.DeploymentAvailable, - }}, - } - c, err := runtime.DefaultUnstructuredConverter.ToUnstructured(deployment1.DeepCopyObject()) - if err != nil { - t.Errorf("unexpected err %s", err) - } - u := &unstructured.Unstructured{} - u.SetUnstructuredContent(c) - go fw.Add(u) - return true, fw, nil - }) + RegNamespacedName := types.NamespacedName{ + Namespace: 
"toolchain-host-operator", + Name: "registration-service", + } + var rolloutGroupVersionEncoder = schema.GroupVersion{Group: "apps", Version: "v1"} + hostDep := newDeployment(HostNamespacedName, 1) + regDep := newDeployment(RegNamespacedName, 1) + ns := scheme.Codecs.WithoutConversion() + tf := cmdtesting.NewTestFactory().WithNamespace(HostNamespacedName.Namespace) + tf.ClientConfigVal = cmdtesting.DefaultClientConfig() + info, _ := runtime.SerializerInfoForMediaType(ns.SupportedMediaTypes(), runtime.ContentTypeJSON) + encoder := ns.EncoderForVersion(info.Serializer, rolloutGroupVersionEncoder) + tf.Client = &fake.RESTClient{ + GroupVersion: rolloutGroupVersionEncoder, + NegotiatedSerializer: ns, + Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + body := io.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(encoder, hostDep)))) + return &http.Response{StatusCode: http.StatusOK, Header: cmdtesting.DefaultHeader(), Body: body}, nil + }), + } + csCalls := 0 + tf.FakeDynamicClient.WatchReactionChain = nil + tf.FakeDynamicClient.AddWatchReactor("*", func(action cgtesting.Action) (handled bool, ret watch.Interface, err error) { + csCalls++ + fw := watch.NewFake() + hostDep.Status = appsv1.DeploymentStatus{ + Replicas: 1, + UpdatedReplicas: 1, + ReadyReplicas: 1, + AvailableReplicas: 1, + UnavailableReplicas: 0, + Conditions: []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentAvailable, + }}, + } + c, err := runtime.DefaultUnstructuredConverter.ToUnstructured(hostDep.DeepCopyObject()) + if err != nil { + t.Errorf("unexpected err %s", err) + } + u := &unstructured.Unstructured{} + u.SetUnstructuredContent(c) + go fw.Add(u) + return true, fw, nil + }) + + streams, _, buf, _ := genericclioptions.NewTestIOStreams() + term := NewFakeTerminalWithResponse("Y") + pod := newPod(test.NamespacedName(hostDep.Namespace, hostDep.Name)) + hostDep.Labels = map[string]string{"kubesaw-control-plane": "kubesaw-controller-manager"} + regDep.Labels = map[string]string{"toolchain.dev.openshift.com/provider": "codeready-toolchain"} - streams, _, buf, _ := genericclioptions.NewTestIOStreams() - term := NewFakeTerminalWithResponse("Y") - pod := newPod(test.NamespacedName(namespacedName.Namespace, namespacedName.Name)) - deployment1.Labels = make(map[string]string) - deployment1.Labels[tc.labelKey] = tc.labelValue - deployment2.Labels = make(map[string]string) - deployment2.Labels[tc.labelKey1] = tc.labelValue1 - newClient, fakeClient := NewFakeClients(t, deployment1, deployment2, pod) - ctx := clicontext.NewCommandContext(term, newClient) - - //when - err := restartDeployment(ctx, fakeClient, namespacedName.Namespace, tf, streams, checkRolloutStatus, restartNonOperatorDeployments) - - //then - - //checking the whole flow(starting with operator deployments & then to non operator deployments) - if tc.labelValue == "kubesaw-controller-manager" && tc.labelValue1 == "codeready-toolchain" && tc.name1 != "autoscaling-buffer" { - require.Contains(t, term.Output(), "Checking the status of the deleted pod's deployment") - //checking the output from kubectl for rolloutstatus - require.Contains(t, buf.String(), tc.expectedOutput) - //checking the flow for non-operator deployments - require.Contains(t, term.Output(), "Proceeding to restart the non-operator deployment") - require.Contains(t, term.Output(), "Running the rollout restart command for non-Operator deployment") - actual := &appsv1.Deployment{} - AssertObjectHasContent(t, fakeClient, namespacedName, actual, func() { - require.NotNil(t, 
actual.Spec.Replicas) - assert.Equal(t, int32(1), *actual.Spec.Replicas) - require.NotNil(t, actual.Annotations["restartedAt"]) - }) - assert.Equal(t, 2, csCalls) - require.Contains(t, term.Output(), "Checking the status of the rolled out deployment") - require.Contains(t, term.Output(), "Running the Rollout status to check the status of the deployment") - } else if tc.labelValue == "codeready-toolchain" { - //Checking the logic where no operator deployments are there - require.Error(t, err, "no operator based deployment found in namespace toolchain-host-operator , hence no restart happened") - assert.Equal(t, 0, csCalls) - } else if tc.labelValue == "kubesaw-controller-manager" && tc.name1 != "autoscaling-buffer" { - //checking the logic when only operator based deployment is there and no non-operator based - require.Contains(t, term.Output(), "No Non-operator deployment found in namespace", tc.namespace, ", hence no restart happened") - assert.Equal(t, 1, csCalls) - } else if tc.name1 == "autoscaling-buffer" { - require.Contains(t, term.Output(), "Found only autoscaling-buffer deployment in namespace toolchain-member-operator , which is not required to be restarted") - assert.Equal(t, 1, csCalls) - } + t.Run("Rollout Restart and Rollout Status works successfuly", func(t *testing.T) { + csCalls = 0 + newClient, fakeClient := NewFakeClients(t, hostDep, regDep, pod) + ctx := clicontext.NewCommandContext(term, newClient) + //when + err := restartDeployments(ctx, fakeClient, HostNamespacedName.Namespace, func(ctx *clicontext.CommandContext, labelSelector string) error { + return checkRolloutStatus(ctx, tf, streams, labelSelector) + }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { + return restartNonOlmDeployments(ctx, deployment, tf, streams) }) - } -} + //then + require.NoError(t, err) + require.Contains(t, term.Output(), "Checking the status of the deleted pod's deployment") + //checking the output from kubectl for rolloutstatus + require.Contains(t, buf.String(), "deployment.apps/host-operator-controller-manager restarted\n") + //checking the flow for non-operator deployments + require.Contains(t, term.Output(), "Proceeding to restart the non-olm deployment") + require.Contains(t, term.Output(), "Running the rollout restart command for non-Olm deployment") + actual := &appsv1.Deployment{} + AssertObjectHasContent(t, fakeClient, HostNamespacedName, actual, func() { + require.NotNil(t, actual.Spec.Replicas) + assert.Equal(t, int32(1), *actual.Spec.Replicas) + require.NotNil(t, actual.Annotations["restartedAt"]) + }) + assert.Equal(t, 2, csCalls) + require.Contains(t, term.Output(), "Checking the status of the rolled out deployment") + require.Contains(t, term.Output(), "Running the Rollout status to check the status of the deployment") + + }) + + t.Run("Error No OLM deployment", func(t *testing.T) { + csCalls = 0 + newClient, fakeClient := NewFakeClients(t, regDep) + ctx := clicontext.NewCommandContext(term, newClient) + + //when + err := restartDeployments(ctx, fakeClient, HostNamespacedName.Namespace, func(ctx *clicontext.CommandContext, labelSelector string) error { + return checkRolloutStatus(ctx, tf, streams, labelSelector) + }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { + return restartNonOlmDeployments(ctx, deployment, tf, streams) + }) + + //then + require.Error(t, err, "no operator based deployment found in namespace toolchain-host-operator , hence no restart happened") + assert.Equal(t, 0, csCalls) + + }) + t.Run("No 
Non-OLM deployment", func(t *testing.T) { + csCalls = 0 + newClient, fakeClient := NewFakeClients(t, hostDep, pod) + ctx := clicontext.NewCommandContext(term, newClient) + + //when + err := restartDeployments(ctx, fakeClient, HostNamespacedName.Namespace, func(ctx *clicontext.CommandContext, labelSelector string) error { + return checkRolloutStatus(ctx, tf, streams, labelSelector) + }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { + return restartNonOlmDeployments(ctx, deployment, tf, streams) + }) + + //then + require.NoError(t, err) + //checking the logic when only operator based deployment is there and no non-operator based + require.Contains(t, term.Output(), "No Non-OLM deployment found in namespace toolchain-host-operator, hence no restart happened") + assert.Equal(t, 1, csCalls) + + }) + +} func TestRestartDeployment(t *testing.T) { //given - testIOStreams := genericclioptions.NewTestIOStreamsDiscard() - SetFileConfig(t, Host()) + SetFileConfig(t, Host(), Member()) toolchainCluster := NewToolchainCluster(ToolchainClusterName("host")) + + //OLM-deployments + //host hostDeployment := newDeployment(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"), 1) - hostDeployment.Labels = make(map[string]string) - hostDeployment.Labels["kubesaw-control-plane"] = "kubesaw-controller-manager" - regServDeployment := newDeployment(test.NamespacedName("toolchain-host-operator", "registration-service"), 1) - regServDeployment.Labels = make(map[string]string) - regServDeployment.Labels["toolchain.dev.openshift.com/provider"] = "codeready-toolchain" + hostDeployment.Labels = map[string]string{"kubesaw-control-plane": "kubesaw-controller-manager"} hostPod := newPod(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager")) - noisePod := newPod(test.NamespacedName("toolchain-host-operator", "noise")) - memberDeployment := newDeployment(test.NamespacedName("toolchain-member-operator", "member-operator-controller-manager"), 1) - memberDeployment.Labels = make(map[string]string) - memberDeployment.Labels["kubesaw-control-plane"] = "kubesaw-controller-manager" - autoscalarDeployment := newDeployment(test.NamespacedName("toolchain-member-operator", "autoscaling-buffer"), 1) - autoscalarDeployment.Labels = make(map[string]string) - autoscalarDeployment.Labels["toolchain.dev.openshift.com/provider"] = "codeready-toolchain" + extraPod := newPod(test.NamespacedName("toolchain-host-operator", "extra")) + + //Non-OLM deployments + //reg-svc + regServDeployment := newDeployment(test.NamespacedName("toolchain-host-operator", "registration-service"), 1) + regServDeployment.Labels = map[string]string{"toolchain.dev.openshift.com/provider": "codeready-toolchain"} + actualPod := &corev1.Pod{} term := NewFakeTerminalWithResponse("Y") @@ -225,52 +181,44 @@ func TestRestartDeployment(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) //when - err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, testIOStreams, - func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { - require.Equal(t, testIOStreams, ioStreams) - require.Nil(t, f) + err := restartDeployments(ctx, fakeClient, "toolchain-host-operator", + func(ctx *clicontext.CommandContext, labelSelector string) error { require.Equal(t, "toolchain.dev.openshift.com/provider=codeready-toolchain", labelSelector) return nil - }, func(ctx *clicontext.CommandContext, deployment 
appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { + }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { require.Equal(t, regServDeployment, deployment) - require.Equal(t, testIOStreams, ioStreams) - require.Nil(t, f) return nil }) //then - require.Error(t, err, "no operator based deployment found in namespace toolchain-host-operator , hence no restart happened") + require.Error(t, err, "no operator based deployment found in namespace toolchain-host-operator , it is required for the operator deployment to be running so the command can proceed with restarting the KubeSaw components") }) + t.Run("restart deployment works successfully with whole operator(operator, non operator)", func(t *testing.T) { //given - newClient, fakeClient := NewFakeClients(t, toolchainCluster, hostDeployment, hostPod, regServDeployment, noisePod) + newClient, fakeClient := NewFakeClients(t, toolchainCluster, hostDeployment, hostPod, regServDeployment, extraPod) ctx := clicontext.NewCommandContext(term, newClient) //when - err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, testIOStreams, - func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { - require.Equal(t, testIOStreams, ioStreams) - require.Nil(t, f) + err := restartDeployments(ctx, fakeClient, "toolchain-host-operator", + func(ctx *clicontext.CommandContext, labelSelector string) error { return nil - }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { - require.Equal(t, testIOStreams, ioStreams) - require.Nil(t, f) + }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return nil }) //then require.NoError(t, err) //checking the flow for operator deployments - require.Contains(t, term.Output(), "Fetching the current Operator and non-Operator deployments of the operator in") + require.Contains(t, term.Output(), "Fetching the current OLM and non-OLM deployments of the operator in toolchain-host-operator namespace") require.Contains(t, term.Output(), "Proceeding to delete the Pods of") - require.Contains(t, term.Output(), "Listing the pods to be deleted") - require.Contains(t, term.Output(), "Starting to delete the pods") + require.Contains(t, term.Output(), "Deleting pod: host-operator-controller-manager") err = fakeClient.Get(ctx, test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"), actualPod) //pods are actually deleted require.True(t, apierror.IsNotFound(err)) require.Contains(t, term.Output(), "Checking the status of the deleted pod's deployment") //checking the flow for non-operator deployments - require.Contains(t, term.Output(), "Proceeding to restart the non-operator deployment") + require.Contains(t, term.Output(), "Proceeding to restart the non-olm deployment") require.Contains(t, term.Output(), "Checking the status of the rolled out deployment") }) @@ -280,35 +228,28 @@ func TestRestartDeployment(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) //when - err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, testIOStreams, - func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { - require.Equal(t, testIOStreams, ioStreams) - require.Nil(t, f) + err := restartDeployments(ctx, fakeClient, "toolchain-host-operator", + func(ctx *clicontext.CommandContext, 
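/*
The OLM and non-OLM deployments asserted on in these tests are told apart
purely by label. A simplified sketch of the selection that
getExistingDeployments performs, assuming a controller-runtime client
(error handling elided for brevity):

	olmDeployments := &appsv1.DeploymentList{}
	_ = cl.List(context.TODO(), olmDeployments,
		runtimeclient.InNamespace(ns),
		runtimeclient.MatchingLabels{"kubesaw-control-plane": "kubesaw-controller-manager"})

	nonOlmDeployments := &appsv1.DeploymentList{}
	_ = cl.List(context.TODO(), nonOlmDeployments,
		runtimeclient.InNamespace(ns),
		runtimeclient.MatchingLabels{"toolchain.dev.openshift.com/provider": "codeready-toolchain"})
*/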
labelSelector string) error { return nil - }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { - require.Equal(t, testIOStreams, ioStreams) - require.Nil(t, f) + }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return nil }) //then require.NoError(t, err) - require.Contains(t, term.Output(), "No Non-operator deployment found in namespace toolchain-host-operator, hence no restart happened") + require.Contains(t, term.Output(), "No Non-OLM deployment found in namespace toolchain-host-operator, hence no restart happened") }) + t.Run("rollout restart returns an error", func(t *testing.T) { //given newClient, fakeClient := NewFakeClients(t, toolchainCluster, hostDeployment, regServDeployment, hostPod) ctx := clicontext.NewCommandContext(term, newClient) expectedErr := fmt.Errorf("Could not do rollout restart of the deployment") //when - err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, testIOStreams, - func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { - require.Equal(t, testIOStreams, ioStreams) - require.Nil(t, f) + err := restartDeployments(ctx, fakeClient, "toolchain-host-operator", + func(ctx *clicontext.CommandContext, labelSelector string) error { return nil - }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment, f cmdutil.Factory, ioStreams genericclioptions.IOStreams) error { - require.Equal(t, testIOStreams, ioStreams) - require.Nil(t, f) + }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return expectedErr }) @@ -322,9 +263,8 @@ func TestRestartDeployment(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) //when - err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, testIOStreams, - func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { - require.Equal(t, testIOStreams, ioStreams) + err := restartDeployments(ctx, fakeClient, "toolchain-host-operator", + func(ctx *clicontext.CommandContext, labelSelector string) error { return nil }, nil) @@ -338,8 +278,8 @@ func TestRestartDeployment(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) expectedErr := fmt.Errorf("Could not check the status of the deployment") //when - err := restartDeployment(ctx, fakeClient, "toolchain-host-operator", nil, genericclioptions.NewTestIOStreamsDiscard(), - func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { + err := restartDeployments(ctx, fakeClient, "toolchain-host-operator", + func(ctx *clicontext.CommandContext, labelSelector string) error { return expectedErr }, nil) @@ -347,44 +287,40 @@ func TestRestartDeployment(t *testing.T) { require.EqualError(t, err, expectedErr.Error()) }) - t.Run("autoscalling deployment should not restart", func(t *testing.T) { - //given - newClient, fakeClient := NewFakeClients(t, toolchainCluster, memberDeployment, autoscalarDeployment) - ctx := clicontext.NewCommandContext(term, newClient) - //when - err := restartDeployment(ctx, fakeClient, "toolchain-member-operator", nil, genericclioptions.NewTestIOStreamsDiscard(), - func(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { - return nil - }, nil) +} - //then - require.NoError(t, err) - require.Contains(t, 
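/*
Background for the restartedAt assertion earlier in this test: `kubectl
rollout restart` triggers a new rollout by stamping the pod template with a
kubectl.kubernetes.io/restartedAt annotation. A hand-rolled equivalent
(sketch only; assumes a controller-runtime client named cl and a
*appsv1.Deployment named deployment):

	patch := runtimeclient.MergeFrom(deployment.DeepCopy())
	if deployment.Spec.Template.Annotations == nil {
		deployment.Spec.Template.Annotations = map[string]string{}
	}
	deployment.Spec.Template.Annotations["kubectl.kubernetes.io/restartedAt"] =
		time.Now().Format(time.RFC3339)
	err := cl.Patch(context.TODO(), deployment, patch)
*/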
term.Output(), "Found only autoscaling-buffer deployment in namespace toolchain-member-operator , which is not required to be restarted") - }) +func TestRestartAutoScalerDeployment(t *testing.T) { + //given + SetFileConfig(t, Host(), Member()) + toolchainCluster := NewToolchainCluster(ToolchainClusterName("host")) -} + //OLM-deployments + //member + memberDeployment := newDeployment(test.NamespacedName("toolchain-member-operator", "member-operator-controller-manager"), 1) + memberDeployment.Labels = map[string]string{"kubesaw-control-plane": "kubesaw-controller-manager"} -func TestRestart(t *testing.T) { + //Non-OLM deployments + //autoscaler + autoscalerDeployment := newDeployment(test.NamespacedName("toolchain-member-operator", "autoscaling-buffer"), 1) + autoscalerDeployment.Labels = map[string]string{"toolchain.dev.openshift.com/provider": "codeready-toolchain"} + + term := NewFakeTerminalWithResponse("Y") - t.Run("restart should start with y response", func(t *testing.T) { + t.Run("autoscalling deployment should not restart", func(t *testing.T) { //given - SetFileConfig(t, Host()) - toolchainCluster := NewToolchainCluster(ToolchainClusterName("host")) - deployment := newDeployment(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"), 1) - deployment.Labels = make(map[string]string) - deployment.Labels["kubesaw-control-plane"] = "kubesaw-controller-manager" - term := NewFakeTerminalWithResponse("Y") - newClient, _ := NewFakeClients(t, toolchainCluster, deployment) + newClient, fakeClient := NewFakeClients(t, toolchainCluster, memberDeployment, autoscalerDeployment) ctx := clicontext.NewCommandContext(term, newClient) - //when - err := restart(ctx, "host") + err := restartDeployments(ctx, fakeClient, "toolchain-member-operator", + func(ctx *clicontext.CommandContext, labelSelector string) error { + return nil + }, mockRolloutRestartInterceptor()) //then - require.ErrorContains(t, err, "no such host") //we expect an error as we have not set up any http client , just checking that it passes the cmd phase and restartdeployment method is called - require.Contains(t, term.Output(), "Fetching the current Operator and non-Operator deployments of the operator in") + require.NoError(t, err) + require.Contains(t, term.Output(), "Found only autoscaling-buffer deployment in namespace toolchain-member-operator , which is not required to be restarted") + require.NotContains(t, term.Output(), "Proceeding to restart the non-olm deployment") }) - } func newDeployment(namespacedName types.NamespacedName, replicas int32) *appsv1.Deployment { //nolint:unparam @@ -395,7 +331,7 @@ func newDeployment(namespacedName types.NamespacedName, replicas int32) *appsv1. 
},
 		Spec: appsv1.DeploymentSpec{
 			Replicas: &replicas,
-			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"host": "controller"}},
+			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"dummy-key": "controller"}},
 		},
 	}
 }
@@ -409,7 +345,7 @@ func newPod(namespacedName types.NamespacedName) *corev1.Pod { //nolint:unparam
 	ObjectMeta: metav1.ObjectMeta{
 		Namespace: namespacedName.Namespace,
 		Name:      namespacedName.Name,
-		Labels:    map[string]string{"host": "controller"},
+		Labels:    map[string]string{"dummy-key": "controller"},
 	},
 	Spec: corev1.PodSpec{},
 	Status: corev1.PodStatus{
@@ -433,3 +369,12 @@ func checkDeploymentBeingUpdated(t *testing.T, fakeClient *test.FakeClient, name
 	}
 	*numberOfUpdateCalls++
 }
+
+func mockRolloutRestartInterceptor() func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error {
+	return func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error {
+		if deployment.Name == "autoscaling-buffer" {
+			return fmt.Errorf("autoscaling deployment found")
+		}
+		return nil
+	}
+}
diff --git a/pkg/cmd/adm/unregister_member.go b/pkg/cmd/adm/unregister_member.go
index ba87260..c082321 100644
--- a/pkg/cmd/adm/unregister_member.go
+++ b/pkg/cmd/adm/unregister_member.go
@@ -14,7 +14,7 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 )
 
-type restartFunc func(ctx *clicontext.CommandContext, clusterNames ...string) error
+type restartFunc func(ctx *clicontext.CommandContext, clusterName string) error
 
 func NewUnregisterMemberCmd() *cobra.Command {
 	return &cobra.Command{
diff --git a/pkg/cmd/adm/unregister_member_test.go b/pkg/cmd/adm/unregister_member_test.go
index f8ebae0..13265c3 100644
--- a/pkg/cmd/adm/unregister_member_test.go
+++ b/pkg/cmd/adm/unregister_member_test.go
@@ -27,7 +27,7 @@ func TestUnregisterMemberWhenAnswerIsY(t *testing.T) {
 	ctx := clicontext.NewCommandContext(term, newClient)
 
 	// when
-	err := UnregisterMemberCluster(ctx, "member1", func(ctx *clicontext.CommandContext, clusterNames ...string) error {
+	err := UnregisterMemberCluster(ctx, "member1", func(ctx *clicontext.CommandContext, clusterName string) error {
 		return nil
 	})
 
@@ -58,7 +58,7 @@ func TestUnregisterMemberWhenRestartError(t *testing.T) {
 	ctx := clicontext.NewCommandContext(term, newClient)
 
 	// when
-	err := UnregisterMemberCluster(ctx, "member1", func(ctx *clicontext.CommandContext, clusterNames ...string) error {
+	err := UnregisterMemberCluster(ctx, "member1", func(ctx *clicontext.CommandContext, clusterName string) error {
 		return fmt.Errorf("restart did not happen")
 	})
 
@@ -66,6 +66,30 @@ func TestUnregisterMemberWhenRestartError(t *testing.T) {
 	require.EqualError(t, err, "restart did not happen")
 }
 
+func TestUnregisterMemberCallsRestart(t *testing.T) {
+	// given
+	toolchainCluster := NewToolchainCluster(ToolchainClusterName("member-cool-server.com"))
+	hostDeploymentName := test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager")
+	deployment := newDeployment(hostDeploymentName, 1)
+	deployment.Labels = map[string]string{"kubesaw-control-plane": "kubesaw-controller-manager"}
+
+	newClient, fakeClient := NewFakeClients(t, toolchainCluster, deployment)
+	numberOfUpdateCalls := 0
+	fakeClient.MockUpdate = whenDeploymentThenUpdated(t, fakeClient, hostDeploymentName, 1, &numberOfUpdateCalls)
+
+	SetFileConfig(t, Host(), Member())
+	term := NewFakeTerminalWithResponse("y")
+	ctxAct := clicontext.NewCommandContext(term, newClient)
+
+	// when
+	err := UnregisterMemberCluster(ctxAct, "member1", func(ctx *clicontext.CommandContext, 
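/*
With restartFunc narrowed from variadic cluster names to a single
clusterName, the wiring between unregister and restart becomes easy to verify
with a counting mock; a later patch in this series hardens this very test the
same way. Sketch (mockRestart is the helper defined at the bottom of this
test file):

	called := 0
	err := UnregisterMemberCluster(ctxAct, "member1",
		func(ctx *clicontext.CommandContext, clusterName string) error {
			called++
			return mockRestart(ctx, clusterName)
		})
	require.NoError(t, err)
	assert.Equal(t, 1, called)
*/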
restartClusterName string) error { + return mockRestart(ctx, restartClusterName) + }) + + // then + require.NoError(t, err) +} + func TestUnregisterMemberWhenAnswerIsN(t *testing.T) { // given toolchainCluster := NewToolchainCluster(ToolchainClusterName("member-cool-server.com")) @@ -75,7 +99,7 @@ func TestUnregisterMemberWhenAnswerIsN(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) // when - err := UnregisterMemberCluster(ctx, "member1", func(ctx *clicontext.CommandContext, clusterNames ...string) error { + err := UnregisterMemberCluster(ctx, "member1", func(ctx *clicontext.CommandContext, clusterName string) error { return nil }) @@ -98,7 +122,7 @@ func TestUnregisterMemberWhenNotFound(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) // when - err := UnregisterMemberCluster(ctx, "member1", func(ctx *clicontext.CommandContext, clusterNames ...string) error { + err := UnregisterMemberCluster(ctx, "member1", func(ctx *clicontext.CommandContext, clusterName string) error { return nil }) @@ -121,7 +145,7 @@ func TestUnregisterMemberWhenUnknownClusterName(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) // when - err := UnregisterMemberCluster(ctx, "some", func(ctx *clicontext.CommandContext, clusterNames ...string) error { + err := UnregisterMemberCluster(ctx, "some", func(ctx *clicontext.CommandContext, clusterName string) error { return nil }) @@ -146,7 +170,7 @@ func TestUnregisterMemberLacksPermissions(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) // when - err := UnregisterMemberCluster(ctx, "member1", func(ctx *clicontext.CommandContext, clusterNames ...string) error { + err := UnregisterMemberCluster(ctx, "member1", func(ctx *clicontext.CommandContext, clusterName string) error { return nil }) @@ -154,3 +178,10 @@ func TestUnregisterMemberLacksPermissions(t *testing.T) { require.EqualError(t, err, "ksctl command failed: the token in your ksctl.yaml file is missing") AssertToolchainClusterSpec(t, fakeClient, toolchainCluster) } + +func mockRestart(ctx *clicontext.CommandContext, clusterName string) error { + if clusterName == "host" && ctx != nil { + return nil + } + return fmt.Errorf("cluster name is wrong") +} From a4b51983af3e953e26780aa7f5d56eaf24afaf4a Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Thu, 14 Nov 2024 17:21:47 +0530 Subject: [PATCH 33/40] code cov Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 10 +++---- pkg/cmd/adm/restart_test.go | 52 ++++++++++++++++++++++++++++++++++++- 2 files changed, 56 insertions(+), 6 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index cb62c04..d87dcdb 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -75,16 +75,16 @@ func restart(ctx *clicontext.CommandContext, clusterName string) error { kubeConfigFlags.KubeConfig = &kubeconfig factory := cmdutil.NewFactory(cmdutil.NewMatchVersionFlags(kubeConfigFlags)) - cl, err := ctx.NewClient(cfg.Token, cfg.ServerAPI) - if err != nil { - return err - } - if !ctx.AskForConfirmation( ioutils.WithMessagef("restart all the deployments in the cluster '%s' and namespace '%s' \n", clusterName, cfg.OperatorNamespace)) { return nil } + cl, err := ctx.NewClient(cfg.Token, cfg.ServerAPI) + if err != nil { + return err + } + //return restartDeployment(ctx, cl, cfg.OperatorNamespace, factory, ioStreams, checkRolloutStatus, restartNonOperatorDeployments) return restartDeployments(ctx, cl, cfg.OperatorNamespace, func(ctx *clicontext.CommandContext, labelSelector string) error { 
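/*
For reference, the non-OLM branch handed to restartDeployments below drives
the same code path as `kubectl rollout restart deployment/<name>`. A sketch
mirroring restartNonOlmDeployments (the "deployment/<name>" argument shape is
the one this series converges on in a later patch):

	o := kubectlrollout.NewRolloutRestartOptions(ioStreams)
	if err := o.Complete(factory, nil, []string{"deployment/" + deployment.Name}); err != nil {
		return err
	}
	if err := o.Validate(); err != nil {
		return err
	}
	return o.RunRestart()
*/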
return checkRolloutStatus(ctx, factory, ioStreams, labelSelector)
diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go
index 31043f3..81426c9 100644
--- a/pkg/cmd/adm/restart_test.go
+++ b/pkg/cmd/adm/restart_test.go
@@ -8,6 +8,7 @@ import (
 	"testing"
 
 	"github.com/codeready-toolchain/toolchain-common/pkg/test"
+	"github.com/h2non/gock"
 	clicontext "github.com/kubesaw/ksctl/pkg/context"
 	. "github.com/kubesaw/ksctl/pkg/test"
 	"github.com/stretchr/testify/assert"
@@ -85,7 +86,7 @@ func TestKubectlRolloutFunctionality(t *testing.T) {
 	hostDep.Labels = map[string]string{"kubesaw-control-plane": "kubesaw-controller-manager"}
 	regDep.Labels = map[string]string{"toolchain.dev.openshift.com/provider": "codeready-toolchain"}
 
-	t.Run("Rollout Restart and Rollout Status works successfuly", func(t *testing.T) {
+	t.Run("Rollout Restart and Rollout Status works successfully", func(t *testing.T) {
 		csCalls = 0
 		newClient, fakeClient := NewFakeClients(t, hostDep, regDep, pod)
 		ctx := clicontext.NewCommandContext(term, newClient)
@@ -323,6 +324,55 @@ func TestRestartAutoScalerDeployment(t *testing.T) {
 	})
 }
 
+func TestRestart(t *testing.T) {
+	//given
+	t.Cleanup(gock.OffAll)
+	gock.New("https://cool-server.com").
+		Get("api").
+		Persist().
+		Reply(200).
+		BodyString("{}")
+	SetFileConfig(t, Host(), Member())
+	toolchainCluster := NewToolchainCluster(ToolchainClusterName("host"))
+
+	///OLM-deployments
+	//host
+	hostDeployment := newDeployment(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"), 1)
+	hostDeployment.Labels = map[string]string{"kubesaw-control-plane": "kubesaw-controller-manager"}
+	hostPod := newPod(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"))
+
+	//Non-OLM deployments
+	//reg-svc
+	regServDeployment := newDeployment(test.NamespacedName("toolchain-host-operator", "registration-service"), 1)
+	regServDeployment.Labels = map[string]string{"toolchain.dev.openshift.com/provider": "codeready-toolchain"}
+
+	t.Run("No restart when user says NO in confirmation of restart", func(t *testing.T) {
+		term := NewFakeTerminalWithResponse("N")
+		//given
+		newClient, _ := NewFakeClients(t, toolchainCluster, hostDeployment, hostPod)
+		ctx := clicontext.NewCommandContext(term, newClient)
+		//when
+		err := restart(ctx, "host")
+
+		//then
+		require.NoError(t, err)
+		require.NotContains(t, term.Output(), "Fetching the current OLM and non-OLM deployments of the operator in")
+
+	})
+	t.Run("fails when no factory and stream provided", func(t *testing.T) {
+		term := NewFakeTerminalWithResponse("Y")
+		//given
+		newClient, _ := NewFakeClients(t, toolchainCluster, hostDeployment, hostPod)
+		ctx := clicontext.NewCommandContext(term, newClient)
+		//when
+		err := restart(ctx, "host")
+
+		//then
+		require.Error(t, err, "the server doesn't have a resource type deployment")
+
+	})
+}
+
 func newDeployment(namespacedName types.NamespacedName, replicas int32) *appsv1.Deployment { //nolint:unparam
 	return &appsv1.Deployment{
 		ObjectMeta: metav1.ObjectMeta{
From 9b889ccfddbf56176d869866aaecb217b49c0a1e Mon Sep 17 00:00:00 2001
From: Feny Mehta 
Date: Thu, 14 Nov 2024 17:41:51 +0530
Subject: [PATCH 34/40] some changes to status func

Signed-off-by: Feny Mehta 
---
 pkg/cmd/adm/restart.go      | 16 ++++++++--------
 pkg/cmd/adm/restart_test.go | 38 +++++++++++++------------------------
 2 files changed, 21 insertions(+), 33 deletions(-)

diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go
index d87dcdb..4c0cca4 100644
--- a/pkg/cmd/adm/restart.go
+++ 
b/pkg/cmd/adm/restart.go @@ -21,7 +21,7 @@ import ( type ( NonOperatorDeploymentsRestarterFunc func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error - RolloutStatusCheckerFunc func(ctx *clicontext.CommandContext, labelSelector string) error + RolloutStatusCheckerFunc func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error ) // NewRestartCmd() is a function to restart the whole operator, it relies on the target cluster and fetches the cluster config @@ -86,8 +86,8 @@ func restart(ctx *clicontext.CommandContext, clusterName string) error { } //return restartDeployment(ctx, cl, cfg.OperatorNamespace, factory, ioStreams, checkRolloutStatus, restartNonOperatorDeployments) - return restartDeployments(ctx, cl, cfg.OperatorNamespace, func(ctx *clicontext.CommandContext, labelSelector string) error { - return checkRolloutStatus(ctx, factory, ioStreams, labelSelector) + return restartDeployments(ctx, cl, cfg.OperatorNamespace, func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error { + return checkRolloutStatus(ctx, factory, ioStreams, labelSelector, deployment) }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return restartNonOlmDeployments(ctx, deployment, factory, ioStreams) }) @@ -117,7 +117,7 @@ func restartDeployments(ctx *clicontext.CommandContext, cl runtimeclient.Client, ctx.Printlnf("Checking the status of the deleted pod's deployment %v", olmOperatorDeployment.Name) //check the rollout status - if err := checker(ctx, "kubesaw-control-plane=kubesaw-controller-manager"); err != nil { + if err := checker(ctx, "kubesaw-control-plane=kubesaw-controller-manager", olmOperatorDeployment); err != nil { return err } } @@ -140,7 +140,7 @@ func restartDeployments(ctx *clicontext.CommandContext, cl runtimeclient.Client, } //check the rollout status ctx.Printlnf("Checking the status of the rolled out deployment %v", nonOlmDeployment.Name) - if err := checker(ctx, "toolchain.dev.openshift.com/provider=codeready-toolchain"); err != nil { + if err := checker(ctx, "toolchain.dev.openshift.com/provider=codeready-toolchain", nonOlmDeployment); err != nil { return err } return nil @@ -192,15 +192,15 @@ func restartNonOlmDeployments(ctx *clicontext.CommandContext, deployment appsv1. 
return o.RunRestart() } -func checkRolloutStatus(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string) error { +func checkRolloutStatus(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string, deployment appsv1.Deployment) error { cmd := kubectlrollout.NewRolloutStatusOptions(ioStreams) - if err := cmd.Complete(f, []string{"deployment"}); err != nil { + if err := cmd.Complete(f, []string{"deployment/" + deployment.Name}); err != nil { return err } - cmd.LabelSelector = labelSelector + //cmd.LabelSelector = labelSelector if err := cmd.Validate(); err != nil { return err } diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 81426c9..c27bf77 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -92,8 +92,8 @@ func TestKubectlRolloutFunctionality(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) //when - err := restartDeployments(ctx, fakeClient, HostNamespacedName.Namespace, func(ctx *clicontext.CommandContext, labelSelector string) error { - return checkRolloutStatus(ctx, tf, streams, labelSelector) + err := restartDeployments(ctx, fakeClient, HostNamespacedName.Namespace, func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error { + return checkRolloutStatus(ctx, tf, streams, labelSelector, *hostDep) }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return restartNonOlmDeployments(ctx, deployment, tf, streams) }) @@ -124,8 +124,8 @@ func TestKubectlRolloutFunctionality(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) //when - err := restartDeployments(ctx, fakeClient, HostNamespacedName.Namespace, func(ctx *clicontext.CommandContext, labelSelector string) error { - return checkRolloutStatus(ctx, tf, streams, labelSelector) + err := restartDeployments(ctx, fakeClient, HostNamespacedName.Namespace, func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error { + return checkRolloutStatus(ctx, tf, streams, labelSelector, *hostDep) }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return restartNonOlmDeployments(ctx, deployment, tf, streams) }) @@ -141,8 +141,8 @@ func TestKubectlRolloutFunctionality(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) //when - err := restartDeployments(ctx, fakeClient, HostNamespacedName.Namespace, func(ctx *clicontext.CommandContext, labelSelector string) error { - return checkRolloutStatus(ctx, tf, streams, labelSelector) + err := restartDeployments(ctx, fakeClient, HostNamespacedName.Namespace, func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error { + return checkRolloutStatus(ctx, tf, streams, labelSelector, *hostDep) }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return restartNonOlmDeployments(ctx, deployment, tf, streams) }) @@ -183,7 +183,7 @@ func TestRestartDeployment(t *testing.T) { //when err := restartDeployments(ctx, fakeClient, "toolchain-host-operator", - func(ctx *clicontext.CommandContext, labelSelector string) error { + func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error { require.Equal(t, "toolchain.dev.openshift.com/provider=codeready-toolchain", labelSelector) return nil }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { @@ -202,7 +202,7 @@ func 
TestRestartDeployment(t *testing.T) { //when err := restartDeployments(ctx, fakeClient, "toolchain-host-operator", - func(ctx *clicontext.CommandContext, labelSelector string) error { + func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error { return nil }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return nil @@ -230,7 +230,7 @@ func TestRestartDeployment(t *testing.T) { //when err := restartDeployments(ctx, fakeClient, "toolchain-host-operator", - func(ctx *clicontext.CommandContext, labelSelector string) error { + func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error { return nil }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return nil @@ -248,7 +248,7 @@ func TestRestartDeployment(t *testing.T) { expectedErr := fmt.Errorf("Could not do rollout restart of the deployment") //when err := restartDeployments(ctx, fakeClient, "toolchain-host-operator", - func(ctx *clicontext.CommandContext, labelSelector string) error { + func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error { return nil }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return expectedErr @@ -265,7 +265,7 @@ func TestRestartDeployment(t *testing.T) { //when err := restartDeployments(ctx, fakeClient, "toolchain-host-operator", - func(ctx *clicontext.CommandContext, labelSelector string) error { + func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error { return nil }, nil) @@ -280,7 +280,7 @@ func TestRestartDeployment(t *testing.T) { expectedErr := fmt.Errorf("Could not check the status of the deployment") //when err := restartDeployments(ctx, fakeClient, "toolchain-host-operator", - func(ctx *clicontext.CommandContext, labelSelector string) error { + func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error { return expectedErr }, nil) @@ -313,7 +313,7 @@ func TestRestartAutoScalerDeployment(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) //when err := restartDeployments(ctx, fakeClient, "toolchain-member-operator", - func(ctx *clicontext.CommandContext, labelSelector string) error { + func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error { return nil }, mockRolloutRestartInterceptor()) @@ -359,18 +359,6 @@ func TestRestart(t *testing.T) { require.NotContains(t, term.Output(), "Fetching the current OLM and non-OLM deployments of the operator in") }) - t.Run("fails when no factory and stream provided", func(t *testing.T) { - term := NewFakeTerminalWithResponse("Y") - //given - newClient, _ := NewFakeClients(t, toolchainCluster, hostDeployment, hostPod) - ctx := clicontext.NewCommandContext(term, newClient) - //when - err := restart(ctx, "host") - - //then - require.Error(t, err, "the server doesn't have a resource type deployment") - - }) } func newDeployment(namespacedName types.NamespacedName, replicas int32) *appsv1.Deployment { //nolint:unparam From 9a14e2bfdac00276a86f3e1de3e5f5be7d2eff23 Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Thu, 14 Nov 2024 17:49:46 +0530 Subject: [PATCH 35/40] leftovers Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 13 +++++----- pkg/cmd/adm/restart_test.go | 48 ++++++++++++------------------------- 2 files changed, 21 insertions(+), 40 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index 4c0cca4..ccf14be 100644 
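/*
Summary of the status-check evolution up to this point: checkRolloutStatus
now completes the kubectl rollout-status options against a single
"deployment/<name>" argument instead of a bare "deployment" resource plus a
label selector. A sketch of the resulting call sequence (the closing Run()
call is an assumption here; only Complete and Validate are visible in the
hunks above):

	cmd := kubectlrollout.NewRolloutStatusOptions(ioStreams)
	if err := cmd.Complete(f, []string{"deployment/" + deployment.Name}); err != nil {
		return err
	}
	if err := cmd.Validate(); err != nil {
		return err
	}
	return cmd.Run()
*/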
--- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -21,7 +21,7 @@ import ( type ( NonOperatorDeploymentsRestarterFunc func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error - RolloutStatusCheckerFunc func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error + RolloutStatusCheckerFunc func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error ) // NewRestartCmd() is a function to restart the whole operator, it relies on the target cluster and fetches the cluster config @@ -86,8 +86,8 @@ func restart(ctx *clicontext.CommandContext, clusterName string) error { } //return restartDeployment(ctx, cl, cfg.OperatorNamespace, factory, ioStreams, checkRolloutStatus, restartNonOperatorDeployments) - return restartDeployments(ctx, cl, cfg.OperatorNamespace, func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error { - return checkRolloutStatus(ctx, factory, ioStreams, labelSelector, deployment) + return restartDeployments(ctx, cl, cfg.OperatorNamespace, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { + return checkRolloutStatus(ctx, factory, ioStreams, deployment) }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return restartNonOlmDeployments(ctx, deployment, factory, ioStreams) }) @@ -117,7 +117,7 @@ func restartDeployments(ctx *clicontext.CommandContext, cl runtimeclient.Client, ctx.Printlnf("Checking the status of the deleted pod's deployment %v", olmOperatorDeployment.Name) //check the rollout status - if err := checker(ctx, "kubesaw-control-plane=kubesaw-controller-manager", olmOperatorDeployment); err != nil { + if err := checker(ctx, olmOperatorDeployment); err != nil { return err } } @@ -140,7 +140,7 @@ func restartDeployments(ctx *clicontext.CommandContext, cl runtimeclient.Client, } //check the rollout status ctx.Printlnf("Checking the status of the rolled out deployment %v", nonOlmDeployment.Name) - if err := checker(ctx, "toolchain.dev.openshift.com/provider=codeready-toolchain", nonOlmDeployment); err != nil { + if err := checker(ctx, nonOlmDeployment); err != nil { return err } return nil @@ -192,7 +192,7 @@ func restartNonOlmDeployments(ctx *clicontext.CommandContext, deployment appsv1. return o.RunRestart() } -func checkRolloutStatus(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, labelSelector string, deployment appsv1.Deployment) error { +func checkRolloutStatus(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStreams genericclioptions.IOStreams, deployment appsv1.Deployment) error { cmd := kubectlrollout.NewRolloutStatusOptions(ioStreams) @@ -200,7 +200,6 @@ func checkRolloutStatus(ctx *clicontext.CommandContext, f cmdutil.Factory, ioStr return err } - //cmd.LabelSelector = labelSelector if err := cmd.Validate(); err != nil { return err } diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index c27bf77..5dff207 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -8,7 +8,6 @@ import ( "testing" "github.com/codeready-toolchain/toolchain-common/pkg/test" - "github.com/h2non/gock" clicontext "github.com/kubesaw/ksctl/pkg/context" . 
"github.com/kubesaw/ksctl/pkg/test" "github.com/stretchr/testify/assert" @@ -92,8 +91,8 @@ func TestKubectlRolloutFunctionality(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) //when - err := restartDeployments(ctx, fakeClient, HostNamespacedName.Namespace, func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error { - return checkRolloutStatus(ctx, tf, streams, labelSelector, *hostDep) + err := restartDeployments(ctx, fakeClient, HostNamespacedName.Namespace, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { + return checkRolloutStatus(ctx, tf, streams, *hostDep) }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return restartNonOlmDeployments(ctx, deployment, tf, streams) }) @@ -124,8 +123,8 @@ func TestKubectlRolloutFunctionality(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) //when - err := restartDeployments(ctx, fakeClient, HostNamespacedName.Namespace, func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error { - return checkRolloutStatus(ctx, tf, streams, labelSelector, *hostDep) + err := restartDeployments(ctx, fakeClient, HostNamespacedName.Namespace, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { + return checkRolloutStatus(ctx, tf, streams, *hostDep) }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return restartNonOlmDeployments(ctx, deployment, tf, streams) }) @@ -141,8 +140,8 @@ func TestKubectlRolloutFunctionality(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) //when - err := restartDeployments(ctx, fakeClient, HostNamespacedName.Namespace, func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error { - return checkRolloutStatus(ctx, tf, streams, labelSelector, *hostDep) + err := restartDeployments(ctx, fakeClient, HostNamespacedName.Namespace, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { + return checkRolloutStatus(ctx, tf, streams, *hostDep) }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return restartNonOlmDeployments(ctx, deployment, tf, streams) }) @@ -183,8 +182,8 @@ func TestRestartDeployment(t *testing.T) { //when err := restartDeployments(ctx, fakeClient, "toolchain-host-operator", - func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error { - require.Equal(t, "toolchain.dev.openshift.com/provider=codeready-toolchain", labelSelector) + func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { + require.Equal(t, "host-operator-controller-manager", deployment.Name) return nil }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { require.Equal(t, regServDeployment, deployment) @@ -202,7 +201,7 @@ func TestRestartDeployment(t *testing.T) { //when err := restartDeployments(ctx, fakeClient, "toolchain-host-operator", - func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error { + func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return nil }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return nil @@ -230,7 +229,7 @@ func TestRestartDeployment(t *testing.T) { //when err := restartDeployments(ctx, fakeClient, "toolchain-host-operator", - func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error { + func(ctx 
*clicontext.CommandContext, deployment appsv1.Deployment) error { return nil }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return nil @@ -248,7 +247,7 @@ func TestRestartDeployment(t *testing.T) { expectedErr := fmt.Errorf("Could not do rollout restart of the deployment") //when err := restartDeployments(ctx, fakeClient, "toolchain-host-operator", - func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error { + func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return nil }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return expectedErr @@ -265,7 +264,7 @@ func TestRestartDeployment(t *testing.T) { //when err := restartDeployments(ctx, fakeClient, "toolchain-host-operator", - func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error { + func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return nil }, nil) @@ -280,7 +279,7 @@ func TestRestartDeployment(t *testing.T) { expectedErr := fmt.Errorf("Could not check the status of the deployment") //when err := restartDeployments(ctx, fakeClient, "toolchain-host-operator", - func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error { + func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return expectedErr }, nil) @@ -313,7 +312,7 @@ func TestRestartAutoScalerDeployment(t *testing.T) { ctx := clicontext.NewCommandContext(term, newClient) //when err := restartDeployments(ctx, fakeClient, "toolchain-member-operator", - func(ctx *clicontext.CommandContext, labelSelector string, deployment appsv1.Deployment) error { + func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return nil }, mockRolloutRestartInterceptor()) @@ -326,30 +325,13 @@ func TestRestartAutoScalerDeployment(t *testing.T) { func TestRestart(t *testing.T) { //given - t.Cleanup(gock.OffAll) - gock.New("https://cool-server.com"). - Get("api"). - Persist(). - Reply(200). 
-		BodyString("{}")
 	SetFileConfig(t, Host(), Member())
 	toolchainCluster := NewToolchainCluster(ToolchainClusterName("host"))
 
-	///OLM-deployments
-	//host
-	hostDeployment := newDeployment(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"), 1)
-	hostDeployment.Labels = map[string]string{"kubesaw-control-plane": "kubesaw-controller-manager"}
-	hostPod := newPod(test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager"))
-
-	//Non-OLM deployments
-	//reg-svc
-	regServDeployment := newDeployment(test.NamespacedName("toolchain-host-operator", "registration-service"), 1)
-	regServDeployment.Labels = map[string]string{"toolchain.dev.openshift.com/provider": "codeready-toolchain"}
-
 	t.Run("No restart when user says NO in confirmation of restart", func(t *testing.T) {
 		term := NewFakeTerminalWithResponse("N")
 		//given
-		newClient, _ := NewFakeClients(t, toolchainCluster, hostDeployment, hostPod)
+		newClient, _ := NewFakeClients(t, toolchainCluster)
 		ctx := clicontext.NewCommandContext(term, newClient)
 		//when
 		err := restart(ctx, "host")

From 4f477ce6d8bf0088e7ce20fe12d311fc69bd3150 Mon Sep 17 00:00:00 2001
From: Feny Mehta 
Date: Fri, 15 Nov 2024 12:12:40 +0530
Subject: [PATCH 36/40] merge conflict

Signed-off-by: Feny Mehta 
---
 pkg/cmd/adm/restart.go | 114 ++---------------------------------------
 1 file changed, 5 insertions(+), 109 deletions(-)

diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go
index 85ed9e0..ccf14be 100644
--- a/pkg/cmd/adm/restart.go
+++ b/pkg/cmd/adm/restart.go
@@ -1,7 +1,6 @@ package adm
 
 import (
-	"context"
 	"fmt"
 	"os"
 	"time"
@@ -13,10 +12,7 @@ import (
 	"github.com/spf13/cobra"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/cli-runtime/pkg/genericclioptions"
 	kubectlrollout "k8s.io/kubectl/pkg/cmd/rollout"
 	cmdutil "k8s.io/kubectl/pkg/cmd/util"
@@ -89,111 +85,11 @@ func restart(ctx *clicontext.CommandContext, clusterName string) error {
 		return err
 	}
 
-	if len(deployments) == 0 {
-		err := printExistingDeployments(ctx.Terminal, cl, cfg.OperatorNamespace)
-		if err != nil {
-			ctx.Terminal.Printlnf("\nERROR: Failed to list existing deployments\n :%s", err.Error())
-		}
-		return fmt.Errorf("at least one deployment name is required, include one or more of the above deployments to restart")
-	}
-	deploymentName := deployments[0]
-
-	if !ctx.AskForConfirmation(
-		ioutils.WithMessagef("restart the deployment '%s' in namespace '%s'", deploymentName, cfg.OperatorNamespace)) {
-		return nil
-	}
-	return restartDeployment(ctx, cl, cfg.OperatorNamespace, deploymentName)
-}
-
-func restartDeployment(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, deploymentName string) error {
-	namespacedName := types.NamespacedName{
-		Namespace: ns,
-		Name:      deploymentName,
-	}
-
-	originalReplicas, err := scaleToZero(cl, namespacedName)
-	if err != nil {
-		if apierrors.IsNotFound(err) {
-			ctx.Printlnf("\nERROR: The given deployment '%s' wasn't found.", deploymentName)
-			return printExistingDeployments(ctx, cl, ns)
-		}
-		return err
-	}
-	ctx.Println("The deployment was scaled to 0")
-	if err := scaleBack(ctx, cl, namespacedName, originalReplicas); err != nil {
-		ctx.Printlnf("Scaling the deployment '%s' in namespace '%s' back to '%d' replicas wasn't successful", originalReplicas)
-		ctx.Println("Please, try to contact administrators to scale the 
deployment back manually") - return err - } - - ctx.Printlnf("The deployment was scaled back to '%d'", originalReplicas) - return nil -} - -func restartHostOperator(ctx *clicontext.CommandContext, hostClient runtimeclient.Client, hostNamespace string) error { - deployments := &appsv1.DeploymentList{} - if err := hostClient.List(context.TODO(), deployments, - runtimeclient.InNamespace(hostNamespace), - runtimeclient.MatchingLabels{"olm.owner.namespace": "toolchain-host-operator"}); err != nil { - return err - } - if len(deployments.Items) != 1 { - return fmt.Errorf("there should be a single deployment matching the label olm.owner.namespace=toolchain-host-operator in %s ns, but %d was found. "+ - "It's not possible to restart the Host Operator deployment", hostNamespace, len(deployments.Items)) - } - - return restartDeployment(ctx, hostClient, hostNamespace, deployments.Items[0].Name) -} - -func printExistingDeployments(term ioutils.Terminal, cl runtimeclient.Client, ns string) error { - deployments := &appsv1.DeploymentList{} - if err := cl.List(context.TODO(), deployments, runtimeclient.InNamespace(ns)); err != nil { - return err - } - deploymentList := "\n" - for _, deployment := range deployments.Items { - deploymentList += fmt.Sprintf("%s\n", deployment.Name) - } - term.PrintContextSeparatorWithBodyf(deploymentList, "Existing deployments in %s namespace", ns) - return nil -} - -func scaleToZero(cl runtimeclient.Client, namespacedName types.NamespacedName) (int32, error) { - // get the deployment - deployment := &appsv1.Deployment{} - if err := cl.Get(context.TODO(), namespacedName, deployment); err != nil { - return 0, err - } - // keep original number of replicas so we can bring it back - originalReplicas := *deployment.Spec.Replicas - zero := int32(0) - deployment.Spec.Replicas = &zero - - // update the deployment so it scales to zero - return originalReplicas, cl.Update(context.TODO(), deployment) -} - -func scaleBack(term ioutils.Terminal, cl runtimeclient.Client, namespacedName types.NamespacedName, originalReplicas int32) error { - return wait.PollUntilContextTimeout(context.TODO(), 500*time.Millisecond, 10*time.Second, false, func(ctx context.Context) (done bool, err error) { - term.Println("") - term.Printlnf("Trying to scale the deployment back to '%d'", originalReplicas) - // get the updated - deployment := &appsv1.Deployment{} - if err := cl.Get(context.TODO(), namespacedName, deployment); err != nil { - return false, err - } - // check if the replicas number wasn't already reset by a controller - if *deployment.Spec.Replicas == originalReplicas { - return true, nil - } - // set the original - deployment.Spec.Replicas = &originalReplicas - // and update to scale back - if err := cl.Update(context.TODO(), deployment); err != nil { - term.Printlnf("error updating Deployment '%s': %s. 
Will retry again...", namespacedName.Name, err.Error()) - return false, nil - } - return true, nil + //return restartDeployment(ctx, cl, cfg.OperatorNamespace, factory, ioStreams, checkRolloutStatus, restartNonOperatorDeployments) + return restartDeployments(ctx, cl, cfg.OperatorNamespace, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { + return checkRolloutStatus(ctx, factory, ioStreams, deployment) + }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { + return restartNonOlmDeployments(ctx, deployment, factory, ioStreams) }) } From 6318f4e055b64949cf86a60ce2b233de0b195e7f Mon Sep 17 00:00:00 2001 From: Feny Mehta Date: Thu, 21 Nov 2024 15:01:14 +0530 Subject: [PATCH 37/40] some changes as per rc Signed-off-by: Feny Mehta --- pkg/cmd/adm/restart.go | 16 ++++++++-------- pkg/cmd/adm/restart_test.go | 23 ++++++++++------------- pkg/cmd/adm/unregister_member_test.go | 12 +++++------- 3 files changed, 23 insertions(+), 28 deletions(-) diff --git a/pkg/cmd/adm/restart.go b/pkg/cmd/adm/restart.go index ccf14be..2546061 100644 --- a/pkg/cmd/adm/restart.go +++ b/pkg/cmd/adm/restart.go @@ -14,14 +14,15 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" kubectlrollout "k8s.io/kubectl/pkg/cmd/rollout" cmdutil "k8s.io/kubectl/pkg/cmd/util" runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) type ( - NonOperatorDeploymentsRestarterFunc func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error - RolloutStatusCheckerFunc func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error + RolloutRestartFunc func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error + RolloutStatusCheckerFunc func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error ) // NewRestartCmd() is a function to restart the whole operator, it relies on the target cluster and fetches the cluster config @@ -52,7 +53,7 @@ func NewRestartCmd() *cobra.Command { func restart(ctx *clicontext.CommandContext, clusterName string) error { kubeConfigFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag() - ioStreams := genericclioptions.IOStreams{ + ioStreams := genericiooptions.IOStreams{ In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr, @@ -85,7 +86,6 @@ func restart(ctx *clicontext.CommandContext, clusterName string) error { return err } - //return restartDeployment(ctx, cl, cfg.OperatorNamespace, factory, ioStreams, checkRolloutStatus, restartNonOperatorDeployments) return restartDeployments(ctx, cl, cfg.OperatorNamespace, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { return checkRolloutStatus(ctx, factory, ioStreams, deployment) }, func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error { @@ -94,7 +94,7 @@ func restart(ctx *clicontext.CommandContext, clusterName string) error { } // This function has the whole logic of getting the list of olm and non-olm based deployment, then proceed on restarting/deleting accordingly -func restartDeployments(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, checker RolloutStatusCheckerFunc, restarter NonOperatorDeploymentsRestarterFunc) error { +func restartDeployments(ctx *clicontext.CommandContext, cl runtimeclient.Client, ns string, checker RolloutStatusCheckerFunc, restarter RolloutRestartFunc) error { ctx.Printlnf("Fetching the current OLM and non-OLM deployments of the operator in %s 
namespace", ns) olmDeploymentList, nonOlmDeploymentList, err := getExistingDeployments(ctx, cl, ns) @@ -143,6 +143,8 @@ func restartDeployments(ctx *clicontext.CommandContext, cl runtimeclient.Client, if err := checker(ctx, nonOlmDeployment); err != nil { return err } + //if the deployment is not auto-scaling buffer, it should return from the function and not go to print the message for autoscaling buffer + //We do not expect more than 1 non-olm deployment for each OLM deployment and hence returning here return nil } //message if there is a autoscaling buffer, it shouldn't be restarted but successfully exit @@ -179,12 +181,10 @@ func restartNonOlmDeployments(ctx *clicontext.CommandContext, deployment appsv1. o := kubectlrollout.NewRolloutRestartOptions(ioStreams) - if err := o.Complete(f, nil, []string{"deployment"}); err != nil { + if err := o.Complete(f, nil, []string{"deployment/" + deployment.Name}); err != nil { return err } - o.Resources = []string{"deployment/" + deployment.Name} - if err := o.Validate(); err != nil { return err } diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go index 5dff207..659a583 100644 --- a/pkg/cmd/adm/restart_test.go +++ b/pkg/cmd/adm/restart_test.go @@ -21,7 +21,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" - "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" "k8s.io/client-go/rest/fake" cgtesting "k8s.io/client-go/testing" cmdtesting "k8s.io/kubectl/pkg/cmd/testing" @@ -79,7 +79,7 @@ func TestKubectlRolloutFunctionality(t *testing.T) { return true, fw, nil }) - streams, _, buf, _ := genericclioptions.NewTestIOStreams() + streams, _, buf, _ := genericiooptions.NewTestIOStreams() term := NewFakeTerminalWithResponse("Y") pod := newPod(test.NamespacedName(hostDep.Namespace, hostDep.Name)) hostDep.Labels = map[string]string{"kubesaw-control-plane": "kubesaw-controller-manager"} @@ -158,7 +158,6 @@ func TestKubectlRolloutFunctionality(t *testing.T) { func TestRestartDeployment(t *testing.T) { //given SetFileConfig(t, Host(), Member()) - toolchainCluster := NewToolchainCluster(ToolchainClusterName("host")) //OLM-deployments //host @@ -177,7 +176,7 @@ func TestRestartDeployment(t *testing.T) { t.Run("restart deployment returns an error if no operator based deployment found", func(t *testing.T) { //given - newClient, fakeClient := NewFakeClients(t, toolchainCluster, regServDeployment) + newClient, fakeClient := NewFakeClients(t, regServDeployment) ctx := clicontext.NewCommandContext(term, newClient) //when @@ -196,7 +195,7 @@ func TestRestartDeployment(t *testing.T) { t.Run("restart deployment works successfully with whole operator(operator, non operator)", func(t *testing.T) { //given - newClient, fakeClient := NewFakeClients(t, toolchainCluster, hostDeployment, hostPod, regServDeployment, extraPod) + newClient, fakeClient := NewFakeClients(t, hostDeployment, hostPod, regServDeployment, extraPod) ctx := clicontext.NewCommandContext(term, newClient) //when @@ -224,7 +223,7 @@ func TestRestartDeployment(t *testing.T) { t.Run("restart deployment works successfully when only operator based deployment", func(t *testing.T) { //given - newClient, fakeClient := NewFakeClients(t, toolchainCluster, hostDeployment, hostPod) + newClient, fakeClient := NewFakeClients(t, hostDeployment, hostPod) ctx := clicontext.NewCommandContext(term, newClient) //when @@ -242,7 +241,7 @@ func TestRestartDeployment(t *testing.T) { t.Run("rollout restart returns an 
error", func(t *testing.T) { //given - newClient, fakeClient := NewFakeClients(t, toolchainCluster, hostDeployment, regServDeployment, hostPod) + newClient, fakeClient := NewFakeClients(t, hostDeployment, regServDeployment, hostPod) ctx := clicontext.NewCommandContext(term, newClient) expectedErr := fmt.Errorf("Could not do rollout restart of the deployment") //when @@ -259,7 +258,7 @@ func TestRestartDeployment(t *testing.T) { t.Run("rollout status for the deleted pods(operator) works", func(t *testing.T) { //given - newClient, fakeClient := NewFakeClients(t, toolchainCluster, hostDeployment) + newClient, fakeClient := NewFakeClients(t, hostDeployment) ctx := clicontext.NewCommandContext(term, newClient) //when @@ -274,7 +273,7 @@ func TestRestartDeployment(t *testing.T) { t.Run("error in rollout status of the deleted pods(operator)", func(t *testing.T) { //given - newClient, fakeClient := NewFakeClients(t, toolchainCluster, hostDeployment) + newClient, fakeClient := NewFakeClients(t, hostDeployment) ctx := clicontext.NewCommandContext(term, newClient) expectedErr := fmt.Errorf("Could not check the status of the deployment") //when @@ -292,7 +291,6 @@ func TestRestartDeployment(t *testing.T) { func TestRestartAutoScalerDeployment(t *testing.T) { //given SetFileConfig(t, Host(), Member()) - toolchainCluster := NewToolchainCluster(ToolchainClusterName("host")) //OLM-deployments //member @@ -308,7 +306,7 @@ func TestRestartAutoScalerDeployment(t *testing.T) { t.Run("autoscalling deployment should not restart", func(t *testing.T) { //given - newClient, fakeClient := NewFakeClients(t, toolchainCluster, memberDeployment, autoscalerDeployment) + newClient, fakeClient := NewFakeClients(t, memberDeployment, autoscalerDeployment) ctx := clicontext.NewCommandContext(term, newClient) //when err := restartDeployments(ctx, fakeClient, "toolchain-member-operator", @@ -326,12 +324,11 @@ func TestRestartAutoScalerDeployment(t *testing.T) { func TestRestart(t *testing.T) { //given SetFileConfig(t, Host(), Member()) - toolchainCluster := NewToolchainCluster(ToolchainClusterName("host")) t.Run("No restart when users says NO in confirmaion of restart", func(t *testing.T) { term := NewFakeTerminalWithResponse("N") //given - newClient, _ := NewFakeClients(t, toolchainCluster) + newClient, _ := NewFakeClients(t) ctx := clicontext.NewCommandContext(term, newClient) //when err := restart(ctx, "host") diff --git a/pkg/cmd/adm/unregister_member_test.go b/pkg/cmd/adm/unregister_member_test.go index 13265c3..4549195 100644 --- a/pkg/cmd/adm/unregister_member_test.go +++ b/pkg/cmd/adm/unregister_member_test.go @@ -46,10 +46,8 @@ func TestUnregisterMemberWhenRestartError(t *testing.T) { // given toolchainCluster := NewToolchainCluster(ToolchainClusterName("member-cool-server.com")) hostDeploymentName := test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager") - deployment := newDeployment(hostDeploymentName, 1) - deployment.Labels = map[string]string{"kubesaw-control-plane": "kubesaw-controller-manager"} - newClient, fakeClient := NewFakeClients(t, toolchainCluster, deployment) + newClient, fakeClient := NewFakeClients(t, toolchainCluster) numberOfUpdateCalls := 0 fakeClient.MockUpdate = whenDeploymentThenUpdated(t, fakeClient, hostDeploymentName, 1, &numberOfUpdateCalls) @@ -70,24 +68,24 @@ func TestUnregisterMemberCallsRestart(t *testing.T) { // given toolchainCluster := NewToolchainCluster(ToolchainClusterName("member-cool-server.com")) hostDeploymentName := 

From 8762ebca18bcccddd1802c8fa575ec04fac733ff Mon Sep 17 00:00:00 2001
From: Feny Mehta
Date: Thu, 21 Nov 2024 15:54:19 +0530
Subject: [PATCH 38/40] go version fix

Signed-off-by: Feny Mehta
---
 go.mod |  4 +---
 go.sum | 12 ------------
 2 files changed, 1 insertion(+), 15 deletions(-)

diff --git a/go.mod b/go.mod
index 1a3a3b3..afa043a 100644
--- a/go.mod
+++ b/go.mod
@@ -1,8 +1,6 @@
 module github.com/kubesaw/ksctl
 
-go 1.21
-
-toolchain go1.23.3
+go 1.20
 
 require (
 	github.com/codeready-toolchain/api v0.0.0-20241114213029-44333bf24bcf
diff --git a/go.sum b/go.sum
index 6c1892d..8d8e568 100644
--- a/go.sum
+++ b/go.sum
@@ -83,7 +83,6 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
-github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
 github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
 github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
@@ -151,7 +150,6 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
-github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -246,7 +244,6 @@ github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
 github.com/gobuffalo/flect v0.2.3/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc=
 github.com/goccy/go-yaml v1.8.1/go.mod h1:wS4gNoLalDSJxo/SpngzPQ2BN4uuZVLCmbM4S3vd4+Y=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -337,7 +334,6 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe
 github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
@@ -431,7 +427,6 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
-github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@@ -535,7 +530,6 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k
 github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
 github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
-github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
@@ -543,7 +537,6 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y
 github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
 github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
 github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
-github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
 github.com/openshift/api v0.0.0-20241031180523-b1c90a6cf9a3 h1:QXptzhiO7WovLZSaXb4ig4Cd+ROctyyIJ2Tuw/Du4VI=
 github.com/openshift/api v0.0.0-20241031180523-b1c90a6cf9a3/go.mod h1:yimSGmjsI+XF1mr+AKBs2//fSXIOhhetHGbMlBEfXbs=
 github.com/openshift/library-go v0.0.0-20230301092340-c13b89190a26 h1:vXYT3dX03Fm5FCX1284aTGoa5qBZFp3zMnIVaV9WOdg=
@@ -604,14 +597,12 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
-github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
 github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
 github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
@@ -657,7 +648,6 @@ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoH
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
-github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -726,7 +716,6 @@ go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0
 go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
 go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
 go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
-go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
 go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -1055,7 +1044,6 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
 golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
-golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

From 9c4ae9e6704d876bc38d8540664991ce4939db94 Mon Sep 17 00:00:00 2001
From: Feny Mehta
Date: Thu, 21 Nov 2024 16:40:57 +0530
Subject: [PATCH 39/40] extra left overs

Signed-off-by: Feny Mehta
---
 pkg/cmd/adm/unregister_member_test.go | 19 +++----------------
 1 file changed, 3 insertions(+), 16 deletions(-)

diff --git a/pkg/cmd/adm/unregister_member_test.go b/pkg/cmd/adm/unregister_member_test.go
index 4549195..fb7575b 100644
--- a/pkg/cmd/adm/unregister_member_test.go
+++ b/pkg/cmd/adm/unregister_member_test.go
@@ -4,7 +4,6 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/codeready-toolchain/toolchain-common/pkg/test"
 	clicontext "github.com/kubesaw/ksctl/pkg/context"
 	. "github.com/kubesaw/ksctl/pkg/test"
 	"github.com/stretchr/testify/assert"
@@ -14,13 +13,8 @@ import (
 func TestUnregisterMemberWhenAnswerIsY(t *testing.T) {
 	// given
 	toolchainCluster := NewToolchainCluster(ToolchainClusterName("member-cool-server.com"))
-	hostDeploymentName := test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager")
-	deployment := newDeployment(hostDeploymentName, 1)
-	deployment.Labels = map[string]string{"kubesaw-control-plane": "kubesaw-controller-manager"}
 
-	newClient, fakeClient := NewFakeClients(t, toolchainCluster, deployment)
-	numberOfUpdateCalls := 0
-	fakeClient.MockUpdate = whenDeploymentThenUpdated(t, fakeClient, hostDeploymentName, 1, &numberOfUpdateCalls)
+	newClient, fakeClient := NewFakeClients(t, toolchainCluster)
 
 	SetFileConfig(t, Host(), Member())
 	term := NewFakeTerminalWithResponse("y")
@@ -39,17 +33,13 @@ func TestUnregisterMemberWhenAnswerIsY(t *testing.T) {
 	assert.Contains(t, term.Output(), "Delete Member cluster stated above from the Host cluster?")
 	assert.Contains(t, term.Output(), "The deletion of the Toolchain member cluster from the Host cluster has been triggered")
 	assert.NotContains(t, term.Output(), "cool-token")
-	AssertDeploymentHasReplicas(t, fakeClient, hostDeploymentName, 1)
 }
 
 func TestUnregisterMemberWhenRestartError(t *testing.T) {
 	// given
 	toolchainCluster := NewToolchainCluster(ToolchainClusterName("member-cool-server.com"))
-	hostDeploymentName := test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager")
 
-	newClient, fakeClient := NewFakeClients(t, toolchainCluster)
-	numberOfUpdateCalls := 0
-	fakeClient.MockUpdate = whenDeploymentThenUpdated(t, fakeClient, hostDeploymentName, 1, &numberOfUpdateCalls)
+	newClient, _ := NewFakeClients(t, toolchainCluster)
 
 	SetFileConfig(t, Host(), Member())
 	term := NewFakeTerminalWithResponse("y")
@@ -67,11 +57,8 @@ func TestUnregisterMemberCallsRestart(t *testing.T) {
 	// given
 	toolchainCluster := NewToolchainCluster(ToolchainClusterName("member-cool-server.com"))
-	hostDeploymentName := test.NamespacedName("toolchain-host-operator", "host-operator-controller-manager")
 
-	newClient, fakeClient := NewFakeClients(t, toolchainCluster)
-	numberOfUpdateCalls := 0
-	fakeClient.MockUpdate = whenDeploymentThenUpdated(t, fakeClient, hostDeploymentName, 1, &numberOfUpdateCalls)
+	newClient, _ := NewFakeClients(t, toolchainCluster)
 
 	SetFileConfig(t, Host(), Member())
 	term := NewFakeTerminalWithResponse("y")
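
With the replica-scaling assertions and the whenDeploymentThenUpdated
scaffolding removed above, the contract these unregister tests still verify is
simply that the injected restart function runs exactly once. In isolation the
pattern looks like the sketch below; restartFunc and unregisterMember are
invented stand-ins, not the real UnregisterMemberCluster signature, which also
takes a CommandContext and deletes the ToolchainCluster before restarting:

package adm

import "testing"

// restartFunc stands in for the restart hook that the unregister command
// accepts as a parameter, so tests can substitute a counting stub for the
// real rollout-restart logic.
type restartFunc func(clusterName string) error

// unregisterMember is a hypothetical stand-in: the real command deletes the
// ToolchainCluster first and then invokes the injected restart step.
func unregisterMember(clusterName string, restart restartFunc) error {
	return restart(clusterName)
}

func TestRestartCalledExactlyOnce(t *testing.T) {
	called := 0
	err := unregisterMember("member1", func(clusterName string) error {
		called++ // count invocations instead of asserting on deployment state
		return nil
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if called != 1 {
		t.Fatalf("expected exactly one restart call, got %d", called)
	}
}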

From 70c53e7f257e607f409aad17dff8d67b401ae658 Mon Sep 17 00:00:00 2001
From: Feny Mehta
Date: Thu, 21 Nov 2024 16:46:11 +0530
Subject: [PATCH 40/40] linter

Signed-off-by: Feny Mehta
---
 pkg/cmd/adm/register_member_test.go | 10 ----------
 pkg/cmd/adm/restart_test.go         | 16 ----------------
 2 files changed, 26 deletions(-)

diff --git a/pkg/cmd/adm/register_member_test.go b/pkg/cmd/adm/register_member_test.go
index 2b4d155..3d54ecf 100644
--- a/pkg/cmd/adm/register_member_test.go
+++ b/pkg/cmd/adm/register_member_test.go
@@ -17,7 +17,6 @@ import (
 	"github.com/kubesaw/ksctl/pkg/utils"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
@@ -515,15 +514,6 @@ func verifyToolchainClusterSecret(t *testing.T, fakeClient *test.FakeClient, saN
 	require.Equal(t, fmt.Sprintf("token-secret-for-%s", saName), apiConfig.AuthInfos["auth"].Token)
 }
 
-func whenDeploymentThenUpdated(t *testing.T, fakeClient *test.FakeClient, namespacedName types.NamespacedName, currentReplicas int32, numberOfUpdateCalls *int) func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error {
-	return func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.UpdateOption) error {
-		if deployment, ok := obj.(*appsv1.Deployment); ok {
-			checkDeploymentBeingUpdated(t, fakeClient, namespacedName, currentReplicas, numberOfUpdateCalls, deployment)
-		}
-		return fakeClient.Client.Update(ctx, obj, opts...)
-	}
-}
-
 func newFakeClientsFromRestConfig(t *testing.T, initObjs ...runtimeclient.Object) (newClientFromRestConfigFunc, *test.FakeClient) {
 	fakeClient := test.NewFakeClient(t, initObjs...)
 	fakeClient.MockCreate = func(ctx context.Context, obj runtimeclient.Object, opts ...runtimeclient.CreateOption) error {
diff --git a/pkg/cmd/adm/restart_test.go b/pkg/cmd/adm/restart_test.go
index 659a583..6292f44 100644
--- a/pkg/cmd/adm/restart_test.go
+++ b/pkg/cmd/adm/restart_test.go
@@ -371,22 +371,6 @@ func newPod(namespacedName types.NamespacedName) *corev1.Pod { //nolint:unparam
 	}
 }
 
-func checkDeploymentBeingUpdated(t *testing.T, fakeClient *test.FakeClient, namespacedName types.NamespacedName, currentReplicas int32, numberOfUpdateCalls *int, deployment *appsv1.Deployment) {
-	// on the first call, we should have a deployment with 3 replicas ("current") and request to delete to 0 ("requested")
-	if *numberOfUpdateCalls == 0 {
-		// check the current deployment's replicas field
-		AssertDeploymentHasReplicas(t, fakeClient, namespacedName, currentReplicas)
-		// check the requested deployment's replicas field
-		assert.Equal(t, int32(0), *deployment.Spec.Replicas)
-	} else {
-		// check the current deployment's replicas field
-		AssertDeploymentHasReplicas(t, fakeClient, namespacedName, 0)
-		// check the requested deployment's replicas field
-		assert.Equal(t, currentReplicas, *deployment.Spec.Replicas)
-	}
-	*numberOfUpdateCalls++
-}
-
 func mockRolloutRestartInterceptor() func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error {
 	return func(ctx *clicontext.CommandContext, deployment appsv1.Deployment) error {
 		if deployment.Name == "autoscaling-buffer" {