diff --git a/app/kubemci/cmd/cmd.go b/app/kubemci/cmd/cmd.go
index 170782866..52e2f450d 100644
--- a/app/kubemci/cmd/cmd.go
+++ b/app/kubemci/cmd/cmd.go
@@ -40,6 +40,7 @@ func NewCommand(in io.Reader, out, err io.Writer) *cobra.Command {
 		NewCmdGetStatus(out, err),
 		NewCmdGetVersion(out, err),
 		newCmdList(out, err),
+		newCmdRemoveClusters(out, err),
 	)
 	rootCmd.PersistentFlags().AddGoFlagSet(flag.CommandLine)
 	return rootCmd
diff --git a/app/kubemci/cmd/delete.go b/app/kubemci/cmd/delete.go
index c4ba4950a..b2fedf9d7 100644
--- a/app/kubemci/cmd/delete.go
+++ b/app/kubemci/cmd/delete.go
@@ -18,9 +18,11 @@ import (
 	"fmt"
 	"io"
 
-	"github.com/hashicorp/go-multierror"
+	multierror "github.com/hashicorp/go-multierror"
 	"github.com/spf13/cobra"
 	"k8s.io/api/extensions/v1beta1"
+	// gcp is needed for GKE cluster auth to work.
+	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
 
 	"github.com/GoogleCloudPlatform/k8s-multicluster-ingress/app/kubemci/pkg/gcp/cloudinterface"
 	gcplb "github.com/GoogleCloudPlatform/k8s-multicluster-ingress/app/kubemci/pkg/gcp/loadbalancer"
diff --git a/app/kubemci/cmd/remove_clusters.go b/app/kubemci/cmd/remove_clusters.go
new file mode 100644
index 000000000..1eb1947fa
--- /dev/null
+++ b/app/kubemci/cmd/remove_clusters.go
@@ -0,0 +1,153 @@
+// Copyright 2018 Google, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+	"fmt"
+	"io"
+
+	multierror "github.com/hashicorp/go-multierror"
+	"github.com/spf13/cobra"
+	"k8s.io/api/extensions/v1beta1"
+	// gcp is needed for GKE cluster auth to work.
+	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
+
+	"github.com/GoogleCloudPlatform/k8s-multicluster-ingress/app/kubemci/pkg/gcp/cloudinterface"
+	gcplb "github.com/GoogleCloudPlatform/k8s-multicluster-ingress/app/kubemci/pkg/gcp/loadbalancer"
+	gcputils "github.com/GoogleCloudPlatform/k8s-multicluster-ingress/app/kubemci/pkg/gcp/utils"
+	"github.com/GoogleCloudPlatform/k8s-multicluster-ingress/app/kubemci/pkg/ingress"
+	"github.com/GoogleCloudPlatform/k8s-multicluster-ingress/app/kubemci/pkg/kubeutils"
+)
+
+var (
+	removeClustersShortDesc = "Remove an existing multicluster ingress from some clusters."
+	removeClustersLongDesc  = `Remove an existing multicluster ingress from some clusters.
+
+	Takes a load balancer name and a list of clusters and removes the existing multicluster ingress from those clusters.
+	If the clusters have already been deleted, you can run "kubemci create --force" with the updated cluster list to update the
+	load balancer to be restricted to those clusters. That will however not delete the ingress from old clusters (which is fine
+	if the clusters have been deleted already).
+`
+)
+
+type removeClustersOptions struct {
+	// Name of the YAML file containing ingress spec.
+	IngressFilename string
+	// Path to kubeconfig file.
+	KubeconfigFilename string
+	// Names of the contexts to use from the kubeconfig file.
+	KubeContexts []string
+	// Name of the load balancer.
+	// Required.
+	LBName string
+	// Name of the GCP project in which the load balancer is configured.
+	GCPProject string
+	// Overwrite values when they differ from what's requested. If
+	// the resource does not exist, or is already the correct
+	// value, then 'force' is a no-op.
+	ForceUpdate bool
+	// Name of the namespace for the ingress when none is provided (mismatch of option with spec causes an error).
+	// Optional.
+	Namespace string
+}
+
+func newCmdRemoveClusters(out, err io.Writer) *cobra.Command {
+	var options removeClustersOptions
+	cmd := &cobra.Command{
+		Use:   "remove-clusters",
+		Short: removeClustersShortDesc,
+		Long:  removeClustersLongDesc,
+		Run: func(cmd *cobra.Command, args []string) {
+			if err := validateRemoveClustersArgs(&options, args); err != nil {
+				fmt.Println(err)
+				return
+			}
+			if err := runRemoveClusters(&options, args); err != nil {
+				fmt.Println("Error removing clusters:", err)
+				return
+			}
+		},
+	}
+	addRemoveClustersFlags(cmd, &options)
+	return cmd
+}
+
+func addRemoveClustersFlags(cmd *cobra.Command, options *removeClustersOptions) error {
+	cmd.Flags().StringVarP(&options.IngressFilename, "ingress", "i", options.IngressFilename, "[required] filename containing ingress spec")
+	cmd.Flags().StringVarP(&options.KubeconfigFilename, "kubeconfig", "k", options.KubeconfigFilename, "[required] path to kubeconfig file")
+	cmd.Flags().StringSliceVar(&options.KubeContexts, "kubecontexts", options.KubeContexts, "[optional] contexts in the kubeconfig file to remove the ingress from")
+	cmd.Flags().StringVarP(&options.GCPProject, "gcp-project", "", options.GCPProject, "[required] name of the gcp project")
+	cmd.Flags().BoolVarP(&options.ForceUpdate, "force", "f", options.ForceUpdate, "[optional] overwrite existing settings if they are different")
+	cmd.Flags().StringVarP(&options.Namespace, "namespace", "n", options.Namespace, "[optional] namespace for the ingress only if left unspecified by ingress spec")
+	return nil
+}
+
+func validateRemoveClustersArgs(options *removeClustersOptions, args []string) error {
+	if len(args) != 1 {
+		return fmt.Errorf("unexpected args: %v. Expected one arg as name of load balancer.", args)
+	}
+	// Verify that the required params are not missing.
+	if options.IngressFilename == "" {
+		return fmt.Errorf("unexpected missing argument ingress.")
+	}
+	if options.GCPProject == "" {
+		project, err := gcputils.GetProjectFromGCloud()
+		if project == "" || err != nil {
+			return fmt.Errorf("unexpected cannot determine GCP project. Either set --gcp-project flag, or set a default project with gcloud such that gcloud config get-value project returns that")
+		}
+		options.GCPProject = project
+	}
+	if options.KubeconfigFilename == "" {
+		return fmt.Errorf("unexpected missing argument kubeconfig.")
+	}
+	return nil
+}
+
+// runRemoveClusters removes the given load balancer from the given list of clusters.
+func runRemoveClusters(options *removeClustersOptions, args []string) error {
+	options.LBName = args[0]
+
+	// Unmarshal the YAML into ingress struct.
+	var ing v1beta1.Ingress
+	if err := ingress.UnmarshallAndApplyDefaults(options.IngressFilename, options.Namespace, &ing); err != nil {
+		return fmt.Errorf("error in unmarshalling the yaml file %s, err: %s", options.IngressFilename, err)
+	}
+	cloudInterface, err := cloudinterface.NewGCECloudInterface(options.GCPProject)
+	if err != nil {
+		err := fmt.Errorf("error in creating cloud interface: %s", err)
+		fmt.Println(err)
+		return err
+	}
+	// Get clients for all clusters.
+	clients, err := kubeutils.GetClients(options.KubeconfigFilename, options.KubeContexts)
+	if err != nil {
+		return err
+	}
+
+	lbs, err := gcplb.NewLoadBalancerSyncer(options.LBName, clients, cloudInterface, options.GCPProject)
+	if err != nil {
+		return err
+	}
+	if delErr := lbs.RemoveFromClusters(&ing, clients, options.ForceUpdate); delErr != nil {
+		err = multierror.Append(err, delErr)
+	}
+
+	// Delete the ingress resource from the clusters, aggregating rather than overwriting earlier errors.
+	ingErr := ingress.NewIngressSyncer().DeleteIngress(&ing, clients)
+	if ingErr != nil {
+		err = multierror.Append(err, ingErr)
+	}
+	return err
+}
diff --git a/app/kubemci/pkg/gcp/backendservice/backendservicesyncer.go b/app/kubemci/pkg/gcp/backendservice/backendservicesyncer.go
index a9d826135..aefa86902 100644
--- a/app/kubemci/pkg/gcp/backendservice/backendservicesyncer.go
+++ b/app/kubemci/pkg/gcp/backendservice/backendservicesyncer.go
@@ -89,6 +89,38 @@ func (b *BackendServiceSyncer) DeleteBackendServices(ports []ingressbe.ServicePo
 	return nil
 }
 
+// See interface for comment.
+func (b *BackendServiceSyncer) RemoveFromClusters(ports []ingressbe.ServicePort, removeIGLinks []string) error {
+	fmt.Println("Removing backend services from clusters")
+	var err error
+	for _, p := range ports {
+		if beErr := b.removeFromClusters(p, removeIGLinks); beErr != nil {
+			beErr = fmt.Errorf("Error %s in removing backend service for port %v", beErr, p)
+			fmt.Printf("Error in removing backend service for port %v: %v. Continuing.\n", p, beErr)
+			// Try removing backend services for all ports and return all errors at once.
+			err = multierror.Append(err, beErr)
+			continue
+		}
+	}
+	return err
+}
+
+func (b *BackendServiceSyncer) removeFromClusters(port ingressbe.ServicePort, removeIGLinks []string) error {
+	name := b.namer.BeServiceName(port.Port)
+
+	existingBE, err := b.bsp.GetGlobalBackendService(name)
+	if err != nil {
+		err := fmt.Errorf("error in fetching existing backend service %s: %s", name, err)
+		fmt.Println(err)
+		return err
+	}
+	// Remove clusters from the backend service.
+	desiredBE := b.desiredBackendServiceWithoutClusters(existingBE, removeIGLinks)
+	glog.V(5).Infof("Existing backend service: %v\n, desired: %v\n", *existingBE, *desiredBE)
+	_, err = b.updateBackendService(desiredBE)
+	return err
+}
+
 func (b *BackendServiceSyncer) deleteBackendService(port ingressbe.ServicePort) error {
 	name := b.namer.BeServiceName(port.Port)
 	glog.V(2).Infof("Deleting backend service %s", name)
@@ -238,6 +270,24 @@ func (b *BackendServiceSyncer) desiredBackendService(lbName string, port ingress
 	}
 }
 
+func (b *BackendServiceSyncer) desiredBackendServiceWithoutClusters(existingBE *compute.BackendService, removeIGLinks []string) *compute.BackendService {
+	// Compute the backends to be removed.
+	removeBackends := desiredBackends(removeIGLinks)
+	removeBackendsMap := map[string]bool{}
+	for _, v := range removeBackends {
+		removeBackendsMap[v.Group] = true
+	}
+	var newBackends []*compute.Backend
+	for _, v := range existingBE.Backends {
+		if !removeBackendsMap[v.Group] {
+			newBackends = append(newBackends, v)
+		}
+	}
+	desiredBE := existingBE
+	desiredBE.Backends = newBackends
+	return desiredBE
+}
+
 func desiredBackends(igLinks []string) []*compute.Backend {
 	// Sort the slice so we get determistic results.
 	sort.Strings(igLinks)
diff --git a/app/kubemci/pkg/gcp/backendservice/backendservicesyncer_test.go b/app/kubemci/pkg/gcp/backendservice/backendservicesyncer_test.go
index add7bc0e0..b72d94e59 100644
--- a/app/kubemci/pkg/gcp/backendservice/backendservicesyncer_test.go
+++ b/app/kubemci/pkg/gcp/backendservice/backendservicesyncer_test.go
@@ -166,3 +166,63 @@ func TestDeleteBackendService(t *testing.T) {
 		t.Errorf("unexpected nil error, expected NotFound")
 	}
 }
+
+func TestRemoveFromClusters(t *testing.T) {
+	lbName := "lb-name"
+	port := int64(32211)
+	portName := "portName"
+	ig1Link := "ig1Link"
+	ig2Link := "ig2Link"
+	hcLink := "hcLink"
+	kubeSvcName := "ingress-svc"
+	// Should create the backend service as expected.
+	bsp := ingressbe.NewFakeBackendServices(func(op int, be *compute.BackendService) error { return nil })
+	namer := utilsnamer.NewNamer("mci1", lbName)
+	beName := namer.BeServiceName(port)
+	bss := NewBackendServiceSyncer(namer, bsp)
+	ports := []ingressbe.ServicePort{
+		{
+			Port:     port,
+			Protocol: "HTTP",
+			SvcName:  types.NamespacedName{Name: kubeSvcName},
+		},
+	}
+	// Create the backend service for 2 clusters.
+	if _, err := bss.EnsureBackendService(lbName, ports, healthcheck.HealthChecksMap{
+		port: &compute.HealthCheck{
+			SelfLink: hcLink,
+		},
+	}, NamedPortsMap{
+		port: &compute.NamedPort{
+			Port: port,
+			Name: portName,
+		},
+	}, []string{ig1Link, ig2Link}, false /*forceUpdate*/); err != nil {
+		t.Fatalf("expected no error in ensuring backend service, actual: %v", err)
+	}
+	be, err := bsp.GetGlobalBackendService(beName)
+	if err != nil {
+		t.Fatalf("expected nil error, actual: %v", err)
+	}
+	// Verify that the backend service has 2 backends, one for each cluster.
+	if len(be.Backends) != 2 {
+		t.Fatalf("expected the backend service to have 2 backends for igs [%s %s], actual: %v", ig1Link, ig2Link, be.Backends)
+	}
+
+	// Verify that the backend for the second cluster is removed after running RemoveFromClusters.
+	if err := bss.RemoveFromClusters(ports, []string{ig2Link}); err != nil {
+		t.Fatalf("unexpected error in removing from clusters: %s", err)
+	}
+	be, err = bsp.GetGlobalBackendService(beName)
+	if err != nil {
+		t.Fatalf("expected nil error, actual: %v", err)
+	}
+	if len(be.Backends) != 1 {
+		t.Fatalf("expected the backend service to have 1 backend for ig: %s, actual: %v", ig1Link, be.Backends)
+	}
+
+	// Cleanup
+	if err := bss.DeleteBackendServices(ports); err != nil {
+		t.Fatalf("unexpected error in deleting backend services: %s", err)
+	}
+}
diff --git a/app/kubemci/pkg/gcp/backendservice/fake_backendservicesyncer.go b/app/kubemci/pkg/gcp/backendservice/fake_backendservicesyncer.go
index 590715399..5fbfac0e1 100644
--- a/app/kubemci/pkg/gcp/backendservice/fake_backendservicesyncer.go
+++ b/app/kubemci/pkg/gcp/backendservice/fake_backendservicesyncer.go
@@ -61,3 +61,8 @@ func (h *FakeBackendServiceSyncer) DeleteBackendServices(ports []ingressbe.Servi
 	h.EnsuredBackendServices = nil
 	return nil
 }
+
+func (h *FakeBackendServiceSyncer) RemoveFromClusters(ports []ingressbe.ServicePort, removeIGLinks []string) error {
+	// TODO: Implement this.
+	return nil
+}
diff --git a/app/kubemci/pkg/gcp/backendservice/interfaces.go b/app/kubemci/pkg/gcp/backendservice/interfaces.go
index d8033234e..fb62e3fe5 100644
--- a/app/kubemci/pkg/gcp/backendservice/interfaces.go
+++ b/app/kubemci/pkg/gcp/backendservice/interfaces.go
@@ -37,4 +37,6 @@ type BackendServiceSyncerInterface interface {
 	EnsureBackendService(lbName string, ports []ingressbe.ServicePort, hcMap healthcheck.HealthChecksMap, npMap NamedPortsMap, igLinks []string, forceUpdate bool) (BackendServicesMap, error)
 	// DeleteBackendServices deletes all backend services that would have been created by EnsureBackendService.
 	DeleteBackendServices(ports []ingressbe.ServicePort) error
+	// RemoveFromClusters removes the clusters corresponding to the given removeIGLinks from the existing backend services corresponding to the given ports.
+	RemoveFromClusters(ports []ingressbe.ServicePort, removeIGLinks []string) error
 }
diff --git a/app/kubemci/pkg/gcp/firewallrule/fake_firewallrulesyncer.go b/app/kubemci/pkg/gcp/firewallrule/fake_firewallrulesyncer.go
index b8f10da35..0465a6280 100644
--- a/app/kubemci/pkg/gcp/firewallrule/fake_firewallrulesyncer.go
+++ b/app/kubemci/pkg/gcp/firewallrule/fake_firewallrulesyncer.go
@@ -50,3 +50,20 @@ func (h *FakeFirewallRuleSyncer) DeleteFirewallRules() error {
 	h.EnsuredFirewallRules = nil
 	return nil
 }
+
+func (h *FakeFirewallRuleSyncer) RemoveFromClusters(lbName string, removeIGLinks map[string][]string) error {
+	for _, v := range h.EnsuredFirewallRules {
+		if v.LBName != lbName {
+			continue
+		}
+		newIGLinks := map[string][]string{}
+		for clusterName, igValues := range v.IGLinks {
+			if _, has := removeIGLinks[clusterName]; !has {
+				newIGLinks[clusterName] = igValues
+			}
+		}
+		v.IGLinks = newIGLinks
+	}
+	h.EnsuredFirewallRules = nil
+	return nil
+}
diff --git a/app/kubemci/pkg/gcp/firewallrule/firewallrulesyncer.go b/app/kubemci/pkg/gcp/firewallrule/firewallrulesyncer.go
index 2ffb83ed4..6cfc0b58e 100644
--- a/app/kubemci/pkg/gcp/firewallrule/firewallrulesyncer.go
+++ b/app/kubemci/pkg/gcp/firewallrule/firewallrulesyncer.go
@@ -87,6 +87,34 @@ func (s *FirewallRuleSyncer) DeleteFirewallRules() error {
 	}
 }
 
+// See interface comment.
+func (s *FirewallRuleSyncer) RemoveFromClusters(lbName string, removeIGLinks map[string][]string) error {
+	fmt.Println("Removing clusters from firewall rule")
+	glog.V(5).Infof("Received instance groups: %v", removeIGLinks)
+	err := s.removeFromClusters(lbName, removeIGLinks)
+	if err != nil {
+		return fmt.Errorf("Error %s in removing clusters from firewall rule", err)
+	}
+	return nil
+}
+
+func (s *FirewallRuleSyncer) removeFromClusters(lbName string, removeIGLinks map[string][]string) error {
+	name := s.namer.FirewallRuleName()
+	existingFW, err := s.fwp.GetFirewall(name)
+	if err != nil {
+		err := fmt.Errorf("error in fetching existing firewall rule %s: %s", name, err)
+		fmt.Println(err)
+		return err
+	}
+	// Remove clusters from the firewall rule.
+	desiredFW, err := s.desiredFirewallRuleWithoutClusters(existingFW, removeIGLinks)
+	if err != nil {
+		return err
+	}
+	glog.V(5).Infof("Existing firewall rule: %v\n, desired firewall rule: %v\n", *existingFW, *desiredFW)
+	return s.updateFirewallRule(desiredFW)
+}
+
 // ensureFirewallRule ensures that the required firewall rule exists for the given ports.
 // Does nothing if it exists already, else creates a new one.
 func (s *FirewallRuleSyncer) ensureFirewallRule(lbName string, ports []ingressbe.ServicePort, igLinks map[string][]string, forceUpdate bool) error {
@@ -215,6 +243,45 @@ func (s *FirewallRuleSyncer) desiredFirewallRule(lbName string, ports []ingressb
 	}, nil
 }
 
+func (s *FirewallRuleSyncer) desiredFirewallRuleWithoutClusters(existingFW *compute.Firewall, removeIGLinks map[string][]string) (*compute.Firewall, error) {
+	// Compute the target tags that need to be removed for the given instance groups.
+	instances, err := s.getInstances(removeIGLinks)
+	if err != nil {
+		return nil, err
+	}
+	// This assumes that the ordering of target tags has not changed on these instances.
+	// A potential solution to that problem is to recompute the target tags for the existing clusters,
+	// rather than removing the ones for old clusters.
+	// But we do not want to change existing tags for clusters that are already working.
+	// Ideally network tags should only be appended to, so this should not be a problem.
+	// TODO(nikhiljindal): Fix this if it becomes a problem.
+	targetTags, err := s.getTargetTags(instances)
+	if err != nil {
+		return nil, err
+	}
+	existingTargetTags := existingFW.TargetTags
+	glog.V(3).Infof("Removing target tags %q from existing target tags %q", targetTags, existingTargetTags)
+	// Compute target tags as existingTargetTags - targetTags.
+	// Note: This assumes that all target tags are unique and no two instances from different clusters share the same target tag.
+	// If that is false, then opening the firewall rule for instances in one cluster will open it for instances in other clusters as well.
+	// Similarly, removing one cluster will remove the other cluster as well.
+	removeTags := map[string]bool{}
+	for _, v := range targetTags {
+		removeTags[v] = true
+	}
+	var newTargetTags []string
+	for _, v := range existingTargetTags {
+		if !removeTags[v] {
+			newTargetTags = append(newTargetTags, v)
+		}
+	}
+	// Sort the tags to have a deterministic order.
+	sort.Strings(newTargetTags)
+	desiredFW := existingFW
+	desiredFW.TargetTags = newTargetTags
+	return desiredFW, nil
+}
+
 // Returns an array of instances, with an instance from each cluster.
 func (s *FirewallRuleSyncer) getInstances(igLinks map[string][]string) ([]*compute.Instance, error) {
 	var instances []*compute.Instance
diff --git a/app/kubemci/pkg/gcp/firewallrule/firewallrulesyncer_test.go b/app/kubemci/pkg/gcp/firewallrule/firewallrulesyncer_test.go
index 0ff158784..136362cbd 100644
--- a/app/kubemci/pkg/gcp/firewallrule/firewallrulesyncer_test.go
+++ b/app/kubemci/pkg/gcp/firewallrule/firewallrulesyncer_test.go
@@ -107,7 +107,7 @@ func TestEnsureFirewallRule(t *testing.T) {
 		allowed, _ := json.Marshal(fw.Allowed)
 		t.Errorf("unexpected allowed, expected only one port item with port %s, got: %s", expectedPort, allowed)
 	}
-	networkTag := instances.FakeInstance.Tags.Items[0]
+	networkTag := igLink
 	if len(fw.TargetTags) != 1 || fw.TargetTags[0] != networkTag {
 		t.Errorf("unexpected target tags in firewall rule, expected only on item for %s, got: %v", networkTag, fw.TargetTags)
 	}
@@ -154,3 +154,62 @@ func TestDeleteFirewallRule(t *testing.T) {
 		t.Errorf("unexpected nil error, expected NotFound")
 	}
 }
+
+func TestRemoveFromClusters(t *testing.T) {
+	lbName := "lb-name"
+	port := int64(32211)
+	kubeSvcName := "svc-name"
+	ig1Link := "https://www.googleapis.com/compute/v1/projects/abc/zones/def/instanceGroups/ig1"
+	ig2Link := "https://www.googleapis.com/compute/v1/projects/abc/zones/def/instanceGroups/ig2"
+	// Should create the firewall rule as expected.
+	fwp := ingressfw.NewFakeFirewallsProvider(false /* xpn */, false /* read only */)
+	ig := instances.NewFakeInstanceGetter()
+	namer := utilsnamer.NewNamer("mci1", lbName)
+	fwName := namer.FirewallRuleName()
+	fws := NewFirewallRuleSyncer(namer, fwp, ig)
+	// GET should return NotFound.
+	if _, err := fwp.GetFirewall(fwName); err == nil {
+		t.Fatalf("expected NotFound error, actual: nil")
+	}
+	// Create the firewall rule for 2 clusters.
+	igLinks := map[string][]string{
+		"cluster1": {ig1Link},
+		"cluster2": {ig2Link},
+	}
+	err := fws.EnsureFirewallRule(lbName, []ingressbe.ServicePort{
+		{
+			Port:     port,
+			Protocol: "HTTP",
+			SvcName:  types.NamespacedName{Name: kubeSvcName},
+		},
+	}, igLinks, false /*forceUpdate*/)
+	if err != nil {
+		t.Fatalf("expected no error in ensuring firewall rule, actual: %v", err)
+	}
+	// Verify that the created firewall rule is as expected.
+	fw, err := fwp.GetFirewall(fwName)
+	if err != nil {
+		t.Fatalf("expected nil error, actual: %v", err)
+	}
+	// Verify that the firewall rule has 2 target tags, one for each cluster.
+	if len(fw.TargetTags) != 2 {
+		t.Fatalf("expected the ensured firewall rule to have 2 target tags [%s %s], actual: %q", ig1Link, ig2Link, fw.TargetTags)
+	}
+
+	// Verify that the target tag for the second cluster is removed after running RemoveFromClusters.
+	if err := fws.RemoveFromClusters(lbName, map[string][]string{"cluster2": {ig2Link}}); err != nil {
+		t.Fatalf("unexpected error in removing cluster from existing firewall rule: %s", err)
+	}
+	fw, err = fwp.GetFirewall(fwName)
+	if err != nil {
+		t.Fatalf("expected nil error in fetching firewall rule, actual: %v", err)
+	}
+	if len(fw.TargetTags) != 1 || fw.TargetTags[0] != ig1Link {
+		t.Fatalf("expected the updated firewall rule to have just one target tag %s, actual: %q", ig1Link, fw.TargetTags)
+	}
+
+	// Cleanup
+	if err := fws.DeleteFirewallRules(); err != nil {
+		t.Fatalf("unexpected error in deleting firewall rules: %s", err)
+	}
+}
diff --git a/app/kubemci/pkg/gcp/firewallrule/interfaces.go b/app/kubemci/pkg/gcp/firewallrule/interfaces.go
index 0d2e80d3a..b8dda5246 100644
--- a/app/kubemci/pkg/gcp/firewallrule/interfaces.go
+++ b/app/kubemci/pkg/gcp/firewallrule/interfaces.go
@@ -25,4 +25,6 @@ type FirewallRuleSyncerInterface interface {
 	EnsureFirewallRule(lbName string, ports []ingressbe.ServicePort, igLinks map[string][]string, forceUpdate bool) error
 	// DeleteFirewallRules deletes all firewall rules that would have been created by EnsureFirewallRule.
 	DeleteFirewallRules() error
+	// RemoveFromClusters removes the clusters corresponding to the given instance groups from the firewall rule.
+	RemoveFromClusters(lbName string, removeIGLinks map[string][]string) error
 }
diff --git a/app/kubemci/pkg/gcp/forwardingrule/fake_forwardingrulesyncer.go b/app/kubemci/pkg/gcp/forwardingrule/fake_forwardingrulesyncer.go
index 9d1928f38..8a22fd9e7 100644
--- a/app/kubemci/pkg/gcp/forwardingrule/fake_forwardingrulesyncer.go
+++ b/app/kubemci/pkg/gcp/forwardingrule/fake_forwardingrulesyncer.go
@@ -92,3 +92,8 @@ func (f *FakeForwardingRuleSyncer) ListLoadBalancerStatuses() ([]status.LoadBala
 	}
 	return ret, nil
 }
+
+func (f *FakeForwardingRuleSyncer) RemoveClustersFromStatus(clusters []string) error {
+	// TODO: Implement this.
+	return nil
+}
diff --git a/app/kubemci/pkg/gcp/forwardingrule/forwardingrulesyncer.go b/app/kubemci/pkg/gcp/forwardingrule/forwardingrulesyncer.go
index 1114aabd0..602b6951b 100644
--- a/app/kubemci/pkg/gcp/forwardingrule/forwardingrulesyncer.go
+++ b/app/kubemci/pkg/gcp/forwardingrule/forwardingrulesyncer.go
@@ -204,6 +204,42 @@ func (s *ForwardingRuleSyncer) ListLoadBalancerStatuses() ([]status.LoadBalancer
 	return result, err
 }
 
+// See interface for comment.
+func (s *ForwardingRuleSyncer) RemoveClustersFromStatus(clusters []string) error {
+	// Update HTTPS status first and then HTTP.
+	// This ensures that get-status returns the correct status even if HTTPS is updated but updating HTTP fails.
+	// This is because get-status reads from HTTP if it exists.
+	// Also note that it continues silently if either HTTP or HTTPS forwarding rules do not exist.
+	if err := s.removeClustersFromStatus("https", s.namer.HttpsForwardingRuleName(), clusters); err != nil {
+		return fmt.Errorf("unexpected error in updating the https forwarding rule status: %s", err)
+	}
+	return s.removeClustersFromStatus("http", s.namer.HttpForwardingRuleName(), clusters)
+}
+
+// removeClustersFromStatus removes the given clusters from the status stored on the named forwarding rule.
+func (s *ForwardingRuleSyncer) removeClustersFromStatus(httpProtocol, name string, clusters []string) error {
+	fmt.Println("Removing clusters", clusters, "from", httpProtocol, "forwarding rule")
+	existingFR, err := s.frp.GetGlobalForwardingRule(name)
+	if err != nil {
+		// Ignore NotFound error.
+		if utils.IsHTTPErrorCode(err, http.StatusNotFound) {
+			fmt.Println(httpProtocol, "forwarding rule not found. Nothing to update. Continuing")
+			return nil
+		}
+		err := fmt.Errorf("error in fetching existing forwarding rule %s: %s", name, err)
+		fmt.Println(err)
+		return err
+	}
+	// Remove clusters from the forwarding rule.
+	desiredFR, err := s.desiredForwardingRuleWithoutClusters(existingFR, clusters)
+	if err != nil {
+		fmt.Println("Error getting desired forwarding rule:", err)
+		return err
+	}
+	glog.V(5).Infof("Existing forwarding rule: %v\n, desired forwarding rule: %v\n", *existingFR, *desiredFR)
+	return s.updateForwardingRule(existingFR, desiredFR)
+}
+
 func (s *ForwardingRuleSyncer) updateForwardingRule(existingFR, desiredFR *compute.ForwardingRule) error {
 	name := desiredFR.Name
 	fmt.Println("Updating existing forwarding rule", name, "to match the desired state")
@@ -282,3 +318,29 @@ func (s *ForwardingRuleSyncer) desiredForwardingRule(lbName, ipAddress, targetPr
 		LoadBalancingScheme: "EXTERNAL",
 	}, nil
 }
+
+func (s *ForwardingRuleSyncer) desiredForwardingRuleWithoutClusters(existingFR *compute.ForwardingRule, removeClusters []string) (*compute.ForwardingRule, error) {
+	statusStr := existingFR.Description
+	status, err := status.FromString(statusStr)
+	if err != nil {
+		return nil, fmt.Errorf("error in parsing string %s: %s", statusStr, err)
+	}
+	removeMap := map[string]bool{}
+	for _, v := range removeClusters {
+		removeMap[v] = true
+	}
+	var newClusters []string
+	for _, v := range status.Clusters {
+		if !removeMap[v] {
+			newClusters = append(newClusters, v)
+		}
+	}
+	status.Clusters = newClusters
+	newDesc, err := status.ToString()
+	if err != nil {
+		return nil, fmt.Errorf("unexpected error in generating the description for forwarding rule: %s", err)
+	}
+	desiredFR := existingFR
+	desiredFR.Description = newDesc
+	return desiredFR, nil
+}
diff --git a/app/kubemci/pkg/gcp/forwardingrule/forwardingrulesyncer_test.go b/app/kubemci/pkg/gcp/forwardingrule/forwardingrulesyncer_test.go
index 09e74a27c..23466337c 100644
--- a/app/kubemci/pkg/gcp/forwardingrule/forwardingrulesyncer_test.go
+++ b/app/kubemci/pkg/gcp/forwardingrule/forwardingrulesyncer_test.go
@@ -15,6 +15,7 @@
 package forwardingrule
 
 import (
+	"fmt"
 	"reflect"
 	"testing"
 
@@ -522,3 +523,94 @@ func TestGetLoadBalancerStatus(t *testing.T) {
 		}
 	}
 }
+
+// Tests that RemoveClustersFromStatus removes the given clusters from the status stored in the forwarding rule's description.
+func TestRemoveClustersFromStatus(t *testing.T) {
+	lbName := "lb-name"
+	ipAddr := "1.2.3.4"
+	tpLink := "fakeLink"
+	clusters := []string{"cluster1", "cluster2"}
+	// Should create the forwarding rule as expected.
+	frp := ingresslb.NewFakeLoadBalancers("" /*name*/, nil /*namer*/)
+	namer := utilsnamer.NewNamer("mci1", lbName)
+	frs := NewForwardingRuleSyncer(namer, frp)
+	testCases := []struct {
+		// Should the http forwarding rule be created.
+		http bool
+		// Should the https forwarding rule be created.
+		https     bool
+		shouldErr bool
+	}{
+		{
+			http:      true,
+			https:     false,
+			shouldErr: false,
+		},
+		{
+			http:      false,
+			https:     true,
+			shouldErr: false,
+		},
+		{
+			http:      true,
+			https:     true,
+			shouldErr: false,
+		},
+		{
+			http:      false,
+			https:     false,
+			shouldErr: true,
+		},
+	}
+	for i, c := range testCases {
+		glog.Infof("===============testing case: %d=================", i)
+		if c.http {
+			// Ensure http forwarding rule.
+			if err := frs.EnsureHttpForwardingRule(lbName, ipAddr, tpLink, clusters, false /*force*/); err != nil {
+				t.Errorf("expected no error in ensuring http forwarding rule, actual: %v", err)
+			}
+		}
+		if c.https {
+			// Ensure https forwarding rule.
+			if err := frs.EnsureHttpsForwardingRule(lbName, ipAddr, tpLink, clusters, false /*force*/); err != nil {
+				t.Errorf("expected no error in ensuring https forwarding rule, actual: %v", err)
+			}
+		}
+		if err := verifyClusters(lbName, frs, c.shouldErr, clusters); err != nil {
+			t.Errorf("%s", err)
+		}
+		// Update status to remove one cluster.
+		if err := frs.RemoveClustersFromStatus([]string{"cluster1"}); err != nil {
+			t.Errorf("unexpected error in updating status to remove clusters: %s", err)
+		}
+		// Verify that the status description now has only one cluster.
+		if err := verifyClusters(lbName, frs, c.shouldErr, []string{"cluster2"}); err != nil {
+			t.Errorf("%s", err)
+		}
+
+		if !c.shouldErr {
+			// Delete the forwarding rules if we created at least one.
+			if c.http || c.https {
+				if err := frs.DeleteForwardingRules(); err != nil {
+					t.Errorf("unexpected error in deleting forwarding rules: %s", err)
+				}
+			}
+		}
+	}
+}
+
+// verifyClusters verifies that the given load balancer has the expected clusters in its status.
+// Returns an error otherwise.
+func verifyClusters(lbName string, frs ForwardingRuleSyncerInterface, shouldErr bool, expectedClusters []string) error {
+	status, err := frs.GetLoadBalancerStatus(lbName)
+	if shouldErr != (err != nil) {
+		return fmt.Errorf("unexpected error in getting status description for load balancer %s, expected err != nil: %v, actual err: %s", lbName, shouldErr, err)
+	}
+	if !shouldErr && err == nil {
+		// Verify that the status description has the expected clusters.
+		if !reflect.DeepEqual(status.Clusters, expectedClusters) {
+			return fmt.Errorf("unexpected list of clusters, expected: %v, got: %v", expectedClusters, status.Clusters)
+		}
+	}
+	return nil
+}
diff --git a/app/kubemci/pkg/gcp/forwardingrule/interfaces.go b/app/kubemci/pkg/gcp/forwardingrule/interfaces.go
index 7127fbe9d..56278be75 100644
--- a/app/kubemci/pkg/gcp/forwardingrule/interfaces.go
+++ b/app/kubemci/pkg/gcp/forwardingrule/interfaces.go
@@ -31,8 +31,11 @@ type ForwardingRuleSyncerInterface interface {
 	// DeleteForwardingRules deletes the forwarding rules that
 	// EnsureHttpForwardingRule and EnsureHttpsForwardingRule would have created.
 	DeleteForwardingRules() error
 
+	// GetLoadBalancerStatus returns the struct describing the status of the given load balancer.
 	GetLoadBalancerStatus(lbName string) (*status.LoadBalancerStatus, error)
 	// ListLoadBalancerStatuses returns status of all MCI ingresses (load balancers).
 	ListLoadBalancerStatuses() ([]status.LoadBalancerStatus, error)
+	// RemoveClustersFromStatus removes the given clusters from the LoadBalancerStatus.
+	RemoveClustersFromStatus(clusters []string) error
 }
diff --git a/app/kubemci/pkg/gcp/instances/fake_instancegetter.go b/app/kubemci/pkg/gcp/instances/fake_instancegetter.go
index 72dbe6796..27f8d31eb 100644
--- a/app/kubemci/pkg/gcp/instances/fake_instancegetter.go
+++ b/app/kubemci/pkg/gcp/instances/fake_instancegetter.go
@@ -18,12 +18,14 @@ import (
 	compute "google.golang.org/api/compute/v1"
 )
 
-var FakeInstance = &compute.Instance{
-	Name: "fake-instance",
-	Zone: "my-zone",
-	Tags: &compute.Tags{
-		Items: []string{"fake-tag"},
-	},
+func newInstance(igUrl string) *compute.Instance {
+	return &compute.Instance{
+		Name: igUrl,
+		Zone: igUrl,
+		Tags: &compute.Tags{
+			Items: []string{igUrl},
+		},
+	}
 }
 
 func NewFakeInstanceGetter() InstanceGetterInterface {
@@ -38,5 +40,5 @@ type FakeInstanceGetter struct {
 var _ InstanceGetterInterface = &FakeInstanceGetter{}
 
 func (g *FakeInstanceGetter) GetInstance(igUrl string) (*compute.Instance, error) {
-	return FakeInstance, nil
+	return newInstance(igUrl), nil
 }
diff --git a/app/kubemci/pkg/gcp/loadbalancer/loadbalancersyncer.go b/app/kubemci/pkg/gcp/loadbalancer/loadbalancersyncer.go
index 6b776c8c0..59a9d497c 100644
--- a/app/kubemci/pkg/gcp/loadbalancer/loadbalancersyncer.go
+++ b/app/kubemci/pkg/gcp/loadbalancer/loadbalancersyncer.go
@@ -127,10 +127,22 @@ func (l *LoadBalancerSyncer) CreateLoadBalancer(ing *v1beta1.Ingress, forceUpdat
 		fmt.Println(hcErr)
 		err = multierror.Append(err, hcErr)
 	}
-	igs, namedPorts, igErr := l.getIGsAndNamedPorts(ing)
-	// Cant really create any backend service without named ports. No point continuing.
+	igs, igErr := l.getIGs(ing, l.clients)
 	if igErr != nil {
-		return multierror.Append(err, igErr)
+		err = multierror.Append(err, igErr)
+	}
+	namedPorts, portsErr := l.getNamedPorts(igs)
+	if portsErr != nil {
+		err = multierror.Append(err, portsErr)
+	}
+	// Cannot really create any backend service without named ports and instance groups. No point continuing.
+	if len(igs) == 0 {
+		err = multierror.Append(err, fmt.Errorf("No instance group found. Can not continue"))
+		return err
+	}
+	if len(namedPorts) == 0 {
+		err = multierror.Append(err, fmt.Errorf("No named ports found. Can not continue"))
+		return err
 	}
 	igsForBE := []string{}
 	for k := range igs {
@@ -249,6 +261,58 @@ func (l *LoadBalancerSyncer) DeleteLoadBalancer(ing *v1beta1.Ingress) error {
 	return err
 }
 
+// RemoveFromClusters removes the given ingress from the clusters corresponding to the given clients, and updates the GCP load balancer resources accordingly.
+func (l *LoadBalancerSyncer) RemoveFromClusters(ing *v1beta1.Ingress, removeClients map[string]kubeclient.Interface, forceUpdate bool) error {
+	// TODO(nikhiljindal): Don't require the ingress yaml from users. Just the name should be enough. We can fetch ingress YAML from one of the clusters.
+	client, cErr := getAnyClient(l.clients)
+	if cErr != nil {
+		// No point in continuing without a client.
+		return cErr
+	}
+	var err error
+	ports := l.ingToNodePorts(ing, client)
+	removeIGLinks, igErr := l.getIGs(ing, removeClients)
+	if igErr != nil {
+		return multierror.Append(err, igErr)
+	}
+	// Cannot really update any resource without igs. No point continuing.
+	// Note: Users can run into this if they are trying to remove clusters that have already been deleted.
+	// TODO: Allow them to proceed to clean up whatever they can by using --force.
+	if len(removeIGLinks) == 0 {
+		err := multierror.Append(err, fmt.Errorf("No instance group found. Can not continue"))
+		return err
+	}
+
+	if fwErr := l.fws.RemoveFromClusters(l.lbName, removeIGLinks); fwErr != nil {
+		// Aggregate errors and return all at the end.
+		err = multierror.Append(err, fwErr)
+	}
+	igsForBE := []string{}
+	for k := range removeIGLinks {
+		igsForBE = append(igsForBE, removeIGLinks[k]...)
+	}
+
+	if beErr := l.bss.RemoveFromClusters(ports, igsForBE); beErr != nil {
+		// Aggregate errors and return all at the end.
+		err = multierror.Append(err, beErr)
+	}
+
+	// Convert the client map to an array of cluster names.
+	removeClusters := make([]string, len(removeClients))
+	removeIndex := 0
+	for k := range removeClients {
+		removeClusters[removeIndex] = k
+		removeIndex++
+	}
+	// Update the forwarding rule status at the end.
+	if frErr := l.frs.RemoveClustersFromStatus(removeClusters); frErr != nil {
+		// Aggregate errors and return all at the end.
+		err = multierror.Append(err, frErr)
+	}
+
+	return err
+}
+
 // PrintStatus prints the current status of the load balancer.
 func (l *LoadBalancerSyncer) PrintStatus() (string, error) {
 	sd, err := l.frs.GetLoadBalancerStatus(l.lbName)
@@ -298,39 +362,42 @@ func (l *LoadBalancerSyncer) getIPAddress(ing *v1beta1.Ingress) (string, error)
 	return ip.Address, nil
 }
 
-// Returns links to all instance groups and named ports on any one of them.
-// Note that it picks an instance group at random and returns the named ports for that instance group, assuming that the named ports are same on all instance groups.
-// Also note that this returns all named ports on the instance group and not just the ones relevant to the given ingress.
-func (l *LoadBalancerSyncer) getIGsAndNamedPorts(ing *v1beta1.Ingress) (map[string][]string, backendservice.NamedPortsMap, error) {
+// Returns links to all instance groups corresponding to the given ingress for the given clients.
+func (l *LoadBalancerSyncer) getIGs(ing *v1beta1.Ingress, clients map[string]kubeclient.Interface) (map[string][]string, error) {
 	var err error
 	igs := make(map[string][]string, len(l.clients))
-	var igFromLastCluster string
 	// Get instance groups from all clusters.
-	for cluster, client := range l.clients {
-		igsFromCluster, getIGsErr := l.getIGs(ing, client, cluster)
+	for cluster, client := range clients {
+		igsFromCluster, getIGsErr := getIGsForCluster(ing, client, cluster)
 		if getIGsErr != nil {
 			err = multierror.Append(err, getIGsErr)
 			continue
 		}
 		igs[cluster] = igsFromCluster
-		igFromLastCluster = igsFromCluster[0]
 	}
+	return igs, err
+}
+
+// Returns named ports on an instance group picked from the given map.
+// Note that it picks an instance group at random and returns the named ports for that instance group, assuming that the named ports are same on all instance groups.
+// Also note that this returns all named ports on the instance group and not just the ones relevant to the given ingress.
+func (l *LoadBalancerSyncer) getNamedPorts(igs map[string][]string) (backendservice.NamedPortsMap, error) {
 	if len(igs) == 0 {
-		err = multierror.Append(err, fmt.Errorf("Cannot fetch named ports since fetching instance groups failed"))
-		return nil, backendservice.NamedPortsMap{}, err
+		return backendservice.NamedPortsMap{}, fmt.Errorf("Cannot fetch named ports since instance groups list is empty")
 	}
-	// Compute named ports for igs from the last cluster.
-	// We can compute it from any instance group, all are expected to have the same named ports.
-	namedPorts, namedPortsErr := l.getNamedPorts(igFromLastCluster)
-	if namedPortsErr != nil {
-		err = multierror.Append(err, namedPortsErr)
+
+	// Pick an IG at random.
+	var ig string
+	for _, v := range igs {
+		ig = v[0]
+		break
 	}
-	return igs, namedPorts, err
+	return l.getNamedPortsForIG(ig)
 }
 
-// Returns the instance groups corresponding to the given ingress.
-// It fetches the given ingress from all clusters and extracts the instance group annotations on them to get the list of instance groups.
-func (l *LoadBalancerSyncer) getIGs(ing *v1beta1.Ingress, client kubeclient.Interface, cluster string) ([]string, error) {
+// Returns the instance groups corresponding to the given cluster.
+// It fetches the given ingress from the cluster and extracts the instance group annotations on it to get the list of instance groups.
+func getIGsForCluster(ing *v1beta1.Ingress, client kubeclient.Interface, cluster string) ([]string, error) {
 	fmt.Println("Determining instance groups for cluster", cluster)
 	key := annotations.InstanceGroupsAnnotationKey
 	// Keep trying until ingress gets the instance group annotation.
@@ -373,7 +440,7 @@ func (l *LoadBalancerSyncer) getIGs(ing *v1beta1.Ingress, client kubeclient.Inte
 	return nil, nil
 }
 
-func (l *LoadBalancerSyncer) getNamedPorts(igUrl string) (backendservice.NamedPortsMap, error) {
+func (l *LoadBalancerSyncer) getNamedPortsForIG(igUrl string) (backendservice.NamedPortsMap, error) {
 	zone, name, err := utils.GetZoneAndNameFromIGUrl(igUrl)
 	if err != nil {
 		return nil, err
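For illustration, with this change built, the new subcommand would be invoked roughly as follows; the load balancer, kubeconfig context, and project names below are hypothetical placeholders:

    kubemci remove-clusters my-mci-lb \
        --ingress=ingress.yaml \
        --kubeconfig=$HOME/.kube/config \
        --kubecontexts=cluster-to-remove \
        --gcp-project=my-gcp-project

This removes the ingress from the clusters selected by --kubecontexts, drops their instance groups from the firewall rule and backend services, and removes them from the cluster list recorded in the forwarding rule's status description.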