diff --git a/cmd/client-keystone-auth/main.go b/cmd/client-keystone-auth/main.go index d8a39d0ffb..5ecf4a0aec 100644 --- a/cmd/client-keystone-auth/main.go +++ b/cmd/client-keystone-auth/main.go @@ -17,6 +17,7 @@ limitations under the License. package main import ( + "context" "fmt" "io" "net/http" @@ -156,7 +157,7 @@ func main() { Use: "client-keystone-auth", Short: "Keystone client credential plugin for Kubernetes", Run: func(cmd *cobra.Command, args []string) { - handle() + handle(context.Background()) }, Version: version.Version, } @@ -177,7 +178,7 @@ func main() { os.Exit(code) } -func handle() { +func handle(ctx context.Context) { // Generate Gophercloud Auth Options based on input data from stdin // if IsTerminal returns "true", or from env variables otherwise. if !term.IsTerminal(int(os.Stdin.Fd())) { @@ -214,7 +215,7 @@ func handle() { options.ClientKeyPath = clientKeyPath options.ClientCAPath = clientCAPath - token, err := keystone.GetToken(options) + token, err := keystone.GetToken(ctx, options) if err != nil { if gophercloud.ResponseCodeIs(err, http.StatusUnauthorized) { fmt.Println(errRespTemplate) diff --git a/cmd/k8s-keystone-auth/main.go b/cmd/k8s-keystone-auth/main.go index e5c4c12d8a..799edcd319 100644 --- a/cmd/k8s-keystone-auth/main.go +++ b/cmd/k8s-keystone-auth/main.go @@ -15,6 +15,7 @@ limitations under the License. package main import ( + "context" "os" "github.com/spf13/cobra" @@ -38,7 +39,7 @@ func main() { os.Exit(1) } - keystoneAuth, err := keystone.NewKeystoneAuth(config) + keystoneAuth, err := keystone.NewKeystoneAuth(context.Background(), config) if err != nil { klog.Errorf("%v", err) os.Exit(1) diff --git a/pkg/autohealing/cloudprovider/cloudprovider.go b/pkg/autohealing/cloudprovider/cloudprovider.go index ef45479bcf..d93780158c 100644 --- a/pkg/autohealing/cloudprovider/cloudprovider.go +++ b/pkg/autohealing/cloudprovider/cloudprovider.go @@ -17,6 +17,8 @@ limitations under the License. package cloudprovider import ( + "context" + "k8s.io/client-go/kubernetes" log "k8s.io/klog/v2" @@ -34,17 +36,17 @@ type CloudProvider interface { GetName() string // Update cluster health status. - UpdateHealthStatus([]healthcheck.NodeInfo, []healthcheck.NodeInfo) error + UpdateHealthStatus(context.Context, []healthcheck.NodeInfo, []healthcheck.NodeInfo) error // Repair triggers the node repair process in the cloud. - Repair([]healthcheck.NodeInfo) error + Repair(context.Context, []healthcheck.NodeInfo) error // Enabled decides if the repair should be triggered. // It's recommended that the `Enabled()` function of the cloud provider doesn't allow to re-trigger when the repair // is in place, e.g. before the repair process is finished, `Enabled()` should return false so that we won't // re-trigger the repair process in the subsequent checks. // This function also provides the cluster admin the capability to disable the cluster auto healing on the fly. - Enabled() bool + Enabled(context.Context) bool } type RegisterFunc func(config config.Config, client kubernetes.Interface) (CloudProvider, error) diff --git a/pkg/autohealing/cloudprovider/openstack/provider.go b/pkg/autohealing/cloudprovider/openstack/provider.go index 0bb093d9e8..2aa06e67e7 100644 --- a/pkg/autohealing/cloudprovider/openstack/provider.go +++ b/pkg/autohealing/cloudprovider/openstack/provider.go @@ -95,8 +95,8 @@ func (provider CloudProvider) GetName() string { } // getStackName finds the name of a stack matching a given ID. 
-func (provider *CloudProvider) getStackName(stackID string) (string, error) { - stack, err := stacks.Find(context.TODO(), provider.Heat, stackID).Extract() +func (provider *CloudProvider) getStackName(ctx context.Context, stackID string) (string, error) { + stack, err := stacks.Find(ctx, provider.Heat, stackID).Extract() if err != nil { return "", err } @@ -108,14 +108,14 @@ func (provider *CloudProvider) getStackName(stackID string) (string, error) { // masters and minions(workers). The key in the map is the server/instance ID // in Nova and the value is the resource ID and name of the server, and the // parent stack ID and name. -func (provider *CloudProvider) getAllStackResourceMapping(stackName, stackID string) (m map[string]ResourceStackRelationship, err error) { +func (provider *CloudProvider) getAllStackResourceMapping(ctx context.Context, stackName, stackID string) (m map[string]ResourceStackRelationship, err error) { if provider.ResourceStackMapping != nil { return provider.ResourceStackMapping, nil } mapping := make(map[string]ResourceStackRelationship) - serverPages, err := stackresources.List(provider.Heat, stackName, stackID, stackresources.ListOpts{Depth: 2}).AllPages(context.TODO()) + serverPages, err := stackresources.List(provider.Heat, stackName, stackID, stackresources.ListOpts{Depth: 2}).AllPages(ctx) if err != nil { return m, err } @@ -266,7 +266,7 @@ func (provider CloudProvider) waitForServerDetachVolumes(serverID string, timeou // will be kept as False, which means the node need to be rebuilt to fix it, otherwise it means the has been processed. // // The bool type return value means that if the node has been processed from a first time repair PoV -func (provider CloudProvider) firstTimeRepair(n healthcheck.NodeInfo, serverID string, firstTimeRebootNodes map[string]healthcheck.NodeInfo) (bool, error) { +func (provider CloudProvider) firstTimeRepair(ctx context.Context, n healthcheck.NodeInfo, serverID string, firstTimeRebootNodes map[string]healthcheck.NodeInfo) (bool, error) { var firstTimeUnhealthy = true for id := range unHealthyNodes { log.V(5).Infof("comparing server ID %s with known broken ID %s", serverID, id) @@ -281,7 +281,7 @@ func (provider CloudProvider) firstTimeRepair(n healthcheck.NodeInfo, serverID s if firstTimeUnhealthy { log.Infof("rebooting node %s to repair it", serverID) - if res := servers.Reboot(context.TODO(), provider.Nova, serverID, servers.RebootOpts{Type: servers.SoftReboot}); res.Err != nil { + if res := servers.Reboot(ctx, provider.Nova, serverID, servers.RebootOpts{Type: servers.SoftReboot}); res.Err != nil { // Usually it means the node is being rebooted log.Warningf("failed to reboot node %s, error: %v", serverID, res.Err) if strings.Contains(res.Err.Error(), "reboot_started") { @@ -351,7 +351,7 @@ func (provider CloudProvider) firstTimeRepair(n healthcheck.NodeInfo, serverID s // - Heat stack ID and resource ID. // // For worker nodes: Call Magnum resize API directly. 
-func (provider CloudProvider) Repair(nodes []healthcheck.NodeInfo) error { +func (provider CloudProvider) Repair(ctx context.Context, nodes []healthcheck.NodeInfo) error { if len(nodes) == 0 { return nil } @@ -370,12 +370,12 @@ func (provider CloudProvider) Repair(nodes []healthcheck.NodeInfo) error { firstTimeRebootNodes := make(map[string]healthcheck.NodeInfo) - err := provider.UpdateHealthStatus(masters, workers) + err := provider.UpdateHealthStatus(ctx, masters, workers) if err != nil { return fmt.Errorf("failed to update the health status of cluster %s, error: %v", clusterName, err) } - cluster, err := clusters.Get(context.TODO(), provider.Magnum, clusterName).Extract() + cluster, err := clusters.Get(ctx, provider.Magnum, clusterName).Extract() if err != nil { return fmt.Errorf("failed to get the cluster %s, error: %v", clusterName, err) } @@ -389,7 +389,7 @@ func (provider CloudProvider) Repair(nodes []healthcheck.NodeInfo) error { continue } - if processed, err := provider.firstTimeRepair(n, serverID, firstTimeRebootNodes); err != nil { + if processed, err := provider.firstTimeRepair(ctx, n, serverID, firstTimeRebootNodes); err != nil { log.Warningf("Failed to process if the node %s is in first time repair , error: %v", serverID, err) } else if processed { log.Infof("Node %s has been processed", serverID) @@ -405,7 +405,7 @@ func (provider CloudProvider) Repair(nodes []healthcheck.NodeInfo) error { } nodesToReplace.Insert(serverID) - ng, err := provider.getNodeGroup(clusterName, n) + ng, err := provider.getNodeGroup(ctx, clusterName, n) ngName := "default-worker" ngNodeCount := &cluster.NodeCount if err == nil { @@ -419,7 +419,7 @@ func (provider CloudProvider) Repair(nodes []healthcheck.NodeInfo) error { NodesToRemove: nodesToReplace.List(), } - clusters.Resize(context.TODO(), provider.Magnum, clusterName, opts) + clusters.Resize(ctx, provider.Magnum, clusterName, opts) // Wait 10 seconds to make sure Magnum has already got the request // to avoid sending all of the resize API calls at the same time. time.Sleep(10 * time.Second) @@ -432,14 +432,14 @@ func (provider CloudProvider) Repair(nodes []healthcheck.NodeInfo) error { log.Infof("Cluster %s resized", clusterName) } } else { - clusterStackName, err := provider.getStackName(cluster.StackID) + clusterStackName, err := provider.getStackName(ctx, cluster.StackID) if err != nil { return fmt.Errorf("failed to get the Heat stack for cluster %s, error: %v", clusterName, err) } // In order to rebuild the nodes by Heat stack update, we need to know the parent stack ID of the resources and // mark them unhealthy first. 
- allMapping, err := provider.getAllStackResourceMapping(clusterStackName, cluster.StackID) + allMapping, err := provider.getAllStackResourceMapping(ctx, clusterStackName, cluster.StackID) if err != nil { return fmt.Errorf("failed to get the resource stack mapping for cluster %s, error: %v", clusterName, err) } @@ -456,7 +456,7 @@ func (provider CloudProvider) Repair(nodes []healthcheck.NodeInfo) error { continue } - if processed, err := provider.firstTimeRepair(n, serverID, firstTimeRebootNodes); err != nil { + if processed, err := provider.firstTimeRepair(ctx, n, serverID, firstTimeRebootNodes); err != nil { log.Warningf("Failed to process if the node %s is in first time repair , error: %v", serverID, err) } else if processed { log.Infof("Node %s has been processed", serverID) @@ -468,7 +468,7 @@ func (provider CloudProvider) Repair(nodes []healthcheck.NodeInfo) error { } else { // Mark root volume as unhealthy if rootVolumeID != "" { - err = stackresources.MarkUnhealthy(context.TODO(), provider.Heat, allMapping[serverID].StackName, allMapping[serverID].StackID, rootVolumeID, opts).ExtractErr() + err = stackresources.MarkUnhealthy(ctx, provider.Heat, allMapping[serverID].StackName, allMapping[serverID].StackID, rootVolumeID, opts).ExtractErr() if err != nil { log.Errorf("failed to mark resource %s unhealthy, error: %v", rootVolumeID, err) } @@ -479,7 +479,7 @@ func (provider CloudProvider) Repair(nodes []healthcheck.NodeInfo) error { log.Warningf("Failed to shutdown the server %s, error: %v", serverID, err) // If the server is failed to delete after 180s, then delete it to avoid the // stack update failure later. - res := servers.ForceDelete(context.TODO(), provider.Nova, serverID) + res := servers.ForceDelete(ctx, provider.Nova, serverID) if res.Err != nil { log.Warningf("Failed to delete the server %s, error: %v", serverID, err) } @@ -488,7 +488,7 @@ func (provider CloudProvider) Repair(nodes []healthcheck.NodeInfo) error { log.Infof("Marking Nova VM %s(Heat resource %s) unhealthy for Heat stack %s", serverID, allMapping[serverID].ResourceID, cluster.StackID) // Mark VM as unhealthy - err = stackresources.MarkUnhealthy(context.TODO(), provider.Heat, allMapping[serverID].StackName, allMapping[serverID].StackID, allMapping[serverID].ResourceID, opts).ExtractErr() + err = stackresources.MarkUnhealthy(ctx, provider.Heat, allMapping[serverID].StackName, allMapping[serverID].StackID, allMapping[serverID].ResourceID, opts).ExtractErr() if err != nil { log.Errorf("failed to mark resource %s unhealthy, error: %v", serverID, err) } @@ -496,7 +496,7 @@ func (provider CloudProvider) Repair(nodes []healthcheck.NodeInfo) error { delete(unHealthyNodes, serverID) } - if err := stacks.UpdatePatch(context.TODO(), provider.Heat, clusterStackName, cluster.StackID, stacks.UpdateOpts{}).ExtractErr(); err != nil { + if err := stacks.UpdatePatch(ctx, provider.Heat, clusterStackName, cluster.StackID, stacks.UpdateOpts{}).ExtractErr(); err != nil { return fmt.Errorf("failed to update Heat stack to rebuild resources, error: %v", err) } @@ -514,7 +514,7 @@ func (provider CloudProvider) Repair(nodes []healthcheck.NodeInfo) error { log.Infof("Skip node delete for %s because it's repaired by reboot", serverID) continue } - if err := provider.KubeClient.CoreV1().Nodes().Delete(context.TODO(), n.KubeNode.Name, metav1.DeleteOptions{}); err != nil { + if err := provider.KubeClient.CoreV1().Nodes().Delete(ctx, n.KubeNode.Name, metav1.DeleteOptions{}); err != nil { log.Errorf("Failed to remove the node %s from cluster, 
error: %v", n.KubeNode.Name, err) } } @@ -522,10 +522,10 @@ func (provider CloudProvider) Repair(nodes []healthcheck.NodeInfo) error { return nil } -func (provider CloudProvider) getNodeGroup(clusterName string, node healthcheck.NodeInfo) (nodegroups.NodeGroup, error) { +func (provider CloudProvider) getNodeGroup(ctx context.Context, clusterName string, node healthcheck.NodeInfo) (nodegroups.NodeGroup, error) { var ng nodegroups.NodeGroup - ngPages, err := nodegroups.List(provider.Magnum, clusterName, nodegroups.ListOpts{}).AllPages(context.TODO()) + ngPages, err := nodegroups.List(provider.Magnum, clusterName, nodegroups.ListOpts{}).AllPages(ctx) if err == nil { ngs, err := nodegroups.ExtractNodeGroups(ngPages) if err != nil { @@ -533,7 +533,7 @@ func (provider CloudProvider) getNodeGroup(clusterName string, node healthcheck. return ng, err } for _, ng := range ngs { - ngInfo, err := nodegroups.Get(context.TODO(), provider.Magnum, clusterName, ng.UUID).Extract() + ngInfo, err := nodegroups.Get(ctx, provider.Magnum, clusterName, ng.UUID).Extract() if err != nil { log.Warningf("Failed to get node group for cluster %s, error: %v", clusterName, err) return ng, err @@ -555,7 +555,7 @@ func (provider CloudProvider) getNodeGroup(clusterName string, node healthcheck. // UpdateHealthStatus can update the cluster health status to reflect the // real-time health status of the k8s cluster. -func (provider CloudProvider) UpdateHealthStatus(masters []healthcheck.NodeInfo, workers []healthcheck.NodeInfo) error { +func (provider CloudProvider) UpdateHealthStatus(ctx context.Context, masters []healthcheck.NodeInfo, workers []healthcheck.NodeInfo) error { log.Infof("start to update cluster health status.") clusterName := provider.Config.ClusterName @@ -600,7 +600,7 @@ func (provider CloudProvider) UpdateHealthStatus(masters []healthcheck.NodeInfo, } log.Infof("updating cluster health status as %s for reason %s.", healthStatus, healthStatusReason) - res := clusters.Update(context.TODO(), provider.Magnum, clusterName, updateOpts) + res := clusters.Update(ctx, provider.Magnum, clusterName, updateOpts) if res.Err != nil { return fmt.Errorf("failed to update the health status of cluster %s error: %v", clusterName, res.Err) @@ -617,10 +617,10 @@ func (provider CloudProvider) UpdateHealthStatus(masters []healthcheck.NodeInfo, // There are two conditions that we disable the repair: // - The cluster admin disables the auto healing via OpenStack API. // - The Magnum cluster is not in stable status. 
-func (provider CloudProvider) Enabled() bool { +func (provider CloudProvider) Enabled(ctx context.Context) bool { clusterName := provider.Config.ClusterName - cluster, err := clusters.Get(context.TODO(), provider.Magnum, clusterName).Extract() + cluster, err := clusters.Get(ctx, provider.Magnum, clusterName).Extract() if err != nil { log.Warningf("failed to get the cluster %s, error: %v", clusterName, err) return false @@ -644,12 +644,12 @@ func (provider CloudProvider) Enabled() bool { return false } - clusterStackName, err := provider.getStackName(cluster.StackID) + clusterStackName, err := provider.getStackName(ctx, cluster.StackID) if err != nil { log.Warningf("Failed to get the Heat stack ID for cluster %s, error: %v", clusterName, err) return false } - stack, err := stacks.Get(context.TODO(), provider.Heat, clusterStackName, cluster.StackID).Extract() + stack, err := stacks.Get(ctx, provider.Heat, clusterStackName, cluster.StackID).Extract() if err != nil { log.Warningf("Failed to get Heat stack %s for cluster %s, error: %v", cluster.StackID, clusterName, err) return false diff --git a/pkg/autohealing/cmd/root.go b/pkg/autohealing/cmd/root.go index 90d25a46ca..a5afb9830b 100644 --- a/pkg/autohealing/cmd/root.go +++ b/pkg/autohealing/cmd/root.go @@ -50,10 +50,11 @@ var rootCmd = &cobra.Command{ "OpenStack is supported by default.", Run: func(cmd *cobra.Command, args []string) { + ctx := context.TODO() autohealer := controller.NewController(conf) if !conf.LeaderElect { - autohealer.Start(context.TODO()) + autohealer.Start(ctx) panic("unreachable") } @@ -63,7 +64,7 @@ var rootCmd = &cobra.Command{ } // Try and become the leader and start autohealing loops - leaderelection.RunOrDie(context.TODO(), leaderelection.LeaderElectionConfig{ + leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ Lock: lock, LeaseDuration: 20 * time.Second, RenewDeadline: 15 * time.Second, diff --git a/pkg/autohealing/controller/controller.go b/pkg/autohealing/controller/controller.go index 1742d5c3dd..df2b6bc8c2 100644 --- a/pkg/autohealing/controller/controller.go +++ b/pkg/autohealing/controller/controller.go @@ -198,8 +198,8 @@ type Controller struct { // UpdateNodeAnnotation updates the specified node annotation, if value equals empty string, the annotation will be // removed. This implements the interface healthcheck.NodeController -func (c *Controller) UpdateNodeAnnotation(node healthcheck.NodeInfo, annotation string, value string) error { - n, err := c.kubeClient.CoreV1().Nodes().Get(context.TODO(), node.KubeNode.Name, metav1.GetOptions{}) +func (c *Controller) UpdateNodeAnnotation(ctx context.Context, node healthcheck.NodeInfo, annotation string, value string) error { + n, err := c.kubeClient.CoreV1().Nodes().Get(ctx, node.KubeNode.Name, metav1.GetOptions{}) if err != nil { return err } @@ -210,7 +210,7 @@ func (c *Controller) UpdateNodeAnnotation(node healthcheck.NodeInfo, annotation n.Annotations[annotation] = value } - if _, err := c.kubeClient.CoreV1().Nodes().Update(context.TODO(), n, metav1.UpdateOptions{}); err != nil { + if _, err := c.kubeClient.CoreV1().Nodes().Update(ctx, n, metav1.UpdateOptions{}); err != nil { return err } @@ -244,7 +244,7 @@ func (c *Controller) GetLeaderElectionLock() (resourcelock.Interface, error) { } // getUnhealthyMasterNodes returns the master nodes that need to be repaired. 
-func (c *Controller) getUnhealthyMasterNodes() ([]healthcheck.NodeInfo, error) { +func (c *Controller) getUnhealthyMasterNodes(ctx context.Context) ([]healthcheck.NodeInfo, error) { var nodes []healthcheck.NodeInfo // If no checkers defined, skip @@ -254,7 +254,7 @@ func (c *Controller) getUnhealthyMasterNodes() ([]healthcheck.NodeInfo, error) { } // Get all the master nodes need to check - nodeList, err := c.kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + nodeList, err := c.kubeClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil { return nil, err } @@ -272,13 +272,13 @@ func (c *Controller) getUnhealthyMasterNodes() ([]healthcheck.NodeInfo, error) { } // Do health check - unhealthyNodes := healthcheck.CheckNodes(c.masterCheckers, nodes, c) + unhealthyNodes := healthcheck.CheckNodes(ctx, c.masterCheckers, nodes, c) return unhealthyNodes, nil } // getUnhealthyWorkerNodes returns the nodes that need to be repaired. -func (c *Controller) getUnhealthyWorkerNodes() ([]healthcheck.NodeInfo, error) { +func (c *Controller) getUnhealthyWorkerNodes(ctx context.Context) ([]healthcheck.NodeInfo, error) { var nodes []healthcheck.NodeInfo // If no checkers defined, skip. @@ -288,7 +288,7 @@ func (c *Controller) getUnhealthyWorkerNodes() ([]healthcheck.NodeInfo, error) { } // Get all the worker nodes. - nodeList, err := c.kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + nodeList, err := c.kubeClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil { return nil, err } @@ -309,12 +309,12 @@ func (c *Controller) getUnhealthyWorkerNodes() ([]healthcheck.NodeInfo, error) { } // Do health check - unhealthyNodes := healthcheck.CheckNodes(c.workerCheckers, nodes, c) + unhealthyNodes := healthcheck.CheckNodes(ctx, c.workerCheckers, nodes, c) return unhealthyNodes, nil } -func (c *Controller) repairNodes(unhealthyNodes []healthcheck.NodeInfo) { +func (c *Controller) repairNodes(ctx context.Context, unhealthyNodes []healthcheck.NodeInfo) { unhealthyNodeNames := sets.NewString() for _, n := range unhealthyNodes { unhealthyNodeNames.Insert(n.KubeNode.Name) @@ -322,7 +322,7 @@ func (c *Controller) repairNodes(unhealthyNodes []healthcheck.NodeInfo) { // Trigger unhealthy nodes repair. if len(unhealthyNodes) > 0 { - if !c.provider.Enabled() { + if !c.provider.Enabled(ctx) { // The cloud provider doesn't allow to trigger node repair. 
log.Infof("Auto healing is ignored for nodes %s", unhealthyNodeNames.List()) } else { @@ -340,13 +340,13 @@ func (c *Controller) repairNodes(unhealthyNodes []healthcheck.NodeInfo) { retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { // Retrieve the latest version of Node before attempting update // RetryOnConflict uses exponential backoff to avoid exhausting the apiserver - newNode, err := c.kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + newNode, err := c.kubeClient.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) if err != nil { log.Errorf("Failed to get node %s, error: %v before update", nodeName, err) return err } newNode.Spec.Unschedulable = true - if _, updateErr := c.kubeClient.CoreV1().Nodes().Update(context.TODO(), newNode, metav1.UpdateOptions{}); updateErr != nil { + if _, updateErr := c.kubeClient.CoreV1().Nodes().Update(ctx, newNode, metav1.UpdateOptions{}); updateErr != nil { log.Warningf("Failed in retry to cordon node %s, error: %v", nodeName, updateErr) return updateErr } else { @@ -361,7 +361,7 @@ func (c *Controller) repairNodes(unhealthyNodes []healthcheck.NodeInfo) { } // Start to repair all the unhealthy nodes. - if err := c.provider.Repair(unhealthyNodes); err != nil { + if err := c.provider.Repair(ctx, unhealthyNodes); err != nil { log.Errorf("Failed to repair the nodes %s, error: %v", unhealthyNodeNames.List(), err) } } @@ -371,12 +371,12 @@ func (c *Controller) repairNodes(unhealthyNodes []healthcheck.NodeInfo) { // startMasterMonitor checks if there are failed master nodes and triggers the repair action. This function is supposed // to be running in a goroutine. -func (c *Controller) startMasterMonitor(wg *sync.WaitGroup) { +func (c *Controller) startMasterMonitor(ctx context.Context, wg *sync.WaitGroup) { log.V(3).Info("Starting to check master nodes.") defer wg.Done() // Get all the unhealthy master nodes. - unhealthyNodes, err := c.getUnhealthyMasterNodes() + unhealthyNodes, err := c.getUnhealthyMasterNodes(ctx) if err != nil { log.Errorf("Failed to get unhealthy master nodes, error: %v", err) return @@ -384,7 +384,7 @@ func (c *Controller) startMasterMonitor(wg *sync.WaitGroup) { masterUnhealthyNodes = append(masterUnhealthyNodes, unhealthyNodes...) - c.repairNodes(unhealthyNodes) + c.repairNodes(ctx, unhealthyNodes) if len(unhealthyNodes) == 0 { log.V(3).Info("Master nodes are healthy") @@ -395,12 +395,12 @@ func (c *Controller) startMasterMonitor(wg *sync.WaitGroup) { // startWorkerMonitor checks if there are failed worker nodes and triggers the repair action. This function is supposed // to be running in a goroutine. -func (c *Controller) startWorkerMonitor(wg *sync.WaitGroup) { +func (c *Controller) startWorkerMonitor(ctx context.Context, wg *sync.WaitGroup) { log.V(3).Info("Starting to check worker nodes.") defer wg.Done() // Get all the unhealthy worker nodes. - unhealthyNodes, err := c.getUnhealthyWorkerNodes() + unhealthyNodes, err := c.getUnhealthyWorkerNodes(ctx) if err != nil { log.Errorf("Failed to get unhealthy worker nodes, error: %v", err) return @@ -408,7 +408,7 @@ func (c *Controller) startWorkerMonitor(wg *sync.WaitGroup) { workerUnhealthyNodes = append(workerUnhealthyNodes, unhealthyNodes...) 
- c.repairNodes(unhealthyNodes) + c.repairNodes(ctx, unhealthyNodes) if len(unhealthyNodes) == 0 { log.V(3).Info("Worker nodes are healthy") @@ -431,17 +431,17 @@ func (c *Controller) Start(ctx context.Context) { <-ticker.C if c.config.MasterMonitorEnabled { wg.Add(1) - go c.startMasterMonitor(&wg) + go c.startMasterMonitor(ctx, &wg) } if c.config.WorkerMonitorEnabled { wg.Add(1) - go c.startWorkerMonitor(&wg) + go c.startWorkerMonitor(ctx, &wg) } wg.Wait() - if c.provider.Enabled() { - err := c.provider.UpdateHealthStatus(masterUnhealthyNodes, workerUnhealthyNodes) + if c.provider.Enabled(ctx) { + err := c.provider.UpdateHealthStatus(ctx, masterUnhealthyNodes, workerUnhealthyNodes) if err != nil { log.Warningf("Unable to update health status. Retrying. %v", err) } diff --git a/pkg/autohealing/healthcheck/healthcheck.go b/pkg/autohealing/healthcheck/healthcheck.go index 3b06aa209e..598d0d6e1e 100644 --- a/pkg/autohealing/healthcheck/healthcheck.go +++ b/pkg/autohealing/healthcheck/healthcheck.go @@ -17,6 +17,7 @@ limitations under the License. package healthcheck import ( + "context" "time" apiv1 "k8s.io/api/core/v1" @@ -40,7 +41,7 @@ type NodeInfo struct { type HealthCheck interface { // Check checks the node health, returns false if the node is unhealthy. The plugin should deal with any error happened. - Check(node NodeInfo, controller NodeController) bool + Check(ctx context.Context, node NodeInfo, controller NodeController) bool // IsMasterSupported checks if the health check plugin supports master node. IsMasterSupported() bool @@ -56,7 +57,7 @@ type HealthCheck interface { type NodeController interface { // UpdateNodeAnnotation updates the specified node annotation, if value equals empty string, the annotation will be // removed. - UpdateNodeAnnotation(node NodeInfo, annotation string, value string) error + UpdateNodeAnnotation(ctx context.Context, node NodeInfo, annotation string, value string) error } func RegisterHealthCheck(name string, register registerPlugin) { @@ -77,13 +78,13 @@ func GetHealthChecker(name string, config interface{}) (HealthCheck, error) { } // CheckNodes goes through the health checkers, returns the unhealthy nodes. -func CheckNodes(checkers []HealthCheck, nodes []NodeInfo, controller NodeController) []NodeInfo { +func CheckNodes(ctx context.Context, checkers []HealthCheck, nodes []NodeInfo, controller NodeController) []NodeInfo { var unhealthyNodes []NodeInfo // Check the health for each node. for _, node := range nodes { for _, checker := range checkers { - if !checker.Check(node, controller) { + if !checker.Check(ctx, node, controller) { node.FailedCheck = checker.GetName() node.FoundAt = time.Now() unhealthyNodes = append(unhealthyNodes, node) diff --git a/pkg/autohealing/healthcheck/plugin_endpoint.go b/pkg/autohealing/healthcheck/plugin_endpoint.go index 80fdf639ae..6bdc529f66 100644 --- a/pkg/autohealing/healthcheck/plugin_endpoint.go +++ b/pkg/autohealing/healthcheck/plugin_endpoint.go @@ -17,6 +17,7 @@ limitations under the License. package healthcheck import ( + "context" "crypto/tls" "fmt" "net/http" @@ -78,12 +79,12 @@ func (check *EndpointCheck) IsWorkerSupported() bool { } // checkDuration checks if the node should be marked as healthy or not. 
-func (check *EndpointCheck) checkDuration(node NodeInfo, controller NodeController, checkRet bool) bool { +func (check *EndpointCheck) checkDuration(ctx context.Context, node NodeInfo, controller NodeController, checkRet bool) bool { name := node.KubeNode.Name if checkRet { // Remove the annotation - if err := controller.UpdateNodeAnnotation(node, check.UnhealthyAnnotation, ""); err != nil { + if err := controller.UpdateNodeAnnotation(ctx, node, check.UnhealthyAnnotation, ""); err != nil { log.Errorf("Failed to remove the node annotation(will skip the check) for %s, error: %v", name, err) } return true @@ -106,7 +107,7 @@ func (check *EndpointCheck) checkDuration(node NodeInfo, controller NodeControll if unhealthyStartTime == nil { // Set the annotation value - if err := controller.UpdateNodeAnnotation(node, check.UnhealthyAnnotation, now.Format(TimeLayout)); err != nil { + if err := controller.UpdateNodeAnnotation(ctx, node, check.UnhealthyAnnotation, now.Format(TimeLayout)); err != nil { log.Errorf("Failed to set the node annotation(will skip the check) for %s, error: %v", name, err) } return true @@ -121,7 +122,7 @@ func (check *EndpointCheck) checkDuration(node NodeInfo, controller NodeControll } // Check checks the node health, returns false if the node is unhealthy. Update the node cache accordingly. -func (check *EndpointCheck) Check(node NodeInfo, controller NodeController) bool { +func (check *EndpointCheck) Check(ctx context.Context, node NodeInfo, controller NodeController) bool { nodeName := node.KubeNode.Name ip := "" for _, addr := range node.KubeNode.Status.Addresses { @@ -152,7 +153,7 @@ func (check *EndpointCheck) Check(node NodeInfo, controller NodeController) bool req, err := http.NewRequest("GET", url, nil) if err != nil { log.Errorf("Node %s, failed to get request %s, error: %v", nodeName, url, err) - return check.checkDuration(node, controller, false) + return check.checkDuration(ctx, node, controller, false) } if check.RequireToken { @@ -171,17 +172,17 @@ func (check *EndpointCheck) Check(node NodeInfo, controller NodeController) bool resp, err := client.Do(req) if err != nil { log.Errorf("Node %s, failed to read response for url %s, error: %v", nodeName, url, err) - return check.checkDuration(node, controller, false) + return check.checkDuration(ctx, node, controller, false) } resp.Body.Close() if !utils.ContainsInt(check.OKCodes, resp.StatusCode) { log.V(4).Infof("Node %s, return code for url %s is %d, expected: %d", nodeName, url, resp.StatusCode, check.OKCodes) - return check.checkDuration(node, controller, false) + return check.checkDuration(ctx, node, controller, false) } } - return check.checkDuration(node, controller, true) + return check.checkDuration(ctx, node, controller, true) } func NewEndpointCheck(config interface{}) (HealthCheck, error) { diff --git a/pkg/autohealing/healthcheck/plugin_nodecondition.go b/pkg/autohealing/healthcheck/plugin_nodecondition.go index 222c215d10..eb9eafb627 100644 --- a/pkg/autohealing/healthcheck/plugin_nodecondition.go +++ b/pkg/autohealing/healthcheck/plugin_nodecondition.go @@ -17,6 +17,7 @@ limitations under the License. package healthcheck import ( + "context" "fmt" "time" @@ -45,7 +46,7 @@ type NodeConditionCheck struct { } // Check checks the node health, returns false if the node is unhealthy. 
-func (check *NodeConditionCheck) Check(node NodeInfo, controller NodeController) bool { +func (check *NodeConditionCheck) Check(ctx context.Context, node NodeInfo, controller NodeController) bool { nodeName := node.KubeNode.Name for _, cond := range node.KubeNode.Status.Conditions { diff --git a/pkg/csi/cinder/controllerserver.go b/pkg/csi/cinder/controllerserver.go index 43b2ba1941..b9c517cc9c 100644 --- a/pkg/csi/cinder/controllerserver.go +++ b/pkg/csi/cinder/controllerserver.go @@ -103,7 +103,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol } // Verify a volume with the provided name doesn't already exist for this tenant - vols, err := cloud.GetVolumesByName(volName) + vols, err := cloud.GetVolumesByName(ctx, volName) if err != nil { klog.Errorf("Failed to query for existing Volume during CreateVolume: %v", err) return nil, status.Errorf(codes.Internal, "Failed to get volumes: %v", err) @@ -142,7 +142,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol if content != nil && content.GetSnapshot() != nil { snapshotID = content.GetSnapshot().GetSnapshotId() - snap, err := cloud.GetSnapshotByID(snapshotID) + snap, err := cloud.GetSnapshotByID(ctx, snapshotID) if err != nil && !cpoerrors.IsNotFound(err) { return nil, err } @@ -155,7 +155,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol // check if a Backup with the same ID exists if backupsAreEnabled && cpoerrors.IsNotFound(err) { var back *backups.Backup - back, err = cloud.GetBackupByID(snapshotID) + back, err = cloud.GetBackupByID(ctx, snapshotID) if err != nil { //If there is an error getting the backup as well, fail. return nil, status.Errorf(codes.NotFound, "VolumeContentSource Snapshot or Backup with ID %s not found", snapshotID) @@ -177,7 +177,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol if content != nil && content.GetVolume() != nil { sourceVolID = content.GetVolume().GetVolumeId() - _, err := cloud.GetVolume(sourceVolID) + _, err := cloud.GetVolume(ctx, sourceVolID) if err != nil { if cpoerrors.IsNotFound(err) { return nil, status.Errorf(codes.NotFound, "Source Volume %s not found", sourceVolID) @@ -206,11 +206,11 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol klog.V(4).Infof("CreateVolume: Getting scheduler hints: affinity=%s, anti-affinity=%s", affinity, antiAffinity) // resolve volume names to UUIDs - affinity, err = cloud.ResolveVolumeListToUUIDs(affinity) + affinity, err = cloud.ResolveVolumeListToUUIDs(ctx, affinity) if err != nil { return nil, status.Errorf(codes.InvalidArgument, "failed to resolve affinity volume UUIDs: %v", err) } - antiAffinity, err = cloud.ResolveVolumeListToUUIDs(antiAffinity) + antiAffinity, err = cloud.ResolveVolumeListToUUIDs(ctx, antiAffinity) if err != nil { return nil, status.Errorf(codes.InvalidArgument, "failed to resolve anti-affinity volume UUIDs: %v", err) } @@ -225,7 +225,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol klog.V(4).Infof("CreateVolume: Resolved scheduler hints: affinity=%s, anti-affinity=%s", affinity, antiAffinity) } - vol, err := cloud.CreateVolume(opts, schedulerHints) + vol, err := cloud.CreateVolume(ctx, opts, schedulerHints) if err != nil { klog.Errorf("Failed to CreateVolume: %v", err) return nil, status.Errorf(codes.Internal, "CreateVolume failed with error %v", err) @@ -261,7 +261,7 @@ func (cs *controllerServer) DeleteVolume(ctx context.Context, 
req *csi.DeleteVol if len(volID) == 0 { return nil, status.Error(codes.InvalidArgument, "DeleteVolume Volume ID must be provided") } - err := cloud.DeleteVolume(volID) + err := cloud.DeleteVolume(ctx, volID) if err != nil { if cpoerrors.IsNotFound(err) { klog.V(3).Infof("Volume %s is already deleted.", volID) @@ -301,7 +301,7 @@ func (cs *controllerServer) ControllerPublishVolume(ctx context.Context, req *cs return nil, status.Error(codes.InvalidArgument, "[ControllerPublishVolume] Volume capability must be provided") } - _, err := cloud.GetVolume(volumeID) + _, err := cloud.GetVolume(ctx, volumeID) if err != nil { if cpoerrors.IsNotFound(err) { return nil, status.Errorf(codes.NotFound, "[ControllerPublishVolume] Volume %s not found", volumeID) @@ -309,7 +309,7 @@ func (cs *controllerServer) ControllerPublishVolume(ctx context.Context, req *cs return nil, status.Errorf(codes.Internal, "[ControllerPublishVolume] get volume failed with error %v", err) } - _, err = cloud.GetInstanceByID(instanceID) + _, err = cloud.GetInstanceByID(ctx, instanceID) if err != nil { if cpoerrors.IsNotFound(err) { return nil, status.Errorf(codes.NotFound, "[ControllerPublishVolume] Instance %s not found", instanceID) @@ -317,20 +317,20 @@ func (cs *controllerServer) ControllerPublishVolume(ctx context.Context, req *cs return nil, status.Errorf(codes.Internal, "[ControllerPublishVolume] GetInstanceByID failed with error %v", err) } - _, err = cloud.AttachVolume(instanceID, volumeID) + _, err = cloud.AttachVolume(ctx, instanceID, volumeID) if err != nil { klog.Errorf("Failed to AttachVolume: %v", err) return nil, status.Errorf(codes.Internal, "[ControllerPublishVolume] Attach Volume failed with error %v", err) } - err = cloud.WaitDiskAttached(instanceID, volumeID) + err = cloud.WaitDiskAttached(ctx, instanceID, volumeID) if err != nil { klog.Errorf("Failed to WaitDiskAttached: %v", err) return nil, status.Errorf(codes.Internal, "[ControllerPublishVolume] failed to attach volume: %v", err) } - devicePath, err := cloud.GetAttachmentDiskPath(instanceID, volumeID) + devicePath, err := cloud.GetAttachmentDiskPath(ctx, instanceID, volumeID) if err != nil { klog.Errorf("Failed to GetAttachmentDiskPath: %v", err) return nil, status.Errorf(codes.Internal, "[ControllerPublishVolume] failed to get device path of attached volume: %v", err) @@ -364,7 +364,7 @@ func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req * if len(volumeID) == 0 { return nil, status.Error(codes.InvalidArgument, "[ControllerUnpublishVolume] Volume ID must be provided") } - _, err := cloud.GetInstanceByID(instanceID) + _, err := cloud.GetInstanceByID(ctx, instanceID) if err != nil { if cpoerrors.IsNotFound(err) { klog.V(3).Infof("ControllerUnpublishVolume assuming volume %s is detached, because node %s does not exist", volumeID, instanceID) @@ -373,7 +373,7 @@ func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req * return nil, status.Errorf(codes.Internal, "[ControllerUnpublishVolume] GetInstanceByID failed with error %v", err) } - err = cloud.DetachVolume(instanceID, volumeID) + err = cloud.DetachVolume(ctx, instanceID, volumeID) if err != nil { if cpoerrors.IsNotFound(err) { klog.V(3).Infof("ControllerUnpublishVolume assuming volume %s is detached, because it does not exist", volumeID) @@ -383,7 +383,7 @@ func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req * return nil, status.Errorf(codes.Internal, "ControllerUnpublishVolume Detach Volume failed with error %v", err) } - err = 
cloud.WaitDiskDetached(instanceID, volumeID) + err = cloud.WaitDiskDetached(ctx, instanceID, volumeID) if err != nil { klog.Errorf("Failed to WaitDiskDetached: %v", err) if cpoerrors.IsNotFound(err) { @@ -479,9 +479,9 @@ func (cs *controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolume } for idx := startIdx; idx < len(cloudsNames); idx++ { if maxEntries > 0 { - vlist, nextPageToken, err = cs.Clouds[cloudsNames[idx]].ListVolumes(maxEntries-len(cloudsVentries), startingToken) + vlist, nextPageToken, err = cs.Clouds[cloudsNames[idx]].ListVolumes(ctx, maxEntries-len(cloudsVentries), startingToken) } else { - vlist, nextPageToken, err = cs.Clouds[cloudsNames[idx]].ListVolumes(maxEntries, startingToken) + vlist, nextPageToken, err = cs.Clouds[cloudsNames[idx]].ListVolumes(ctx, maxEntries, startingToken) } startingToken = nextPageToken if err != nil { @@ -514,7 +514,7 @@ func (cs *controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolume // set token to next non empty cloud i := 0 for i = idx + 1; i < len(cloudsNames); i++ { - vlistTmp, _, err := cs.Clouds[cloudsNames[i]].ListVolumes(1, "") + vlistTmp, _, err := cs.Clouds[cloudsNames[i]].ListVolumes(ctx, 1, "") if err != nil { klog.Errorf("Failed to ListVolumes: %v", err) if cpoerrors.IsInvalidError(err) { @@ -612,7 +612,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS return nil, status.Error(codes.FailedPrecondition, "Backups are not enabled in Cinder") } // Get a list of backups with the provided name - backups, err = cloud.ListBackups(filters) + backups, err = cloud.ListBackups(ctx, filters) if err != nil { klog.Errorf("Failed to query for existing Backup during CreateSnapshot: %v", err) return nil, status.Error(codes.Internal, "Failed to get backups") @@ -626,7 +626,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS if len(backups) == 1 { // since backup.VolumeID is not part of ListBackups response // we need fetch single backup to get the full object. - backup, err = cloud.GetBackupByID(backups[0].ID) + backup, err = cloud.GetBackupByID(ctx, backups[0].ID) if err != nil { klog.Errorf("Failed to get backup by ID %s: %v", backup.ID, err) return nil, status.Error(codes.Internal, "Failed to get backup by ID") @@ -655,7 +655,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS // Create the snapshot if the backup does not already exist and wait for it to be ready if !backupAlreadyExists { - snap, err = cs.createSnapshot(cloud, name, volumeID, req.Parameters) + snap, err = cs.createSnapshot(ctx, cloud, name, volumeID, req.Parameters) if err != nil { return nil, err } @@ -665,7 +665,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS klog.Errorf("Error to convert time to timestamp: %v", err) } - snap.Status, err = cloud.WaitSnapshotReady(snap.ID) + snap.Status, err = cloud.WaitSnapshotReady(ctx, snap.ID) if err != nil { klog.Errorf("Failed to WaitSnapshotReady: %v", err) return nil, status.Errorf(codes.Internal, "CreateSnapshot failed with error: %v. 
Current snapshot status: %v", err, snap.Status) @@ -690,7 +690,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS if snapshotType == "backup" { if !backupAlreadyExists { - backup, err = cs.createBackup(cloud, name, volumeID, snap, req.Parameters) + backup, err = cs.createBackup(ctx, cloud, name, volumeID, snap, req.Parameters) if err != nil { return nil, err } @@ -701,20 +701,20 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS klog.Errorf("Error to convert time to timestamp: %v", err) } - backup.Status, err = cloud.WaitBackupReady(backup.ID, snapSize, backupMaxDurationSecondsPerGB) + backup.Status, err = cloud.WaitBackupReady(ctx, backup.ID, snapSize, backupMaxDurationSecondsPerGB) if err != nil { klog.Errorf("Failed to WaitBackupReady: %v", err) return nil, status.Error(codes.Internal, fmt.Sprintf("CreateBackup failed with error %v. Current backups status: %s", err, backup.Status)) } // Necessary to get all the backup information, including size. - backup, err = cloud.GetBackupByID(backup.ID) + backup, err = cloud.GetBackupByID(ctx, backup.ID) if err != nil { klog.Errorf("Failed to GetBackupByID after backup creation: %v", err) return nil, status.Error(codes.Internal, fmt.Sprintf("GetBackupByID failed with error %v", err)) } - err = cloud.DeleteSnapshot(backup.SnapshotID) + err = cloud.DeleteSnapshot(ctx, backup.SnapshotID) if err != nil && !cpoerrors.IsNotFound(err) { klog.Errorf("Failed to DeleteSnapshot: %v", err) return nil, status.Error(codes.Internal, fmt.Sprintf("DeleteSnapshot failed with error %v", err)) @@ -733,12 +733,12 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS } -func (cs *controllerServer) createSnapshot(cloud openstack.IOpenStack, name string, volumeID string, parameters map[string]string) (snap *snapshots.Snapshot, err error) { +func (cs *controllerServer) createSnapshot(ctx context.Context, cloud openstack.IOpenStack, name string, volumeID string, parameters map[string]string) (snap *snapshots.Snapshot, err error) { filters := map[string]string{} filters["Name"] = name // List existing snapshots with the same name - snapshots, _, err := cloud.ListSnapshots(filters) + snapshots, _, err := cloud.ListSnapshots(ctx, filters) if err != nil { klog.Errorf("Failed to query for existing Snapshot during CreateSnapshot: %v", err) return nil, status.Error(codes.Internal, "Failed to get snapshots") @@ -777,7 +777,7 @@ func (cs *controllerServer) createSnapshot(cloud openstack.IOpenStack, name stri } // TODO: Delegate the check to openstack itself and ignore the conflict - snap, err = cloud.CreateSnapshot(name, volumeID, properties) + snap, err = cloud.CreateSnapshot(ctx, name, volumeID, properties) if err != nil { klog.Errorf("Failed to Create snapshot: %v", err) return nil, status.Errorf(codes.Internal, "CreateSnapshot failed with error %v", err) @@ -788,7 +788,7 @@ func (cs *controllerServer) createSnapshot(cloud openstack.IOpenStack, name stri return snap, nil } -func (cs *controllerServer) createBackup(cloud openstack.IOpenStack, name string, volumeID string, snap *snapshots.Snapshot, parameters map[string]string) (*backups.Backup, error) { +func (cs *controllerServer) createBackup(ctx context.Context, cloud openstack.IOpenStack, name string, volumeID string, snap *snapshots.Snapshot, parameters map[string]string) (*backups.Backup, error) { // Add cluster ID to the snapshot metadata properties := map[string]string{cinderCSIClusterIDKey: cs.Driver.cluster} @@ -802,7 
+802,7 @@ func (cs *controllerServer) createBackup(cloud openstack.IOpenStack, name string } } - backup, err := cloud.CreateBackup(name, volumeID, snap.ID, parameters[openstack.SnapshotAvailabilityZone], properties) + backup, err := cloud.CreateBackup(ctx, name, volumeID, snap.ID, parameters[openstack.SnapshotAvailabilityZone], properties) if err != nil { klog.Errorf("Failed to Create backup: %v", err) return nil, status.Error(codes.Internal, fmt.Sprintf("CreateBackup failed with error %v", err)) @@ -829,9 +829,9 @@ func (cs *controllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS } // If volumeSnapshot object was linked to a cinder backup, delete the backup. - back, err := cloud.GetBackupByID(id) + back, err := cloud.GetBackupByID(ctx, id) if err == nil && back != nil { - err = cloud.DeleteBackup(id) + err = cloud.DeleteBackup(ctx, id) if err != nil { klog.Errorf("Failed to Delete backup: %v", err) return nil, status.Error(codes.Internal, fmt.Sprintf("DeleteBackup failed with error %v", err)) @@ -839,7 +839,7 @@ func (cs *controllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS } // Delegate the check to openstack itself - err = cloud.DeleteSnapshot(id) + err = cloud.DeleteSnapshot(ctx, id) if err != nil { if cpoerrors.IsNotFound(err) { klog.V(3).Infof("Snapshot %s is already deleted.", id) @@ -861,7 +861,7 @@ func (cs *controllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnap snapshotID := req.GetSnapshotId() if len(snapshotID) != 0 { - snap, err := cloud.GetSnapshotByID(snapshotID) + snap, err := cloud.GetSnapshotByID(ctx, snapshotID) if err != nil { if cpoerrors.IsNotFound(err) { klog.V(3).Infof("Snapshot %s not found", snapshotID) @@ -905,7 +905,7 @@ func (cs *controllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnap // Only retrieve snapshots that are available filters["Status"] = "available" - slist, nextPageToken, err = cloud.ListSnapshots(filters) + slist, nextPageToken, err = cloud.ListSnapshots(ctx, filters) if err != nil { klog.Errorf("Failed to ListSnapshots: %v", err) return nil, status.Errorf(codes.Internal, "ListSnapshots failed with error %v", err) @@ -964,7 +964,7 @@ func (cs *controllerServer) ValidateVolumeCapabilities(ctx context.Context, req return nil, status.Error(codes.InvalidArgument, "ValidateVolumeCapabilities Volume ID must be provided") } - _, err := cloud.GetVolume(volumeID) + _, err := cloud.GetVolume(ctx, volumeID) if err != nil { if cpoerrors.IsNotFound(err) { return nil, status.Errorf(codes.NotFound, "ValidateVolumeCapabilities Volume %s not found", volumeID) @@ -1008,7 +1008,7 @@ func (cs *controllerServer) ControllerGetVolume(ctx context.Context, req *csi.Co var volume *volumes.Volume var err error for _, cloud := range cs.Clouds { - volume, err = cloud.GetVolume(volumeID) + volume, err = cloud.GetVolume(ctx, volumeID) if err != nil { if cpoerrors.IsNotFound(err) { continue @@ -1064,7 +1064,7 @@ func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi return nil, status.Error(codes.OutOfRange, "After round-up, volume size exceeds the limit specified") } - volume, err := cloud.GetVolume(volumeID) + volume, err := cloud.GetVolume(ctx, volumeID) if err != nil { if cpoerrors.IsNotFound(err) { return nil, status.Error(codes.NotFound, "Volume not found") @@ -1081,14 +1081,14 @@ func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi }, nil } - err = cloud.ExpandVolume(volumeID, volume.Status, volSizeGB) + err = cloud.ExpandVolume(ctx, volumeID, 
volume.Status, volSizeGB) if err != nil { return nil, status.Errorf(codes.Internal, "Could not resize volume %q to size %v: %v", volumeID, volSizeGB, err) } // we need wait for the volume to be available or InUse, it might be error_extending in some scenario targetStatus := []string{openstack.VolumeAvailableStatus, openstack.VolumeInUseStatus} - err = cloud.WaitVolumeTargetStatus(volumeID, targetStatus) + err = cloud.WaitVolumeTargetStatus(ctx, volumeID, targetStatus) if err != nil { klog.Errorf("Failed to WaitVolumeTargetStatus of volume %s: %v", volumeID, err) return nil, status.Errorf(codes.Internal, "[ControllerExpandVolume] Volume %s not in target state after resize operation: %v", volumeID, err) diff --git a/pkg/csi/cinder/openstack/openstack.go b/pkg/csi/cinder/openstack/openstack.go index 26f96dd01b..fbc386edf3 100644 --- a/pkg/csi/cinder/openstack/openstack.go +++ b/pkg/csi/cinder/openstack/openstack.go @@ -17,6 +17,7 @@ limitations under the License. package openstack import ( + "context" "fmt" "net/http" "os" @@ -45,35 +46,35 @@ func AddExtraFlags(fs *pflag.FlagSet) { } type IOpenStack interface { - CreateVolume(*volumes.CreateOpts, volumes.SchedulerHintOptsBuilder) (*volumes.Volume, error) - DeleteVolume(volumeID string) error - AttachVolume(instanceID, volumeID string) (string, error) - ListVolumes(limit int, startingToken string) ([]volumes.Volume, string, error) - WaitDiskAttached(instanceID string, volumeID string) error - DetachVolume(instanceID, volumeID string) error - WaitDiskDetached(instanceID string, volumeID string) error - WaitVolumeTargetStatus(volumeID string, tStatus []string) error - GetAttachmentDiskPath(instanceID, volumeID string) (string, error) - GetVolume(volumeID string) (*volumes.Volume, error) - GetVolumesByName(name string) ([]volumes.Volume, error) - GetVolumeByName(name string) (*volumes.Volume, error) - CreateSnapshot(name, volID string, tags map[string]string) (*snapshots.Snapshot, error) - ListSnapshots(filters map[string]string) ([]snapshots.Snapshot, string, error) - DeleteSnapshot(snapID string) error - GetSnapshotByID(snapshotID string) (*snapshots.Snapshot, error) - WaitSnapshotReady(snapshotID string) (string, error) - CreateBackup(name, volID, snapshotID, availabilityZone string, tags map[string]string) (*backups.Backup, error) - ListBackups(filters map[string]string) ([]backups.Backup, error) - DeleteBackup(backupID string) error - GetBackupByID(backupID string) (*backups.Backup, error) + CreateVolume(context.Context, *volumes.CreateOpts, volumes.SchedulerHintOptsBuilder) (*volumes.Volume, error) + DeleteVolume(ctx context.Context, volumeID string) error + AttachVolume(ctx context.Context, instanceID, volumeID string) (string, error) + ListVolumes(ctx context.Context, limit int, startingToken string) ([]volumes.Volume, string, error) + WaitDiskAttached(ctx context.Context, instanceID string, volumeID string) error + DetachVolume(ctx context.Context, instanceID, volumeID string) error + WaitDiskDetached(ctx context.Context, instanceID string, volumeID string) error + WaitVolumeTargetStatus(ctx context.Context, volumeID string, tStatus []string) error + GetAttachmentDiskPath(ctx context.Context, instanceID, volumeID string) (string, error) + GetVolume(ctx context.Context, volumeID string) (*volumes.Volume, error) + GetVolumesByName(ctx context.Context, name string) ([]volumes.Volume, error) + GetVolumeByName(ctx context.Context, name string) (*volumes.Volume, error) + CreateSnapshot(ctx context.Context, name, volID string, tags 
map[string]string) (*snapshots.Snapshot, error) + ListSnapshots(ctx context.Context, filters map[string]string) ([]snapshots.Snapshot, string, error) + DeleteSnapshot(ctx context.Context, snapID string) error + GetSnapshotByID(ctx context.Context, snapshotID string) (*snapshots.Snapshot, error) + WaitSnapshotReady(ctx context.Context, snapshotID string) (string, error) + CreateBackup(ctx context.Context, name, volID, snapshotID, availabilityZone string, tags map[string]string) (*backups.Backup, error) + ListBackups(ctx context.Context, filters map[string]string) ([]backups.Backup, error) + DeleteBackup(ctx context.Context, backupID string) error + GetBackupByID(ctx context.Context, backupID string) (*backups.Backup, error) BackupsAreEnabled() (bool, error) - WaitBackupReady(backupID string, snapshotSize int, backupMaxDurationSecondsPerGB int) (string, error) - GetInstanceByID(instanceID string) (*servers.Server, error) - ExpandVolume(volumeID string, status string, size int) error + WaitBackupReady(ctx context.Context, backupID string, snapshotSize int, backupMaxDurationSecondsPerGB int) (string, error) + GetInstanceByID(ctx context.Context, instanceID string) (*servers.Server, error) + ExpandVolume(ctx context.Context, volumeID string, status string, size int) error GetMaxVolLimit() int64 GetMetadataOpts() metadata.Opts GetBlockStorageOpts() BlockStorageOpts - ResolveVolumeListToUUIDs(volumes string) (string, error) + ResolveVolumeListToUUIDs(ctx context.Context, volumes string) (string, error) } type OpenStack struct { diff --git a/pkg/csi/cinder/openstack/openstack_backups.go b/pkg/csi/cinder/openstack/openstack_backups.go index e291309b45..764e35c4bc 100644 --- a/pkg/csi/cinder/openstack/openstack_backups.go +++ b/pkg/csi/cinder/openstack/openstack_backups.go @@ -44,7 +44,7 @@ const ( // CreateBackup issues a request to create a Backup from the specified Snapshot with the corresponding ID and // returns the resultant gophercloud Backup Item upon success. -func (os *OpenStack) CreateBackup(name, volID, snapshotID, availabilityZone string, tags map[string]string) (*backups.Backup, error) { +func (os *OpenStack) CreateBackup(ctx context.Context, name, volID, snapshotID, availabilityZone string, tags map[string]string) (*backups.Backup, error) { blockstorageServiceClient, err := openstack.NewBlockStorageV3(os.blockstorage.ProviderClient, os.epOpts) if err != nil { return &backups.Backup{}, err @@ -79,7 +79,7 @@ func (os *OpenStack) CreateBackup(name, volID, snapshotID, availabilityZone stri // TODO: Do some check before really call openstack API on the input mc := metrics.NewMetricContext("backup", "create") - backup, err := backups.Create(context.TODO(), blockstorageServiceClient, opts).Extract() + backup, err := backups.Create(ctx, blockstorageServiceClient, opts).Extract() if mc.ObserveRequest(err) != nil { return &backups.Backup{}, err } @@ -92,7 +92,7 @@ func (os *OpenStack) CreateBackup(name, volID, snapshotID, availabilityZone stri // provide the ability to provide limit and offset to enable the consumer to provide accurate pagination. // In addition the filters argument provides a mechanism for passing in valid filter strings to the list // operation. Valid filter keys are: Name, Status, VolumeID, Limit, Marker (TenantID has no effect). 
-func (os *OpenStack) ListBackups(filters map[string]string) ([]backups.Backup, error) { +func (os *OpenStack) ListBackups(ctx context.Context, filters map[string]string) ([]backups.Backup, error) { var allBackups []backups.Backup // Build the Opts @@ -115,7 +115,7 @@ func (os *OpenStack) ListBackups(filters map[string]string) ([]backups.Backup, e } mc := metrics.NewMetricContext("backup", "list") - allPages, err := backups.List(os.blockstorage, opts).AllPages(context.TODO()) + allPages, err := backups.List(os.blockstorage, opts).AllPages(ctx) if err != nil { return nil, err } @@ -132,9 +132,9 @@ func (os *OpenStack) ListBackups(filters map[string]string) ([]backups.Backup, e } // DeleteBackup issues a request to delete the Backup with the specified ID from the Cinder backend. -func (os *OpenStack) DeleteBackup(backupID string) error { +func (os *OpenStack) DeleteBackup(ctx context.Context, backupID string) error { mc := metrics.NewMetricContext("backup", "delete") - err := backups.Delete(context.TODO(), os.blockstorage, backupID).ExtractErr() + err := backups.Delete(ctx, os.blockstorage, backupID).ExtractErr() if mc.ObserveRequest(err) != nil { klog.Errorf("Failed to delete backup: %v", err) } @@ -142,9 +142,9 @@ func (os *OpenStack) DeleteBackup(backupID string) error { } // GetBackupByID returns backup details by id. -func (os *OpenStack) GetBackupByID(backupID string) (*backups.Backup, error) { +func (os *OpenStack) GetBackupByID(ctx context.Context, backupID string) (*backups.Backup, error) { mc := metrics.NewMetricContext("backup", "get") - backup, err := backups.Get(context.TODO(), os.blockstorage, backupID).Extract() + backup, err := backups.Get(ctx, os.blockstorage, backupID).Extract() if mc.ObserveRequest(err) != nil { klog.Errorf("Failed to get backup: %v", err) return nil, err @@ -159,7 +159,7 @@ func (os *OpenStack) BackupsAreEnabled() (bool, error) { // WaitBackupReady waits until backup is ready. It waits longer depending on // the size of the corresponding snapshot. -func (os *OpenStack) WaitBackupReady(backupID string, snapshotSize int, backupMaxDurationSecondsPerGB int) (string, error) { +func (os *OpenStack) WaitBackupReady(ctx context.Context, backupID string, snapshotSize int, backupMaxDurationSecondsPerGB int) (string, error) { var err error duration := time.Duration(backupMaxDurationSecondsPerGB*snapshotSize + backupBaseDurationSeconds) @@ -169,7 +169,7 @@ func (os *OpenStack) WaitBackupReady(backupID string, snapshotSize int, backupMa err = fmt.Errorf("timeout, Backup %s is still not Ready: %v", backupID, err) } - back, _ := os.GetBackupByID(backupID) + back, _ := os.GetBackupByID(ctx, backupID) if back != nil { return back.Status, err @@ -191,7 +191,7 @@ func (os *OpenStack) waitBackupReadyWithContext(backupID string, duration time.D for { select { case <-ticker.C: - done, err = os.backupIsReady(backupID) + done, err = os.backupIsReady(ctx, backupID) if err != nil { return err } @@ -208,8 +208,8 @@ func (os *OpenStack) waitBackupReadyWithContext(backupID string, duration time.D // Supporting function for waitBackupReadyWithContext(). // Returns true when the backup is ready. 
-func (os *OpenStack) backupIsReady(backupID string) (bool, error) { - backup, err := os.GetBackupByID(backupID) +func (os *OpenStack) backupIsReady(ctx context.Context, backupID string) (bool, error) { + backup, err := os.GetBackupByID(ctx, backupID) if err != nil { return false, err } diff --git a/pkg/csi/cinder/openstack/openstack_instances.go b/pkg/csi/cinder/openstack/openstack_instances.go index a4c093bf75..71d2cfe0a1 100644 --- a/pkg/csi/cinder/openstack/openstack_instances.go +++ b/pkg/csi/cinder/openstack/openstack_instances.go @@ -8,9 +8,9 @@ import ( ) // GetInstanceByID returns server with specified instanceID -func (os *OpenStack) GetInstanceByID(instanceID string) (*servers.Server, error) { +func (os *OpenStack) GetInstanceByID(ctx context.Context, instanceID string) (*servers.Server, error) { mc := metrics.NewMetricContext("server", "get") - server, err := servers.Get(context.TODO(), os.compute, instanceID).Extract() + server, err := servers.Get(ctx, os.compute, instanceID).Extract() if mc.ObserveRequest(err) != nil { return nil, err } diff --git a/pkg/csi/cinder/openstack/openstack_mock.go b/pkg/csi/cinder/openstack/openstack_mock.go index 2fa01876b1..87b5d76cd5 100644 --- a/pkg/csi/cinder/openstack/openstack_mock.go +++ b/pkg/csi/cinder/openstack/openstack_mock.go @@ -17,6 +17,7 @@ limitations under the License. package openstack import ( + "context" "fmt" "github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/backups" @@ -67,7 +68,7 @@ type OpenStackMock struct { // revive:enable:exported // AttachVolume provides a mock function with given fields: instanceID, volumeID -func (_m *OpenStackMock) AttachVolume(instanceID string, volumeID string) (string, error) { +func (_m *OpenStackMock) AttachVolume(ctx context.Context, instanceID string, volumeID string) (string, error) { ret := _m.Called(instanceID, volumeID) var r0 string @@ -88,7 +89,7 @@ func (_m *OpenStackMock) AttachVolume(instanceID string, volumeID string) (strin } // CreateVolume provides a mock function with given fields: name, size, vtype, availability, tags -func (_m *OpenStackMock) CreateVolume(opts *volumes.CreateOpts, _ volumes.SchedulerHintOptsBuilder) (*volumes.Volume, error) { +func (_m *OpenStackMock) CreateVolume(ctx context.Context, opts *volumes.CreateOpts, _ volumes.SchedulerHintOptsBuilder) (*volumes.Volume, error) { name := opts.Name size := opts.Size vtype := opts.VolumeType @@ -118,7 +119,7 @@ func (_m *OpenStackMock) CreateVolume(opts *volumes.CreateOpts, _ volumes.Schedu } // DeleteVolume provides a mock function with given fields: volumeID -func (_m *OpenStackMock) DeleteVolume(volumeID string) error { +func (_m *OpenStackMock) DeleteVolume(ctx context.Context, volumeID string) error { ret := _m.Called(volumeID) var r0 error @@ -132,12 +133,12 @@ func (_m *OpenStackMock) DeleteVolume(volumeID string) error { } // GetVolume provides a mock function with given fields: volumeID -func (_m *OpenStackMock) GetVolume(volumeID string) (*volumes.Volume, error) { +func (_m *OpenStackMock) GetVolume(ctx context.Context, volumeID string) (*volumes.Volume, error) { return &fakeVol1, nil } // DetachVolume provides a mock function with given fields: instanceID, volumeID -func (_m *OpenStackMock) DetachVolume(instanceID string, volumeID string) error { +func (_m *OpenStackMock) DetachVolume(ctx context.Context, instanceID string, volumeID string) error { ret := _m.Called(instanceID, volumeID) var r0 error @@ -151,7 +152,7 @@ func (_m *OpenStackMock) DetachVolume(instanceID string, volumeID string) 
error } // GetAttachmentDiskPath provides a mock function with given fields: instanceID, volumeID -func (_m *OpenStackMock) GetAttachmentDiskPath(instanceID string, volumeID string) (string, error) { +func (_m *OpenStackMock) GetAttachmentDiskPath(ctx context.Context, instanceID string, volumeID string) (string, error) { ret := _m.Called(instanceID, volumeID) var r0 string @@ -172,7 +173,7 @@ func (_m *OpenStackMock) GetAttachmentDiskPath(instanceID string, volumeID strin } // WaitDiskAttached provides a mock function with given fields: instanceID, volumeID -func (_m *OpenStackMock) WaitDiskAttached(instanceID string, volumeID string) error { +func (_m *OpenStackMock) WaitDiskAttached(ctx context.Context, instanceID string, volumeID string) error { ret := _m.Called(instanceID, volumeID) var r0 error @@ -186,7 +187,7 @@ func (_m *OpenStackMock) WaitDiskAttached(instanceID string, volumeID string) er } // WaitVolumeTargetStatus provides a mock function with given fields: volumeID, tStatus -func (_m *OpenStackMock) WaitVolumeTargetStatus(volumeID string, tStatus []string) error { +func (_m *OpenStackMock) WaitVolumeTargetStatus(ctx context.Context, volumeID string, tStatus []string) error { ret := _m.Called(volumeID, tStatus) var r0 error @@ -200,7 +201,7 @@ func (_m *OpenStackMock) WaitVolumeTargetStatus(volumeID string, tStatus []strin } // WaitDiskDetached provides a mock function with given fields: instanceID, volumeID -func (_m *OpenStackMock) WaitDiskDetached(instanceID string, volumeID string) error { +func (_m *OpenStackMock) WaitDiskDetached(ctx context.Context, instanceID string, volumeID string) error { ret := _m.Called(instanceID, volumeID) var r0 error @@ -214,7 +215,7 @@ func (_m *OpenStackMock) WaitDiskDetached(instanceID string, volumeID string) er } // GetVolumesByName provides a mock function with given fields: name -func (_m *OpenStackMock) GetVolumesByName(name string) ([]volumes.Volume, error) { +func (_m *OpenStackMock) GetVolumesByName(ctx context.Context, name string) ([]volumes.Volume, error) { ret := _m.Called(name) @@ -238,8 +239,8 @@ func (_m *OpenStackMock) GetVolumesByName(name string) ([]volumes.Volume, error) } // GetVolumeByName provides a mock function with given fields: name -func (_m *OpenStackMock) GetVolumeByName(name string) (*volumes.Volume, error) { - vols, err := _m.GetVolumesByName(name) +func (_m *OpenStackMock) GetVolumeByName(ctx context.Context, name string) (*volumes.Volume, error) { + vols, err := _m.GetVolumesByName(ctx, name) if err != nil { return nil, err } @@ -256,7 +257,7 @@ func (_m *OpenStackMock) GetVolumeByName(name string) (*volumes.Volume, error) { } // ListSnapshots provides a mock function with given fields: limit, offset, filters -func (_m *OpenStackMock) ListSnapshots(filters map[string]string) ([]snapshots.Snapshot, string, error) { +func (_m *OpenStackMock) ListSnapshots(ctx context.Context, filters map[string]string) ([]snapshots.Snapshot, string, error) { ret := _m.Called(filters) var r0 []snapshots.Snapshot @@ -288,7 +289,7 @@ func (_m *OpenStackMock) ListSnapshots(filters map[string]string) ([]snapshots.S } // CreateSnapshot provides a mock function with given fields: name, volID, tags -func (_m *OpenStackMock) CreateSnapshot(name string, volID string, tags map[string]string) (*snapshots.Snapshot, error) { +func (_m *OpenStackMock) CreateSnapshot(ctx context.Context, name string, volID string, tags map[string]string) (*snapshots.Snapshot, error) { ret := _m.Called(name, volID, tags) var r0 *snapshots.Snapshot @@ -311,7 
+312,7 @@ func (_m *OpenStackMock) CreateSnapshot(name string, volID string, tags map[stri } // DeleteSnapshot provides a mock function with given fields: snapID -func (_m *OpenStackMock) DeleteSnapshot(snapID string) error { +func (_m *OpenStackMock) DeleteSnapshot(ctx context.Context, snapID string) error { ret := _m.Called(snapID) var r0 error @@ -324,7 +325,7 @@ func (_m *OpenStackMock) DeleteSnapshot(snapID string) error { return r0 } -func (_m *OpenStackMock) ListBackups(filters map[string]string) ([]backups.Backup, error) { +func (_m *OpenStackMock) ListBackups(ctx context.Context, filters map[string]string) ([]backups.Backup, error) { ret := _m.Called(filters) var r0 []backups.Backup @@ -345,7 +346,7 @@ func (_m *OpenStackMock) ListBackups(filters map[string]string) ([]backups.Backu return r0, r1 } -func (_m *OpenStackMock) CreateBackup(name, volID, snapshotID, availabilityZone string, tags map[string]string) (*backups.Backup, error) { +func (_m *OpenStackMock) CreateBackup(ctx context.Context, name, volID, snapshotID, availabilityZone string, tags map[string]string) (*backups.Backup, error) { ret := _m.Called(name, volID, snapshotID, availabilityZone, tags) var r0 *backups.Backup @@ -367,7 +368,7 @@ func (_m *OpenStackMock) CreateBackup(name, volID, snapshotID, availabilityZone return r0, r1 } -func (_m *OpenStackMock) DeleteBackup(backupID string) error { +func (_m *OpenStackMock) DeleteBackup(ctx context.Context, backupID string) error { ret := _m.Called(backupID) var r0 error @@ -381,7 +382,7 @@ func (_m *OpenStackMock) DeleteBackup(backupID string) error { } // ListVolumes provides a mock function without param -func (_m *OpenStackMock) ListVolumes(limit int, marker string) ([]volumes.Volume, string, error) { +func (_m *OpenStackMock) ListVolumes(ctx context.Context, limit int, marker string) ([]volumes.Volume, string, error) { ret := _m.Called(limit, marker) var r0 []volumes.Volume @@ -413,7 +414,7 @@ func (_m *OpenStackMock) ListVolumes(limit int, marker string) ([]volumes.Volume return r0, r1, r2 } -func (_m *OpenStackMock) GetAvailabilityZone() (string, error) { +func (_m *OpenStackMock) GetAvailabilityZone(ctx context.Context) (string, error) { ret := _m.Called() var r0 string if rf, ok := ret.Get(0).(func() string); ok { @@ -436,12 +437,12 @@ func (_m *OpenStackMock) GetInstanceID() (string, error) { return "", nil } -func (_m *OpenStackMock) GetSnapshotByID(snapshotID string) (*snapshots.Snapshot, error) { +func (_m *OpenStackMock) GetSnapshotByID(ctx context.Context, snapshotID string) (*snapshots.Snapshot, error) { return &fakeSnapshot, nil } -func (_m *OpenStackMock) WaitSnapshotReady(snapshotID string) (string, error) { +func (_m *OpenStackMock) WaitSnapshotReady(ctx context.Context, snapshotID string) (string, error) { ret := _m.Called(snapshotID) var r0 string @@ -461,12 +462,12 @@ func (_m *OpenStackMock) WaitSnapshotReady(snapshotID string) (string, error) { return r0, r1 } -func (_m *OpenStackMock) GetBackupByID(backupID string) (*backups.Backup, error) { +func (_m *OpenStackMock) GetBackupByID(ctx context.Context, backupID string) (*backups.Backup, error) { return &fakeBackup, nil } -func (_m *OpenStackMock) WaitBackupReady(backupID string, snapshotSize int, backupMaxDurationSecondsPerGB int) (string, error) { +func (_m *OpenStackMock) WaitBackupReady(ctx context.Context, backupID string, snapshotSize int, backupMaxDurationSecondsPerGB int) (string, error) { ret := _m.Called(backupID) var r0 string @@ -494,12 +495,12 @@ func (_m *OpenStackMock) 
BackupsAreEnabled() (bool, error) { return true, nil } -func (_m *OpenStackMock) GetInstanceByID(instanceID string) (*servers.Server, error) { +func (_m *OpenStackMock) GetInstanceByID(ctx context.Context, instanceID string) (*servers.Server, error) { return nil, nil } // ExpandVolume provides a mock function with given fields: volumeID, status, size -func (_m *OpenStackMock) ExpandVolume(volumeID string, status string, size int) error { +func (_m *OpenStackMock) ExpandVolume(ctx context.Context, volumeID string, status string, size int) error { ret := _m.Called(volumeID, status, size) var r0 error @@ -524,6 +525,6 @@ func (_m *OpenStackMock) GetBlockStorageOpts() BlockStorageOpts { } // ResolveVolumeListToUUIDs provides a mock function to return volume UUIDs -func (_m *OpenStackMock) ResolveVolumeListToUUIDs(v string) (string, error) { +func (_m *OpenStackMock) ResolveVolumeListToUUIDs(ctx context.Context, v string) (string, error) { return v, nil } diff --git a/pkg/csi/cinder/openstack/openstack_snapshots.go b/pkg/csi/cinder/openstack/openstack_snapshots.go index 6999f9e790..982916d256 100644 --- a/pkg/csi/cinder/openstack/openstack_snapshots.go +++ b/pkg/csi/cinder/openstack/openstack_snapshots.go @@ -46,7 +46,7 @@ const ( // CreateSnapshot issues a request to take a Snapshot of the specified Volume with the corresponding ID and // returns the resultant gophercloud Snapshot Item upon success -func (os *OpenStack) CreateSnapshot(name, volID string, tags map[string]string) (*snapshots.Snapshot, error) { +func (os *OpenStack) CreateSnapshot(ctx context.Context, name, volID string, tags map[string]string) (*snapshots.Snapshot, error) { force := false // if no flag given, then force will be false by default @@ -72,7 +72,7 @@ func (os *OpenStack) CreateSnapshot(name, volID string, tags map[string]string) } // TODO: Do some check before really call openstack API on the input mc := metrics.NewMetricContext("snapshot", "create") - snap, err := snapshots.Create(context.TODO(), os.blockstorage, opts).Extract() + snap, err := snapshots.Create(ctx, os.blockstorage, opts).Extract() if mc.ObserveRequest(err) != nil { return &snapshots.Snapshot{}, err } @@ -85,7 +85,7 @@ func (os *OpenStack) CreateSnapshot(name, volID string, tags map[string]string) // provide the ability to provide limit and offset to enable the consumer to provide accurate pagination. // In addition the filters argument provides a mechanism for passing in valid filter strings to the list // operation. 
Valid filter keys are: Name, Status, VolumeID, Limit, Marker (TenantID has no effect) -func (os *OpenStack) ListSnapshots(filters map[string]string) ([]snapshots.Snapshot, string, error) { +func (os *OpenStack) ListSnapshots(ctx context.Context, filters map[string]string) ([]snapshots.Snapshot, string, error) { var nextPageToken string var snaps []snapshots.Snapshot @@ -108,7 +108,7 @@ func (os *OpenStack) ListSnapshots(filters map[string]string) ([]snapshots.Snaps } } mc := metrics.NewMetricContext("snapshot", "list") - err := snapshots.List(os.blockstorage, opts).EachPage(context.TODO(), func(_ context.Context, page pagination.Page) (bool, error) { + err := snapshots.List(os.blockstorage, opts).EachPage(ctx, func(_ context.Context, page pagination.Page) (bool, error) { var err error snaps, err = snapshots.ExtractSnapshots(page) @@ -139,9 +139,9 @@ func (os *OpenStack) ListSnapshots(filters map[string]string) ([]snapshots.Snaps } // DeleteSnapshot issues a request to delete the Snapshot with the specified ID from the Cinder backend -func (os *OpenStack) DeleteSnapshot(snapID string) error { +func (os *OpenStack) DeleteSnapshot(ctx context.Context, snapID string) error { mc := metrics.NewMetricContext("snapshot", "delete") - err := snapshots.Delete(context.TODO(), os.blockstorage, snapID).ExtractErr() + err := snapshots.Delete(ctx, os.blockstorage, snapID).ExtractErr() if mc.ObserveRequest(err) != nil { klog.Errorf("Failed to delete snapshot: %v", err) } @@ -149,9 +149,9 @@ func (os *OpenStack) DeleteSnapshot(snapID string) error { } // GetSnapshotByID returns snapshot details by id -func (os *OpenStack) GetSnapshotByID(snapshotID string) (*snapshots.Snapshot, error) { +func (os *OpenStack) GetSnapshotByID(ctx context.Context, snapshotID string) (*snapshots.Snapshot, error) { mc := metrics.NewMetricContext("snapshot", "get") - s, err := snapshots.Get(context.TODO(), os.blockstorage, snapshotID).Extract() + s, err := snapshots.Get(ctx, os.blockstorage, snapshotID).Extract() if mc.ObserveRequest(err) != nil { klog.Errorf("Failed to get snapshot: %v", err) return nil, err @@ -160,7 +160,7 @@ func (os *OpenStack) GetSnapshotByID(snapshotID string) (*snapshots.Snapshot, er } // WaitSnapshotReady waits till snapshot is ready -func (os *OpenStack) WaitSnapshotReady(snapshotID string) (string, error) { +func (os *OpenStack) WaitSnapshotReady(ctx context.Context, snapshotID string) (string, error) { backoff := wait.Backoff{ Duration: snapReadyDuration, Factor: snapReadyFactor, @@ -168,7 +168,7 @@ func (os *OpenStack) WaitSnapshotReady(snapshotID string) (string, error) { } err := wait.ExponentialBackoff(backoff, func() (bool, error) { - ready, err := os.snapshotIsReady(snapshotID) + ready, err := os.snapshotIsReady(ctx, snapshotID) if err != nil { return false, err } @@ -179,7 +179,7 @@ func (os *OpenStack) WaitSnapshotReady(snapshotID string) (string, error) { err = fmt.Errorf("timeout, Snapshot %s is still not Ready %v", snapshotID, err) } - snap, _ := os.GetSnapshotByID(snapshotID) + snap, _ := os.GetSnapshotByID(ctx, snapshotID) if snap != nil { return snap.Status, err @@ -188,8 +188,8 @@ func (os *OpenStack) WaitSnapshotReady(snapshotID string) (string, error) { } } -func (os *OpenStack) snapshotIsReady(snapshotID string) (bool, error) { - snap, err := os.GetSnapshotByID(snapshotID) +func (os *OpenStack) snapshotIsReady(ctx context.Context, snapshotID string) (bool, error) { + snap, err := os.GetSnapshotByID(ctx, snapshotID) if err != nil { return false, err } diff --git 
a/pkg/csi/cinder/openstack/openstack_volumes.go b/pkg/csi/cinder/openstack/openstack_volumes.go index 5f810e7416..ea6307d2e8 100644 --- a/pkg/csi/cinder/openstack/openstack_volumes.go +++ b/pkg/csi/cinder/openstack/openstack_volumes.go @@ -55,7 +55,7 @@ const ( var volumeErrorStates = [...]string{"error", "error_extending", "error_deleting"} // CreateVolume creates a volume of given size -func (os *OpenStack) CreateVolume(opts *volumes.CreateOpts, schedulerHints volumes.SchedulerHintOptsBuilder) (*volumes.Volume, error) { +func (os *OpenStack) CreateVolume(ctx context.Context, opts *volumes.CreateOpts, schedulerHints volumes.SchedulerHintOptsBuilder) (*volumes.Volume, error) { blockstorageClient, err := openstack.NewBlockStorageV3(os.blockstorage.ProviderClient, os.epOpts) if err != nil { return nil, err @@ -69,7 +69,7 @@ func (os *OpenStack) CreateVolume(opts *volumes.CreateOpts, schedulerHints volum mc := metrics.NewMetricContext("volume", "create") opts.Description = volumeDescription - vol, err := volumes.Create(context.TODO(), blockstorageClient, opts, schedulerHints).Extract() + vol, err := volumes.Create(ctx, blockstorageClient, opts, schedulerHints).Extract() if mc.ObserveRequest(err) != nil { return nil, err } @@ -78,13 +78,13 @@ func (os *OpenStack) CreateVolume(opts *volumes.CreateOpts, schedulerHints volum } // ListVolumes list all the volumes -func (os *OpenStack) ListVolumes(limit int, startingToken string) ([]volumes.Volume, string, error) { +func (os *OpenStack) ListVolumes(ctx context.Context, limit int, startingToken string) ([]volumes.Volume, string, error) { var nextPageToken string var vols []volumes.Volume opts := volumes.ListOpts{Limit: limit, Marker: startingToken} mc := metrics.NewMetricContext("volume", "list") - err := volumes.List(os.blockstorage, opts).EachPage(context.TODO(), func(_ context.Context, page pagination.Page) (bool, error) { + err := volumes.List(os.blockstorage, opts).EachPage(ctx, func(_ context.Context, page pagination.Page) (bool, error) { var err error vols, err = volumes.ExtractVolumes(page) @@ -116,7 +116,7 @@ func (os *OpenStack) ListVolumes(limit int, startingToken string) ([]volumes.Vol // GetVolumesByName is a wrapper around ListVolumes that creates a Name filter to act as a GetByName // Returns a list of Volume references with the specified name -func (os *OpenStack) GetVolumesByName(n string) ([]volumes.Volume, error) { +func (os *OpenStack) GetVolumesByName(ctx context.Context, n string) ([]volumes.Volume, error) { // Init a local thread safe copy of the Cinder ServiceClient blockstorageClient, err := openstack.NewBlockStorageV3(os.blockstorage.ProviderClient, os.epOpts) if err != nil { @@ -131,7 +131,7 @@ func (os *OpenStack) GetVolumesByName(n string) ([]volumes.Volume, error) { opts := volumes.ListOpts{Name: n} mc := metrics.NewMetricContext("volume", "list") - pages, err := volumes.List(blockstorageClient, opts).AllPages(context.TODO()) + pages, err := volumes.List(blockstorageClient, opts).AllPages(ctx) if mc.ObserveRequest(err) != nil { return nil, err } @@ -146,8 +146,8 @@ func (os *OpenStack) GetVolumesByName(n string) ([]volumes.Volume, error) { // GetVolumeByName is a wrapper around GetVolumesByName that returns a single Volume reference // with the specified name -func (os *OpenStack) GetVolumeByName(n string) (*volumes.Volume, error) { - vols, err := os.GetVolumesByName(n) +func (os *OpenStack) GetVolumeByName(ctx context.Context, n string) (*volumes.Volume, error) { + vols, err := os.GetVolumesByName(ctx, n) if err != nil 
{ return nil, err } @@ -164,8 +164,8 @@ func (os *OpenStack) GetVolumeByName(n string) (*volumes.Volume, error) { } // DeleteVolume delete a volume -func (os *OpenStack) DeleteVolume(volumeID string) error { - used, err := os.diskIsUsed(volumeID) +func (os *OpenStack) DeleteVolume(ctx context.Context, volumeID string) error { + used, err := os.diskIsUsed(ctx, volumeID) if err != nil { return err } @@ -174,14 +174,14 @@ func (os *OpenStack) DeleteVolume(volumeID string) error { } mc := metrics.NewMetricContext("volume", "delete") - err = volumes.Delete(context.TODO(), os.blockstorage, volumeID, nil).ExtractErr() + err = volumes.Delete(ctx, os.blockstorage, volumeID, nil).ExtractErr() return mc.ObserveRequest(err) } // GetVolume retrieves Volume by its ID. -func (os *OpenStack) GetVolume(volumeID string) (*volumes.Volume, error) { +func (os *OpenStack) GetVolume(ctx context.Context, volumeID string) (*volumes.Volume, error) { mc := metrics.NewMetricContext("volume", "get") - vol, err := volumes.Get(context.TODO(), os.blockstorage, volumeID).Extract() + vol, err := volumes.Get(ctx, os.blockstorage, volumeID).Extract() if mc.ObserveRequest(err) != nil { return nil, err } @@ -190,10 +190,10 @@ func (os *OpenStack) GetVolume(volumeID string) (*volumes.Volume, error) { } // AttachVolume attaches given cinder volume to the compute -func (os *OpenStack) AttachVolume(instanceID, volumeID string) (string, error) { +func (os *OpenStack) AttachVolume(ctx context.Context, instanceID, volumeID string) (string, error) { computeServiceClient := os.compute - volume, err := os.GetVolume(volumeID) + volume, err := os.GetVolume(ctx, volumeID) if err != nil { return "", err } @@ -216,7 +216,7 @@ func (os *OpenStack) AttachVolume(instanceID, volumeID string) (string, error) { } mc := metrics.NewMetricContext("volume", "attach") - _, err = volumeattach.Create(context.TODO(), computeServiceClient, instanceID, &volumeattach.CreateOpts{ + _, err = volumeattach.Create(ctx, computeServiceClient, instanceID, &volumeattach.CreateOpts{ VolumeID: volume.ID, }).Extract() @@ -228,7 +228,7 @@ func (os *OpenStack) AttachVolume(instanceID, volumeID string) (string, error) { } // WaitDiskAttached waits for attached -func (os *OpenStack) WaitDiskAttached(instanceID string, volumeID string) error { +func (os *OpenStack) WaitDiskAttached(ctx context.Context, instanceID string, volumeID string) error { backoff := wait.Backoff{ Duration: diskAttachInitDelay, Factor: diskAttachFactor, @@ -236,7 +236,7 @@ func (os *OpenStack) WaitDiskAttached(instanceID string, volumeID string) error } err := wait.ExponentialBackoff(backoff, func() (bool, error) { - attached, err := os.diskIsAttached(instanceID, volumeID) + attached, err := os.diskIsAttached(ctx, instanceID, volumeID) if err != nil && !cpoerrors.IsNotFound(err) { // if this is a race condition indicate the volume is deleted // during sleep phase, ignore the error and return attach=false @@ -253,7 +253,7 @@ func (os *OpenStack) WaitDiskAttached(instanceID string, volumeID string) error } // WaitVolumeTargetStatus waits for volume to be in target state -func (os *OpenStack) WaitVolumeTargetStatus(volumeID string, tStatus []string) error { +func (os *OpenStack) WaitVolumeTargetStatus(ctx context.Context, volumeID string, tStatus []string) error { backoff := wait.Backoff{ Duration: operationFinishInitDelay, Factor: operationFinishFactor, @@ -261,7 +261,7 @@ func (os *OpenStack) WaitVolumeTargetStatus(volumeID string, tStatus []string) e } waitErr := wait.ExponentialBackoff(backoff, func() 
(bool, error) { - vol, err := os.GetVolume(volumeID) + vol, err := os.GetVolume(ctx, volumeID) if err != nil { return false, err } @@ -286,8 +286,8 @@ func (os *OpenStack) WaitVolumeTargetStatus(volumeID string, tStatus []string) e } // DetachVolume detaches given cinder volume from the compute -func (os *OpenStack) DetachVolume(instanceID, volumeID string) error { - volume, err := os.GetVolume(volumeID) +func (os *OpenStack) DetachVolume(ctx context.Context, instanceID, volumeID string) error { + volume, err := os.GetVolume(ctx, volumeID) if err != nil { return err } @@ -304,7 +304,7 @@ func (os *OpenStack) DetachVolume(instanceID, volumeID string) error { for _, att := range volume.Attachments { if att.ServerID == instanceID { mc := metrics.NewMetricContext("volume", "detach") - err = volumeattach.Delete(context.TODO(), os.compute, instanceID, volume.ID).ExtractErr() + err = volumeattach.Delete(ctx, os.compute, instanceID, volume.ID).ExtractErr() if mc.ObserveRequest(err) != nil { return fmt.Errorf("failed to detach volume %s from compute %s : %v", volume.ID, instanceID, err) } @@ -318,7 +318,7 @@ func (os *OpenStack) DetachVolume(instanceID, volumeID string) error { } // WaitDiskDetached waits for detached -func (os *OpenStack) WaitDiskDetached(instanceID string, volumeID string) error { +func (os *OpenStack) WaitDiskDetached(ctx context.Context, instanceID string, volumeID string) error { backoff := wait.Backoff{ Duration: diskDetachInitDelay, Factor: diskDetachFactor, @@ -326,7 +326,7 @@ func (os *OpenStack) WaitDiskDetached(instanceID string, volumeID string) error } err := wait.ExponentialBackoff(backoff, func() (bool, error) { - attached, err := os.diskIsAttached(instanceID, volumeID) + attached, err := os.diskIsAttached(ctx, instanceID, volumeID) if err != nil { return false, err } @@ -341,8 +341,8 @@ func (os *OpenStack) WaitDiskDetached(instanceID string, volumeID string) error } // GetAttachmentDiskPath gets device path of attached volume to the compute -func (os *OpenStack) GetAttachmentDiskPath(instanceID, volumeID string) (string, error) { - volume, err := os.GetVolume(volumeID) +func (os *OpenStack) GetAttachmentDiskPath(ctx context.Context, instanceID, volumeID string) (string, error) { + volume, err := os.GetVolume(ctx, volumeID) if err != nil { return "", err } @@ -362,7 +362,7 @@ func (os *OpenStack) GetAttachmentDiskPath(instanceID, volumeID string) (string, } // ExpandVolume expands the volume to new size -func (os *OpenStack) ExpandVolume(volumeID string, status string, newSize int) error { +func (os *OpenStack) ExpandVolume(ctx context.Context, volumeID string, status string, newSize int) error { extendOpts := volumes.ExtendSizeOpts{ NewSize: newSize, } @@ -386,10 +386,10 @@ func (os *OpenStack) ExpandVolume(volumeID string, status string, newSize int) e blockstorageClient.Microversion = "3.42" mc := metrics.NewMetricContext("volume", "expand") - return mc.ObserveRequest(volumes.ExtendSize(context.TODO(), blockstorageClient, volumeID, extendOpts).ExtractErr()) + return mc.ObserveRequest(volumes.ExtendSize(ctx, blockstorageClient, volumeID, extendOpts).ExtractErr()) case VolumeAvailableStatus: mc := metrics.NewMetricContext("volume", "expand") - return mc.ObserveRequest(volumes.ExtendSize(context.TODO(), os.blockstorage, volumeID, extendOpts).ExtractErr()) + return mc.ObserveRequest(volumes.ExtendSize(ctx, os.blockstorage, volumeID, extendOpts).ExtractErr()) } // cinder volume can not be expanded when volume status is not volumeInUseStatus or not 
volumeAvailableStatus @@ -406,8 +406,8 @@ func (os *OpenStack) GetMaxVolLimit() int64 { } // diskIsAttached queries if a volume is attached to a compute instance -func (os *OpenStack) diskIsAttached(instanceID, volumeID string) (bool, error) { - volume, err := os.GetVolume(volumeID) +func (os *OpenStack) diskIsAttached(ctx context.Context, instanceID, volumeID string) (bool, error) { + volume, err := os.GetVolume(ctx, volumeID) if err != nil { return false, err } @@ -420,8 +420,8 @@ func (os *OpenStack) diskIsAttached(instanceID, volumeID string) (bool, error) { } // diskIsUsed returns true a disk is attached to any node. -func (os *OpenStack) diskIsUsed(volumeID string) (bool, error) { - volume, err := os.GetVolume(volumeID) +func (os *OpenStack) diskIsUsed(ctx context.Context, volumeID string) (bool, error) { + volume, err := os.GetVolume(ctx, volumeID) if err != nil { return false, err } @@ -440,7 +440,7 @@ func (os *OpenStack) GetBlockStorageOpts() BlockStorageOpts { // ResolveVolumeListToUUIDs resolves a list of volume names or UUIDs to a // string of UUIDs -func (os *OpenStack) ResolveVolumeListToUUIDs(affinityList string) (string, error) { +func (os *OpenStack) ResolveVolumeListToUUIDs(ctx context.Context, affinityList string) (string, error) { list := util.SplitTrim(affinityList, ',') if len(list) == 0 { return "", nil @@ -453,13 +453,13 @@ func (os *OpenStack) ResolveVolumeListToUUIDs(affinityList string) (string, erro if id, e := util.UUID(v); e == nil { // First try to get volume by ID - volume, err = os.GetVolume(id) + volume, err = os.GetVolume(ctx, id) if err != nil && cpoerrors.IsNotFound(err) { - volume, err = os.GetVolumeByName(v) + volume, err = os.GetVolumeByName(ctx, v) } } else { // If not a UUID, try to get volume by name - volume, err = os.GetVolumeByName(v) + volume, err = os.GetVolumeByName(ctx, v) } if err != nil { if cpoerrors.IsNotFound(err) { diff --git a/pkg/csi/manila/controllerserver.go b/pkg/csi/manila/controllerserver.go index ea891f926e..3047b9cc71 100644 --- a/pkg/csi/manila/controllerserver.go +++ b/pkg/csi/manila/controllerserver.go @@ -114,7 +114,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol } defer pendingVolumes.Delete(req.GetName()) - manilaClient, err := cs.d.manilaClientBuilder.New(osOpts) + manilaClient, err := cs.d.manilaClientBuilder.New(ctx, osOpts) if err != nil { return nil, status.Errorf(codes.Unauthenticated, "failed to create Manila v2 client: %v", err) } @@ -154,11 +154,11 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol klog.V(4).Infof("CreateVolume: Getting scheduler hints: affinity=%s, anti-affinity=%s", affinity, antiAffinity) // resolve share names to UUIDs - shareOpts.Affinity, err = resolveShareListToUUIDs(manilaClient, affinity) + shareOpts.Affinity, err = resolveShareListToUUIDs(ctx, manilaClient, affinity) if err != nil { return nil, status.Errorf(codes.InvalidArgument, "failed to resolve affinity share UUIDs: %v", err) } - shareOpts.AntiAffinity, err = resolveShareListToUUIDs(manilaClient, antiAffinity) + shareOpts.AntiAffinity, err = resolveShareListToUUIDs(ctx, manilaClient, antiAffinity) if err != nil { return nil, status.Errorf(codes.InvalidArgument, "failed to resolve anti-affinity share UUIDs: %v", err) } @@ -179,7 +179,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol return nil, err } - share, err := volCreator.create(manilaClient, shareName, sizeInGiB, shareOpts, shareMetadata) + share, err := 
volCreator.create(ctx, manilaClient, shareName, sizeInGiB, shareOpts, shareMetadata) if err != nil { return nil, err } @@ -193,7 +193,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol ad := getShareAdapter(shareOpts.Protocol) - accessRight, err := ad.GetOrGrantAccess(&shareadapters.GrantAccessArgs{Share: share, ManilaClient: manilaClient, Options: shareOpts}) + accessRight, err := ad.GetOrGrantAccess(ctx, &shareadapters.GrantAccessArgs{Share: share, ManilaClient: manilaClient, Options: shareOpts}) if err != nil { if wait.Interrupted(err) { return nil, status.Errorf(codes.DeadlineExceeded, "deadline exceeded while waiting for access rule %s for volume %s to become available", accessRight.ID, share.Name) @@ -235,12 +235,12 @@ func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol return nil, status.Errorf(codes.InvalidArgument, "invalid OpenStack secrets: %v", err) } - manilaClient, err := cs.d.manilaClientBuilder.New(osOpts) + manilaClient, err := cs.d.manilaClientBuilder.New(ctx, osOpts) if err != nil { return nil, status.Errorf(codes.Unauthenticated, "failed to create Manila v2 client: %v", err) } - if err := deleteShare(manilaClient, req.GetVolumeId()); err != nil { + if err := deleteShare(ctx, manilaClient, req.GetVolumeId()); err != nil { return nil, status.Errorf(codes.Internal, "failed to delete volume %s: %v", req.GetVolumeId(), err) } @@ -265,14 +265,14 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS } defer pendingSnapshots.Delete(req.GetName()) - manilaClient, err := cs.d.manilaClientBuilder.New(osOpts) + manilaClient, err := cs.d.manilaClientBuilder.New(ctx, osOpts) if err != nil { return nil, status.Errorf(codes.Unauthenticated, "failed to create Manila v2 client: %v", err) } // Retrieve the source share - sourceShare, err := manilaClient.GetShareByID(req.GetSourceVolumeId()) + sourceShare, err := manilaClient.GetShareByID(ctx, req.GetSourceVolumeId()) if err != nil { if clouderrors.IsNotFound(err) { return nil, status.Errorf(codes.NotFound, "failed to create snapshot %s for volume %s because the volume doesn't exist: %v", req.GetName(), req.GetSourceVolumeId(), err) @@ -299,7 +299,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS // Retrieve an existing snapshot or create a new one - snapshot, err := getOrCreateSnapshot(manilaClient, req.GetName(), sourceShare.ID) + snapshot, err := getOrCreateSnapshot(ctx, manilaClient, req.GetName(), sourceShare.ID) if err != nil { if wait.Interrupted(err) { return nil, status.Errorf(codes.DeadlineExceeded, "deadline exceeded while waiting for snapshot %s of volume %s to become available", snapshot.ID, req.GetSourceVolumeId()) @@ -327,9 +327,9 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS readyToUse = true case snapshotError: // An error occurred, try to roll-back the snapshot - tryDeleteSnapshot(manilaClient, snapshot) + tryDeleteSnapshot(ctx, manilaClient, snapshot) - manilaErrMsg, err := lastResourceError(manilaClient, snapshot.ID) + manilaErrMsg, err := lastResourceError(ctx, manilaClient, snapshot.ID) if err != nil { return nil, status.Errorf(codes.Internal, "snapshot %s of volume %s is in error state, error description could not be retrieved: %v", snapshot.ID, req.GetSourceVolumeId(), err) } @@ -367,12 +367,12 @@ func (cs *controllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS return nil, status.Errorf(codes.InvalidArgument, "invalid OpenStack 
secrets: %v", err) } - manilaClient, err := cs.d.manilaClientBuilder.New(osOpts) + manilaClient, err := cs.d.manilaClientBuilder.New(ctx, osOpts) if err != nil { return nil, status.Errorf(codes.Unauthenticated, "failed to create Manila v2 client: %v", err) } - if err := deleteSnapshot(manilaClient, req.GetSnapshotId()); err != nil { + if err := deleteSnapshot(ctx, manilaClient, req.GetSnapshotId()); err != nil { return nil, status.Errorf(codes.Internal, "failed to delete snapshot %s: %v", req.GetSnapshotId(), err) } @@ -409,12 +409,12 @@ func (cs *controllerServer) ValidateVolumeCapabilities(ctx context.Context, req } } - manilaClient, err := cs.d.manilaClientBuilder.New(osOpts) + manilaClient, err := cs.d.manilaClientBuilder.New(ctx, osOpts) if err != nil { return nil, status.Errorf(codes.Unauthenticated, "failed to create Manila v2 client: %v", err) } - share, err := manilaClient.GetShareByID(req.GetVolumeId()) + share, err := manilaClient.GetShareByID(ctx, req.GetVolumeId()) if err != nil { if clouderrors.IsNotFound(err) { return nil, status.Errorf(codes.NotFound, "volume %s not found: %v", req.GetVolumeId(), err) @@ -476,14 +476,14 @@ func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi return nil, status.Errorf(codes.InvalidArgument, "invalid OpenStack secrets: %v", err) } - manilaClient, err := cs.d.manilaClientBuilder.New(osOpts) + manilaClient, err := cs.d.manilaClientBuilder.New(ctx, osOpts) if err != nil { return nil, status.Errorf(codes.Unauthenticated, "failed to create Manila v2 client: %v", err) } // Retrieve the share by its ID - share, err := manilaClient.GetShareByID(req.GetVolumeId()) + share, err := manilaClient.GetShareByID(ctx, req.GetVolumeId()) if err != nil { if clouderrors.IsNotFound(err) { return nil, status.Errorf(codes.NotFound, "volume %s not found: %v", req.GetVolumeId(), err) @@ -510,7 +510,7 @@ func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi }, nil } - share, err = extendShare(manilaClient, share.ID, desiredSizeInGiB) + share, err = extendShare(ctx, manilaClient, share.ID, desiredSizeInGiB) if err != nil { return nil, err } diff --git a/pkg/csi/manila/manilaclient/builder.go b/pkg/csi/manila/manilaclient/builder.go index fa2b53d62d..762c615980 100644 --- a/pkg/csi/manila/manilaclient/builder.go +++ b/pkg/csi/manila/manilaclient/builder.go @@ -42,11 +42,11 @@ type ClientBuilder struct { ExtraUserAgentData []string } -func (cb *ClientBuilder) New(o *client.AuthOpts) (Interface, error) { - return New(o, cb.UserAgent, cb.ExtraUserAgentData) +func (cb *ClientBuilder) New(ctx context.Context, o *client.AuthOpts) (Interface, error) { + return New(ctx, o, cb.UserAgent, cb.ExtraUserAgentData) } -func New(o *client.AuthOpts, userAgent string, extraUserAgentData []string) (*Client, error) { +func New(ctx context.Context, o *client.AuthOpts, userAgent string, extraUserAgentData []string) (*Client, error) { // Authenticate and create Manila v2 client provider, err := client.NewOpenStackClient(o, userAgent, extraUserAgentData...) 
if err != nil { @@ -64,7 +64,7 @@ func New(o *client.AuthOpts, userAgent string, extraUserAgentData []string) (*Cl // Check client's and server's versions for compatibility client.Microversion = minimumManilaVersion - if err = validateManilaClient(client); err != nil { + if err = validateManilaClient(ctx, client); err != nil { return nil, fmt.Errorf("Manila v2 client validation failed: %v", err) } @@ -98,8 +98,8 @@ func compareManilaVersionsLessThan(a, b string) bool { return aMaj < bMaj || (aMaj == bMaj && aMin < bMin) } -func validateManilaClient(c *gophercloud.ServiceClient) error { - serverVersion, err := apiversions.Get(context.TODO(), c, "v2").Extract() +func validateManilaClient(ctx context.Context, c *gophercloud.ServiceClient) error { + serverVersion, err := apiversions.Get(ctx, c, "v2").Extract() if err != nil { return fmt.Errorf("failed to get Manila v2 API microversions: %v", err) } diff --git a/pkg/csi/manila/manilaclient/client.go b/pkg/csi/manila/manilaclient/client.go index 7e441bae7d..24f0d7fbfa 100644 --- a/pkg/csi/manila/manilaclient/client.go +++ b/pkg/csi/manila/manilaclient/client.go @@ -41,74 +41,74 @@ func (c Client) SetMicroversion(version string) { c.c.Microversion = version } -func (c Client) GetShareByID(shareID string) (*shares.Share, error) { - return shares.Get(context.TODO(), c.c, shareID).Extract() +func (c Client) GetShareByID(ctx context.Context, shareID string) (*shares.Share, error) { + return shares.Get(ctx, c.c, shareID).Extract() } -func (c Client) GetShareByName(shareName string) (*shares.Share, error) { - shareID, err := shares_utils.IDFromName(context.TODO(), c.c, shareName) +func (c Client) GetShareByName(ctx context.Context, shareName string) (*shares.Share, error) { + shareID, err := shares_utils.IDFromName(ctx, c.c, shareName) if err != nil { return nil, err } - return shares.Get(context.TODO(), c.c, shareID).Extract() + return shares.Get(ctx, c.c, shareID).Extract() } -func (c Client) CreateShare(opts shares.CreateOptsBuilder) (*shares.Share, error) { - return shares.Create(context.TODO(), c.c, opts).Extract() +func (c Client) CreateShare(ctx context.Context, opts shares.CreateOptsBuilder) (*shares.Share, error) { + return shares.Create(ctx, c.c, opts).Extract() } -func (c Client) DeleteShare(shareID string) error { - return shares.Delete(context.TODO(), c.c, shareID).ExtractErr() +func (c Client) DeleteShare(ctx context.Context, shareID string) error { + return shares.Delete(ctx, c.c, shareID).ExtractErr() } -func (c Client) ExtendShare(shareID string, opts shares.ExtendOptsBuilder) error { - return shares.Extend(context.TODO(), c.c, shareID, opts).ExtractErr() +func (c Client) ExtendShare(ctx context.Context, shareID string, opts shares.ExtendOptsBuilder) error { + return shares.Extend(ctx, c.c, shareID, opts).ExtractErr() } -func (c Client) GetExportLocations(shareID string) ([]shares.ExportLocation, error) { - return shares.ListExportLocations(context.TODO(), c.c, shareID).Extract() +func (c Client) GetExportLocations(ctx context.Context, shareID string) ([]shares.ExportLocation, error) { + return shares.ListExportLocations(ctx, c.c, shareID).Extract() } -func (c Client) SetShareMetadata(shareID string, opts shares.SetMetadataOptsBuilder) (map[string]string, error) { - return shares.SetMetadata(context.TODO(), c.c, shareID, opts).Extract() +func (c Client) SetShareMetadata(ctx context.Context, shareID string, opts shares.SetMetadataOptsBuilder) (map[string]string, error) { + return shares.SetMetadata(ctx, c.c, shareID, opts).Extract() } 
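With every manilaclient wrapper now accepting a context, an individual Manila call can be bounded by its own deadline derived from the caller's (e.g. gRPC request) context. A minimal sketch under that assumption; the helper name and the 30-second bound are illustrative, and imports (context, time, the shares and manilaclient packages) are the ones already used by the files above:

// getShareWithDeadline is illustrative only: it bounds a single Manila API
// call with a deadline that now propagates all the way into gophercloud.
func getShareWithDeadline(ctx context.Context, mc manilaclient.Interface, shareID string) (*shares.Share, error) {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	return mc.GetShareByID(ctx, shareID)
}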
-func (c Client) GetAccessRights(shareID string) ([]shares.AccessRight, error) { - return shares.ListAccessRights(context.TODO(), c.c, shareID).Extract() +func (c Client) GetAccessRights(ctx context.Context, shareID string) ([]shares.AccessRight, error) { + return shares.ListAccessRights(ctx, c.c, shareID).Extract() } -func (c Client) GrantAccess(shareID string, opts shares.GrantAccessOptsBuilder) (*shares.AccessRight, error) { - return shares.GrantAccess(context.TODO(), c.c, shareID, opts).Extract() +func (c Client) GrantAccess(ctx context.Context, shareID string, opts shares.GrantAccessOptsBuilder) (*shares.AccessRight, error) { + return shares.GrantAccess(ctx, c.c, shareID, opts).Extract() } -func (c Client) GetSnapshotByID(snapID string) (*snapshots.Snapshot, error) { - return snapshots.Get(context.TODO(), c.c, snapID).Extract() +func (c Client) GetSnapshotByID(ctx context.Context, snapID string) (*snapshots.Snapshot, error) { + return snapshots.Get(ctx, c.c, snapID).Extract() } -func (c Client) GetSnapshotByName(snapName string) (*snapshots.Snapshot, error) { - snapID, err := snapshots_utils.IDFromName(context.TODO(), c.c, snapName) +func (c Client) GetSnapshotByName(ctx context.Context, snapName string) (*snapshots.Snapshot, error) { + snapID, err := snapshots_utils.IDFromName(ctx, c.c, snapName) if err != nil { return nil, err } - return snapshots.Get(context.TODO(), c.c, snapID).Extract() + return snapshots.Get(ctx, c.c, snapID).Extract() } -func (c Client) CreateSnapshot(opts snapshots.CreateOptsBuilder) (*snapshots.Snapshot, error) { - return snapshots.Create(context.TODO(), c.c, opts).Extract() +func (c Client) CreateSnapshot(ctx context.Context, opts snapshots.CreateOptsBuilder) (*snapshots.Snapshot, error) { + return snapshots.Create(ctx, c.c, opts).Extract() } -func (c Client) DeleteSnapshot(snapID string) error { - return snapshots.Delete(context.TODO(), c.c, snapID).ExtractErr() +func (c Client) DeleteSnapshot(ctx context.Context, snapID string) error { + return snapshots.Delete(ctx, c.c, snapID).ExtractErr() } -func (c Client) GetExtraSpecs(shareTypeID string) (sharetypes.ExtraSpecs, error) { - return sharetypes.GetExtraSpecs(context.TODO(), c.c, shareTypeID).Extract() +func (c Client) GetExtraSpecs(ctx context.Context, shareTypeID string) (sharetypes.ExtraSpecs, error) { + return sharetypes.GetExtraSpecs(ctx, c.c, shareTypeID).Extract() } -func (c Client) GetShareTypes() ([]sharetypes.ShareType, error) { - allPages, err := sharetypes.List(c.c, sharetypes.ListOpts{}).AllPages(context.TODO()) +func (c Client) GetShareTypes(ctx context.Context) ([]sharetypes.ShareType, error) { + allPages, err := sharetypes.List(c.c, sharetypes.ListOpts{}).AllPages(ctx) if err != nil { return nil, err } @@ -116,12 +116,12 @@ func (c Client) GetShareTypes() ([]sharetypes.ShareType, error) { return sharetypes.ExtractShareTypes(allPages) } -func (c Client) GetShareTypeIDFromName(shareTypeName string) (string, error) { - return sharetypes_utils.IDFromName(context.TODO(), c.c, shareTypeName) +func (c Client) GetShareTypeIDFromName(ctx context.Context, shareTypeName string) (string, error) { + return sharetypes_utils.IDFromName(ctx, c.c, shareTypeName) } -func (c Client) GetUserMessages(opts messages.ListOptsBuilder) ([]messages.Message, error) { - allPages, err := messages.List(c.c, opts).AllPages(context.TODO()) +func (c Client) GetUserMessages(ctx context.Context, opts messages.ListOptsBuilder) ([]messages.Message, error) { + allPages, err := messages.List(c.c, opts).AllPages(ctx) if err != nil 
{ return nil, err } diff --git a/pkg/csi/manila/manilaclient/interface.go b/pkg/csi/manila/manilaclient/interface.go index 5ed276f2e7..a0ec8c7aa6 100644 --- a/pkg/csi/manila/manilaclient/interface.go +++ b/pkg/csi/manila/manilaclient/interface.go @@ -17,6 +17,8 @@ limitations under the License. package manilaclient import ( + "context" + "github.com/gophercloud/gophercloud/v2/openstack/sharedfilesystems/v2/messages" "github.com/gophercloud/gophercloud/v2/openstack/sharedfilesystems/v2/shares" "github.com/gophercloud/gophercloud/v2/openstack/sharedfilesystems/v2/sharetypes" @@ -28,31 +30,31 @@ type Interface interface { GetMicroversion() string SetMicroversion(version string) - GetShareByID(shareID string) (*shares.Share, error) - GetShareByName(shareName string) (*shares.Share, error) - CreateShare(opts shares.CreateOptsBuilder) (*shares.Share, error) - DeleteShare(shareID string) error - ExtendShare(shareID string, opts shares.ExtendOptsBuilder) error + GetShareByID(ctx context.Context, shareID string) (*shares.Share, error) + GetShareByName(ctx context.Context, shareName string) (*shares.Share, error) + CreateShare(ctx context.Context, opts shares.CreateOptsBuilder) (*shares.Share, error) + DeleteShare(ctx context.Context, shareID string) error + ExtendShare(ctx context.Context, shareID string, opts shares.ExtendOptsBuilder) error - GetExportLocations(shareID string) ([]shares.ExportLocation, error) + GetExportLocations(ctx context.Context, shareID string) ([]shares.ExportLocation, error) - SetShareMetadata(shareID string, opts shares.SetMetadataOptsBuilder) (map[string]string, error) + SetShareMetadata(ctx context.Context, shareID string, opts shares.SetMetadataOptsBuilder) (map[string]string, error) - GetAccessRights(shareID string) ([]shares.AccessRight, error) - GrantAccess(shareID string, opts shares.GrantAccessOptsBuilder) (*shares.AccessRight, error) + GetAccessRights(ctx context.Context, shareID string) ([]shares.AccessRight, error) + GrantAccess(ctx context.Context, shareID string, opts shares.GrantAccessOptsBuilder) (*shares.AccessRight, error) - GetSnapshotByID(snapID string) (*snapshots.Snapshot, error) - GetSnapshotByName(snapName string) (*snapshots.Snapshot, error) - CreateSnapshot(opts snapshots.CreateOptsBuilder) (*snapshots.Snapshot, error) - DeleteSnapshot(snapID string) error + GetSnapshotByID(ctx context.Context, snapID string) (*snapshots.Snapshot, error) + GetSnapshotByName(ctx context.Context, snapName string) (*snapshots.Snapshot, error) + CreateSnapshot(ctx context.Context, opts snapshots.CreateOptsBuilder) (*snapshots.Snapshot, error) + DeleteSnapshot(ctx context.Context, snapID string) error - GetExtraSpecs(shareTypeID string) (sharetypes.ExtraSpecs, error) - GetShareTypes() ([]sharetypes.ShareType, error) - GetShareTypeIDFromName(shareTypeName string) (string, error) + GetExtraSpecs(ctx context.Context, shareTypeID string) (sharetypes.ExtraSpecs, error) + GetShareTypes(ctx context.Context) ([]sharetypes.ShareType, error) + GetShareTypeIDFromName(ctx context.Context, shareTypeName string) (string, error) - GetUserMessages(opts messages.ListOptsBuilder) ([]messages.Message, error) + GetUserMessages(ctx context.Context, opts messages.ListOptsBuilder) ([]messages.Message, error) } type Builder interface { - New(o *client.AuthOpts) (Interface, error) + New(ctx context.Context, o *client.AuthOpts) (Interface, error) } diff --git a/pkg/csi/manila/nodeserver.go b/pkg/csi/manila/nodeserver.go index 0c9da1824c..6e8790cadb 100644 --- a/pkg/csi/manila/nodeserver.go +++ 
b/pkg/csi/manila/nodeserver.go @@ -47,10 +47,10 @@ type stageCacheEntry struct { publishSecret map[string]string } -func (ns *nodeServer) buildVolumeContext(volID volumeID, shareOpts *options.NodeVolumeContext, osOpts *client.AuthOpts) ( +func (ns *nodeServer) buildVolumeContext(ctx context.Context, volID volumeID, shareOpts *options.NodeVolumeContext, osOpts *client.AuthOpts) ( volumeContext map[string]string, accessRight *shares.AccessRight, err error, ) { - manilaClient, err := ns.d.manilaClientBuilder.New(osOpts) + manilaClient, err := ns.d.manilaClientBuilder.New(ctx, osOpts) if err != nil { return nil, nil, status.Errorf(codes.Unauthenticated, "failed to create Manila v2 client: %v", err) } @@ -60,7 +60,7 @@ func (ns *nodeServer) buildVolumeContext(volID volumeID, shareOpts *options.Node var share *shares.Share if shareOpts.ShareID != "" { - share, err = manilaClient.GetShareByID(shareOpts.ShareID) + share, err = manilaClient.GetShareByID(ctx, shareOpts.ShareID) if err != nil { errCode := codes.Internal if clouderrors.IsNotFound(err) { @@ -70,7 +70,7 @@ func (ns *nodeServer) buildVolumeContext(volID volumeID, shareOpts *options.Node return nil, nil, status.Errorf(errCode, "failed to retrieve volume with share ID %s: %v", shareOpts.ShareID, err) } } else { - share, err = manilaClient.GetShareByName(shareOpts.ShareName) + share, err = manilaClient.GetShareByName(ctx, shareOpts.ShareName) if err != nil { errCode := codes.Internal if clouderrors.IsNotFound(err) { @@ -100,7 +100,7 @@ func (ns *nodeServer) buildVolumeContext(volID volumeID, shareOpts *options.Node // Get the access right for this share - accessRights, err := manilaClient.GetAccessRights(share.ID) + accessRights, err := manilaClient.GetAccessRights(ctx, share.ID) if err != nil { return nil, nil, status.Errorf(codes.Internal, "failed to list access rights for volume %s: %v", volID, err) } @@ -120,7 +120,7 @@ func (ns *nodeServer) buildVolumeContext(volID volumeID, shareOpts *options.Node // Retrieve list of all export locations for this share. // Share adapter will try to choose the correct one for mounting. - availableExportLocations, err := manilaClient.GetExportLocations(share.ID) + availableExportLocations, err := manilaClient.GetExportLocations(ctx, share.ID) if err != nil { return nil, nil, status.Errorf(codes.Internal, "failed to list export locations for volume %s: %v", volID, err) } @@ -199,13 +199,13 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis volumeCtx, secret = cacheEntry.volumeContext, cacheEntry.publishSecret } else { klog.Warningf("STAGE_UNSTAGE_VOLUME capability is enabled, but node stage cache doesn't contain an entry for %s - this is most likely a bug! 
Rebuilding staging data anyway...", volID) - volumeCtx, accessRight, err = ns.buildVolumeContext(volID, shareOpts, osOpts) + volumeCtx, accessRight, err = ns.buildVolumeContext(ctx, volID, shareOpts, osOpts) if err == nil { secret, err = buildNodePublishSecret(accessRight, getShareAdapter(ns.d.shareProto), volID) } } } else { - volumeCtx, accessRight, err = ns.buildVolumeContext(volID, shareOpts, osOpts) + volumeCtx, accessRight, err = ns.buildVolumeContext(ctx, volID, shareOpts, osOpts) if err == nil { secret, err = buildNodePublishSecret(accessRight, getShareAdapter(ns.d.shareProto), volID) } @@ -272,7 +272,7 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol if cacheEntry, ok := ns.nodeStageCache[volID]; ok { volumeCtx, stageSecret = cacheEntry.volumeContext, cacheEntry.stageSecret } else { - volumeCtx, accessRight, err = ns.buildVolumeContext(volID, shareOpts, osOpts) + volumeCtx, accessRight, err = ns.buildVolumeContext(ctx, volID, shareOpts, osOpts) if err == nil { stageSecret, err = buildNodeStageSecret(accessRight, getShareAdapter(ns.d.shareProto), volID) diff --git a/pkg/csi/manila/share.go b/pkg/csi/manila/share.go index 9e2cca3b3d..f72441ed41 100644 --- a/pkg/csi/manila/share.go +++ b/pkg/csi/manila/share.go @@ -17,6 +17,7 @@ limitations under the License. package manila import ( + "context" "fmt" "strings" "time" @@ -62,7 +63,7 @@ func isShareInErrorState(s string) bool { // getOrCreateShare first retrieves an existing share with name=shareName, or creates a new one if it doesn't exist yet. // Once the share is created, an exponential back-off is used to wait till the status of the share is "available". -func getOrCreateShare(manilaClient manilaclient.Interface, shareName string, createOpts *shares.CreateOpts) (*shares.Share, manilaError, error) { +func getOrCreateShare(ctx context.Context, manilaClient manilaclient.Interface, shareName string, createOpts *shares.CreateOpts) (*shares.Share, manilaError, error) { var ( share *shares.Share err error @@ -70,12 +71,12 @@ func getOrCreateShare(manilaClient manilaclient.Interface, shareName string, cre // First, check if the share already exists or needs to be created - if share, err = manilaClient.GetShareByName(shareName); err != nil { + if share, err = manilaClient.GetShareByName(ctx, shareName); err != nil { if clouderrors.IsNotFound(err) { // It doesn't exist, create it var createErr error - if share, createErr = manilaClient.CreateShare(createOpts); createErr != nil { + if share, createErr = manilaClient.CreateShare(ctx, createOpts); createErr != nil { return nil, 0, createErr } } else { @@ -92,11 +93,11 @@ func getOrCreateShare(manilaClient manilaclient.Interface, shareName string, cre return share, 0, nil } - return waitForShareStatus(manilaClient, share.ID, []string{shareCreating, shareCreatingFromSnapshot}, shareAvailable, false) + return waitForShareStatus(ctx, manilaClient, share.ID, []string{shareCreating, shareCreatingFromSnapshot}, shareAvailable, false) } -func deleteShare(manilaClient manilaclient.Interface, shareID string) error { - if err := manilaClient.DeleteShare(shareID); err != nil { +func deleteShare(ctx context.Context, manilaClient manilaclient.Interface, shareID string) error { + if err := manilaClient.DeleteShare(ctx, shareID); err != nil { if clouderrors.IsNotFound(err) { klog.V(4).Infof("volume with share ID %s not found, assuming it to be already deleted", shareID) } else { @@ -107,33 +108,33 @@ func deleteShare(manilaClient manilaclient.Interface, shareID string) error 
{ return nil } -func tryDeleteShare(manilaClient manilaclient.Interface, share *shares.Share) { +func tryDeleteShare(ctx context.Context, manilaClient manilaclient.Interface, share *shares.Share) { if share == nil { return } - if err := manilaClient.DeleteShare(share.ID); err != nil { + if err := manilaClient.DeleteShare(ctx, share.ID); err != nil { // TODO failure to delete a share in an error state needs proper monitoring support klog.Errorf("couldn't delete volume %s in a roll-back procedure: %v", share.Name, err) return } - _, _, err := waitForShareStatus(manilaClient, share.ID, []string{shareDeleting}, "", true) + _, _, err := waitForShareStatus(ctx, manilaClient, share.ID, []string{shareDeleting}, "", true) if err != nil && !wait.Interrupted(err) { klog.Errorf("couldn't retrieve volume %s in a roll-back procedure: %v", share.Name, err) } } -func extendShare(manilaClient manilaclient.Interface, shareID string, newSizeInGiB int) (*shares.Share, error) { +func extendShare(ctx context.Context, manilaClient manilaclient.Interface, shareID string, newSizeInGiB int) (*shares.Share, error) { opts := shares.ExtendOpts{ NewSize: newSizeInGiB, } - if err := manilaClient.ExtendShare(shareID, opts); err != nil { + if err := manilaClient.ExtendShare(ctx, shareID, opts); err != nil { return nil, err } - share, manilaErrCode, err := waitForShareStatus(manilaClient, shareID, []string{shareExtending}, shareAvailable, false) + share, manilaErrCode, err := waitForShareStatus(ctx, manilaClient, shareID, []string{shareExtending}, shareAvailable, false) if err != nil { if wait.Interrupted(err) { return nil, status.Errorf(codes.DeadlineExceeded, "deadline exceeded while waiting for volume ID %s to become available", share.Name) @@ -145,7 +146,7 @@ func extendShare(manilaClient manilaclient.Interface, shareID string, newSizeInG return share, nil } -func waitForShareStatus(manilaClient manilaclient.Interface, shareID string, validTransientStates []string, desiredStatus string, successOnNotFound bool) (*shares.Share, manilaError, error) { +func waitForShareStatus(ctx context.Context, manilaClient manilaclient.Interface, shareID string, validTransientStates []string, desiredStatus string, successOnNotFound bool) (*shares.Share, manilaError, error) { var ( backoff = wait.Backoff{ Duration: time.Second * waitForAvailableShareTimeout, @@ -168,7 +169,7 @@ func waitForShareStatus(manilaClient manilaclient.Interface, shareID string, val } return share, manilaErrCode, wait.ExponentialBackoff(backoff, func() (bool, error) { - share, err = manilaClient.GetShareByID(shareID) + share, err = manilaClient.GetShareByID(ctx, shareID) if err != nil { if clouderrors.IsNotFound(err) && successOnNotFound { @@ -187,7 +188,7 @@ func waitForShareStatus(manilaClient manilaclient.Interface, shareID string, val } if isShareInErrorState(share.Status) { - manilaErrMsg, err := lastResourceError(manilaClient, shareID) + manilaErrMsg, err := lastResourceError(ctx, manilaClient, shareID) if err != nil { return false, fmt.Errorf("share %s is in error state, error description could not be retrieved: %v", shareID, err) } @@ -200,7 +201,7 @@ func waitForShareStatus(manilaClient manilaclient.Interface, shareID string, val }) } -func resolveShareListToUUIDs(manilaClient manilaclient.Interface, affinityList string) (string, error) { +func resolveShareListToUUIDs(ctx context.Context, manilaClient manilaclient.Interface, affinityList string) (string, error) { list := util.SplitTrim(affinityList, ',') if len(list) == 0 { return "", nil @@ -213,14 
+214,14 @@ func resolveShareListToUUIDs(manilaClient manilaclient.Interface, affinityList s if id, e := util.UUID(v); e == nil { // First try to get share by ID - share, err = manilaClient.GetShareByID(id) + share, err = manilaClient.GetShareByID(ctx, id) if err != nil && clouderrors.IsNotFound(err) { // If not found by ID, try to get share by ID as name - share, err = manilaClient.GetShareByName(v) + share, err = manilaClient.GetShareByName(ctx, v) } } else { // If not a UUID, try to get share by name - share, err = manilaClient.GetShareByName(v) + share, err = manilaClient.GetShareByName(ctx, v) } if err != nil { if clouderrors.IsNotFound(err) { diff --git a/pkg/csi/manila/shareadapters/cephfs.go b/pkg/csi/manila/shareadapters/cephfs.go index 38b0e7119c..e64744a2d5 100644 --- a/pkg/csi/manila/shareadapters/cephfs.go +++ b/pkg/csi/manila/shareadapters/cephfs.go @@ -17,6 +17,7 @@ limitations under the License. package shareadapters import ( + "context" "fmt" "time" @@ -31,7 +32,7 @@ type Cephfs struct{} var _ ShareAdapter = &Cephfs{} -func (Cephfs) GetOrGrantAccess(args *GrantAccessArgs) (accessRight *shares.AccessRight, err error) { +func (Cephfs) GetOrGrantAccess(ctx context.Context, args *GrantAccessArgs) (accessRight *shares.AccessRight, err error) { // First, check if the access right exists or needs to be created var rights []shares.AccessRight @@ -41,7 +42,7 @@ func (Cephfs) GetOrGrantAccess(args *GrantAccessArgs) (accessRight *shares.Acces accessTo = args.Share.Name } - rights, err = args.ManilaClient.GetAccessRights(args.Share.ID) + rights, err = args.ManilaClient.GetAccessRights(ctx, args.Share.ID) if err != nil { if _, ok := err.(gophercloud.ErrResourceNotFound); !ok { return nil, fmt.Errorf("failed to list access rights: %v", err) @@ -62,7 +63,7 @@ func (Cephfs) GetOrGrantAccess(args *GrantAccessArgs) (accessRight *shares.Acces if accessRight == nil { // Not found, create it - accessRight, err = args.ManilaClient.GrantAccess(args.Share.ID, shares.GrantAccessOpts{ + accessRight, err = args.ManilaClient.GrantAccess(ctx, args.Share.ID, shares.GrantAccessOpts{ AccessType: "cephx", AccessLevel: "rw", AccessTo: accessTo, @@ -87,7 +88,7 @@ func (Cephfs) GetOrGrantAccess(args *GrantAccessArgs) (accessRight *shares.Acces } return accessRight, wait.ExponentialBackoff(backoff, func() (bool, error) { - rights, err := args.ManilaClient.GetAccessRights(args.Share.ID) + rights, err := args.ManilaClient.GetAccessRights(ctx, args.Share.ID) if err != nil { return false, err } diff --git a/pkg/csi/manila/shareadapters/nfs.go b/pkg/csi/manila/shareadapters/nfs.go index ab24320741..36591e54d1 100644 --- a/pkg/csi/manila/shareadapters/nfs.go +++ b/pkg/csi/manila/shareadapters/nfs.go @@ -17,6 +17,7 @@ limitations under the License. 
package shareadapters import ( + "context" "fmt" "net" "strings" @@ -32,10 +33,10 @@ type NFS struct{} var _ ShareAdapter = &NFS{} -func (NFS) GetOrGrantAccess(args *GrantAccessArgs) (*shares.AccessRight, error) { +func (NFS) GetOrGrantAccess(ctx context.Context, args *GrantAccessArgs) (*shares.AccessRight, error) { // First, check if the access right exists or needs to be created - rights, err := args.ManilaClient.GetAccessRights(args.Share.ID) + rights, err := args.ManilaClient.GetAccessRights(ctx, args.Share.ID) if err != nil { if _, ok := err.(gophercloud.ErrResourceNotFound); !ok { return nil, fmt.Errorf("failed to list access rights: %v", err) @@ -53,7 +54,7 @@ func (NFS) GetOrGrantAccess(args *GrantAccessArgs) (*shares.AccessRight, error) // Not found, create it - return args.ManilaClient.GrantAccess(args.Share.ID, shares.GrantAccessOpts{ + return args.ManilaClient.GrantAccess(ctx, args.Share.ID, shares.GrantAccessOpts{ AccessType: "ip", AccessLevel: "rw", AccessTo: args.Options.NFSShareClient, diff --git a/pkg/csi/manila/shareadapters/shareadapter.go b/pkg/csi/manila/shareadapters/shareadapter.go index d1e1d6fe0e..2bd28a3b10 100644 --- a/pkg/csi/manila/shareadapters/shareadapter.go +++ b/pkg/csi/manila/shareadapters/shareadapter.go @@ -17,6 +17,8 @@ limitations under the License. package shareadapters import ( + "context" + "github.com/gophercloud/gophercloud/v2/openstack/sharedfilesystems/v2/shares" "k8s.io/cloud-provider-openstack/pkg/csi/manila/manilaclient" "k8s.io/cloud-provider-openstack/pkg/csi/manila/options" @@ -44,7 +46,7 @@ type ShareAdapter interface { // GetOrGrantAccess first tries to retrieve an access right for args.Share. // An access right is created for the share in case it doesn't exist yet. // Returns an existing or new access right for args.Share. - GetOrGrantAccess(args *GrantAccessArgs) (accessRight *shares.AccessRight, err error) + GetOrGrantAccess(ctx context.Context, args *GrantAccessArgs) (accessRight *shares.AccessRight, err error) // BuildVolumeContext builds a volume context map that's passed to NodeStageVolumeRequest and NodePublishVolumeRequest BuildVolumeContext(args *VolumeContextArgs) (volumeContext map[string]string, err error) diff --git a/pkg/csi/manila/snapshot.go b/pkg/csi/manila/snapshot.go index 79d8cd8d7c..967c6646f2 100644 --- a/pkg/csi/manila/snapshot.go +++ b/pkg/csi/manila/snapshot.go @@ -17,6 +17,7 @@ limitations under the License. package manila import ( + "context" "fmt" "time" @@ -38,7 +39,7 @@ const ( // getOrCreateSnapshot retrieves an existing snapshot with name=snapName, or creates a new one if it doesn't exist yet. 
// Instead of waiting for the snapshot to become available (as getOrCreateShare does), CSI's ready_to_use flag is used to signal readiness -func getOrCreateSnapshot(manilaClient manilaclient.Interface, snapName, sourceShareID string) (*snapshots.Snapshot, error) { +func getOrCreateSnapshot(ctx context.Context, manilaClient manilaclient.Interface, snapName, sourceShareID string) (*snapshots.Snapshot, error) { var ( snapshot *snapshots.Snapshot err error @@ -46,7 +47,7 @@ func getOrCreateSnapshot(manilaClient manilaclient.Interface, snapName, sourceSh // First, check if the snapshot already exists or needs to be created - if snapshot, err = manilaClient.GetSnapshotByName(snapName); err != nil { + if snapshot, err = manilaClient.GetSnapshotByName(ctx, snapName); err != nil { if clouderrors.IsNotFound(err) { // It doesn't exist, create it @@ -57,7 +58,7 @@ func getOrCreateSnapshot(manilaClient manilaclient.Interface, snapName, sourceSh } var createErr error - if snapshot, createErr = manilaClient.CreateSnapshot(opts); createErr != nil { + if snapshot, createErr = manilaClient.CreateSnapshot(ctx, opts); createErr != nil { return nil, createErr } @@ -72,8 +73,8 @@ func getOrCreateSnapshot(manilaClient manilaclient.Interface, snapName, sourceSh return snapshot, nil } -func deleteSnapshot(manilaClient manilaclient.Interface, snapID string) error { - if err := manilaClient.DeleteSnapshot(snapID); err != nil { +func deleteSnapshot(ctx context.Context, manilaClient manilaclient.Interface, snapID string) error { + if err := manilaClient.DeleteSnapshot(ctx, snapID); err != nil { if clouderrors.IsNotFound(err) { klog.V(4).Infof("snapshot %s not found, assuming it to be already deleted", snapID) } else { @@ -84,24 +85,24 @@ func deleteSnapshot(manilaClient manilaclient.Interface, snapID string) error { return nil } -func tryDeleteSnapshot(manilaClient manilaclient.Interface, snapshot *snapshots.Snapshot) { +func tryDeleteSnapshot(ctx context.Context, manilaClient manilaclient.Interface, snapshot *snapshots.Snapshot) { if snapshot == nil { return } - if err := deleteSnapshot(manilaClient, snapshot.ID); err != nil { + if err := deleteSnapshot(ctx, manilaClient, snapshot.ID); err != nil { // TODO failure to delete a snapshot in an error state needs proper monitoring support klog.Errorf("couldn't delete snapshot %s in a roll-back procedure: %v", snapshot.ID, err) return } - _, _, err := waitForSnapshotStatus(manilaClient, snapshot.ID, snapshotDeleting, "", true) + _, _, err := waitForSnapshotStatus(ctx, manilaClient, snapshot.ID, snapshotDeleting, "", true) if err != nil && !wait.Interrupted(err) { klog.Errorf("couldn't retrieve snapshot %s in a roll-back procedure: %v", snapshot.ID, err) } } -func waitForSnapshotStatus(manilaClient manilaclient.Interface, snapshotID, currentStatus, desiredStatus string, successOnNotFound bool) (*snapshots.Snapshot, manilaError, error) { +func waitForSnapshotStatus(ctx context.Context, manilaClient manilaclient.Interface, snapshotID, currentStatus, desiredStatus string, successOnNotFound bool) (*snapshots.Snapshot, manilaError, error) { var ( backoff = wait.Backoff{ Duration: time.Second * waitForAvailableShareTimeout, @@ -115,7 +116,7 @@ func waitForSnapshotStatus(manilaClient manilaclient.Interface, snapshotID, curr ) return snapshot, manilaErrCode, wait.ExponentialBackoff(backoff, func() (bool, error) { - snapshot, err = manilaClient.GetSnapshotByID(snapshotID) + snapshot, err = manilaClient.GetSnapshotByID(ctx, snapshotID) if err != nil { if clouderrors.IsNotFound(err) 
&& successOnNotFound { @@ -133,7 +134,7 @@ func waitForSnapshotStatus(manilaClient manilaclient.Interface, snapshotID, curr case desiredStatus: isAvailable = true case shareError: - manilaErrMsg, err := lastResourceError(manilaClient, snapshotID) + manilaErrMsg, err := lastResourceError(ctx, manilaClient, snapshotID) if err != nil { return false, fmt.Errorf("snapshot %s is in error state, error description could not be retrieved: %v", snapshotID, err) } diff --git a/pkg/csi/manila/util.go b/pkg/csi/manila/util.go index 67715bfc50..bec314ac4d 100644 --- a/pkg/csi/manila/util.go +++ b/pkg/csi/manila/util.go @@ -17,6 +17,7 @@ limitations under the License. package manila import ( + "context" "errors" "fmt" "strings" @@ -129,8 +130,8 @@ func bytesToGiB(sizeInBytes int64) int { return sizeInGiB } -func lastResourceError(manilaClient manilaclient.Interface, resourceID string) (manilaErrorMessage, error) { - msgs, err := manilaClient.GetUserMessages(&messages.ListOpts{ +func lastResourceError(ctx context.Context, manilaClient manilaclient.Interface, resourceID string) (manilaErrorMessage, error) { + msgs, err := manilaClient.GetUserMessages(ctx, &messages.ListOpts{ ResourceID: resourceID, MessageLevel: "ERROR", Limit: 1, diff --git a/pkg/csi/manila/volumesource.go b/pkg/csi/manila/volumesource.go index e2f82c9ced..d3c0a9c130 100644 --- a/pkg/csi/manila/volumesource.go +++ b/pkg/csi/manila/volumesource.go @@ -17,6 +17,8 @@ limitations under the License. package manila import ( + "context" + "github.com/gophercloud/gophercloud/v2/openstack/sharedfilesystems/v2/shares" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -27,10 +29,10 @@ import ( ) type volumeCreator interface { - create(manilaClient manilaclient.Interface, shareName string, sizeInGiB int, shareOpts *options.ControllerVolumeContext, shareMetadata map[string]string) (*shares.Share, error) + create(ctx context.Context, manilaClient manilaclient.Interface, shareName string, sizeInGiB int, shareOpts *options.ControllerVolumeContext, shareMetadata map[string]string) (*shares.Share, error) } -func create(manilaClient manilaclient.Interface, shareName string, sizeInGiB int, shareOpts *options.ControllerVolumeContext, shareMetadata map[string]string, snapshotID string) (*shares.Share, error) { +func create(ctx context.Context, manilaClient manilaclient.Interface, shareName string, sizeInGiB int, shareOpts *options.ControllerVolumeContext, shareMetadata map[string]string, snapshotID string) (*shares.Share, error) { createOpts := &shares.CreateOpts{ AvailabilityZone: shareOpts.AvailabilityZone, ShareProto: shareOpts.Protocol, @@ -56,7 +58,7 @@ func create(manilaClient manilaclient.Interface, shareName string, sizeInGiB int } } - share, manilaErrCode, err := getOrCreateShare(manilaClient, shareName, createOpts) + share, manilaErrCode, err := getOrCreateShare(ctx, manilaClient, shareName, createOpts) if err != nil { if wait.Interrupted(err) { return nil, status.Errorf(codes.DeadlineExceeded, "deadline exceeded while waiting for volume %s to become available", shareName) @@ -64,7 +66,7 @@ func create(manilaClient manilaclient.Interface, shareName string, sizeInGiB int if manilaErrCode != 0 { // An error has occurred, try to roll-back the share - tryDeleteShare(manilaClient, share) + tryDeleteShare(ctx, manilaClient, share) } if snapshotID != "" { @@ -78,20 +80,20 @@ func create(manilaClient manilaclient.Interface, shareName string, sizeInGiB int type blankVolume struct{} -func (blankVolume) create(manilaClient 
manilaclient.Interface, shareName string, sizeInGiB int, shareOpts *options.ControllerVolumeContext, shareMetadata map[string]string) (*shares.Share, error) { - return create(manilaClient, shareName, sizeInGiB, shareOpts, shareMetadata, "") +func (blankVolume) create(ctx context.Context, manilaClient manilaclient.Interface, shareName string, sizeInGiB int, shareOpts *options.ControllerVolumeContext, shareMetadata map[string]string) (*shares.Share, error) { + return create(ctx, manilaClient, shareName, sizeInGiB, shareOpts, shareMetadata, "") } type volumeFromSnapshot struct { snapshotID string } -func (v volumeFromSnapshot) create(manilaClient manilaclient.Interface, shareName string, sizeInGiB int, shareOpts *options.ControllerVolumeContext, shareMetadata map[string]string) (*shares.Share, error) { +func (v volumeFromSnapshot) create(ctx context.Context, manilaClient manilaclient.Interface, shareName string, sizeInGiB int, shareOpts *options.ControllerVolumeContext, shareMetadata map[string]string) (*shares.Share, error) { if v.snapshotID == "" { return nil, status.Error(codes.InvalidArgument, "snapshot ID cannot be empty") } - snapshot, err := manilaClient.GetSnapshotByID(v.snapshotID) + snapshot, err := manilaClient.GetSnapshotByID(ctx, v.snapshotID) if err != nil { if clouderrors.IsNotFound(err) { return nil, status.Errorf(codes.NotFound, "source snapshot %s not found: %v", v.snapshotID, err) @@ -108,5 +110,5 @@ func (v volumeFromSnapshot) create(manilaClient manilaclient.Interface, shareNam return nil, status.Errorf(codes.FailedPrecondition, "snapshot %s is in invalid state: expected 'available', got '%s'", snapshot.ID, snapshot.Status) } - return create(manilaClient, shareName, sizeInGiB, shareOpts, shareMetadata, snapshot.ID) + return create(ctx, manilaClient, shareName, sizeInGiB, shareOpts, shareMetadata, snapshot.ID) } diff --git a/pkg/identity/keystone/authenticator.go b/pkg/identity/keystone/authenticator.go index 6c7d2c433c..6776799698 100644 --- a/pkg/identity/keystone/authenticator.go +++ b/pkg/identity/keystone/authenticator.go @@ -38,8 +38,8 @@ type tokenInfo struct { } type IKeystone interface { - GetTokenInfo(string) (*tokenInfo, error) - GetGroups(string, string) ([]string, error) + GetTokenInfo(context.Context, string) (*tokenInfo, error) + GetGroups(context.Context, string, string) ([]string, error) } type Keystoner struct { @@ -53,9 +53,9 @@ func NewKeystoner(client *gophercloud.ServiceClient) *Keystoner { } // revive:disable:unexported-return -func (k *Keystoner) GetTokenInfo(token string) (*tokenInfo, error) { +func (k *Keystoner) GetTokenInfo(ctx context.Context, token string) (*tokenInfo, error) { k.client.ProviderClient.SetToken(token) - ret := tokens.Get(context.TODO(), k.client, token) + ret := tokens.Get(ctx, k.client, token) tokenUser, err := ret.ExtractUser() if err != nil { @@ -90,9 +90,9 @@ func (k *Keystoner) GetTokenInfo(token string) (*tokenInfo, error) { // revive:enable:unexported-return -func (k *Keystoner) GetGroups(token string, userID string) ([]string, error) { +func (k *Keystoner) GetGroups(ctx context.Context, token string, userID string) ([]string, error) { k.client.ProviderClient.SetToken(token) - allGroupPages, err := users.ListGroups(k.client, userID).AllPages(context.TODO()) + allGroupPages, err := users.ListGroups(k.client, userID).AllPages(ctx) if err != nil { return nil, fmt.Errorf("failed to get user groups from Keystone: %v", err) } @@ -116,13 +116,13 @@ type Authenticator struct { } // AuthenticateToken checks the token via Keystone 
call -func (a *Authenticator) AuthenticateToken(token string) (user.Info, bool, error) { - tokenInfo, err := a.keystoner.GetTokenInfo(token) +func (a *Authenticator) AuthenticateToken(ctx context.Context, token string) (user.Info, bool, error) { + tokenInfo, err := a.keystoner.GetTokenInfo(ctx, token) if err != nil { return nil, false, fmt.Errorf("failed to authenticate: %v", err) } - userGroups, err := a.keystoner.GetGroups(token, tokenInfo.userID) + userGroups, err := a.keystoner.GetGroups(ctx, token, tokenInfo.userID) if err != nil { return nil, false, fmt.Errorf("failed to authenticate: %v", err) } diff --git a/pkg/identity/keystone/authenticator_test.go b/pkg/identity/keystone/authenticator_test.go index 4deca5f90b..841ff4a26a 100644 --- a/pkg/identity/keystone/authenticator_test.go +++ b/pkg/identity/keystone/authenticator_test.go @@ -14,6 +14,7 @@ limitations under the License. package keystone import ( + "context" "testing" th "github.com/gophercloud/gophercloud/v2/testhelper" @@ -42,7 +43,7 @@ func TestAuthenticateToken(t *testing.T) { a := &Authenticator{ keystoner: keystone, } - userInfo, allowed, err := a.AuthenticateToken("token") + userInfo, allowed, err := a.AuthenticateToken(context.TODO(), "token") th.AssertNoErr(t, err) th.AssertEquals(t, true, allowed) diff --git a/pkg/identity/keystone/keystone.go b/pkg/identity/keystone/keystone.go index 5f1d92ef3f..99b1fdf267 100644 --- a/pkg/identity/keystone/keystone.go +++ b/pkg/identity/keystone/keystone.go @@ -247,6 +247,7 @@ func (k *Auth) processItem(key string) error { // Handler serves the http requests func (k *Auth) Handler(w http.ResponseWriter, r *http.Request) { + ctx := context.TODO() var data map[string]interface{} decoder := json.NewDecoder(r.Body) defer r.Body.Close() @@ -266,12 +267,12 @@ func (k *Auth) Handler(w http.ResponseWriter, r *http.Request) { if kind == "TokenReview" { var token = data["spec"].(map[string]interface{})["token"].(string) - userInfo := k.authenticateToken(w, r, token, data) + userInfo := k.authenticateToken(ctx, w, r, token, data) // Do synchronization // In the case of unscoped tokens, when project id is not defined, we have to skip this part if k.syncer.syncConfig != nil && len(k.syncer.syncConfig.DataTypesToSync) > 0 && userInfo != nil && len(userInfo.Extra[ProjectID]) != 0 { - err = k.syncer.syncData(userInfo) + err = k.syncer.syncData(ctx, userInfo) if err != nil { klog.Errorf("an error occurred during data synchronization: %v", err) } @@ -283,8 +284,8 @@ func (k *Auth) Handler(w http.ResponseWriter, r *http.Request) { } } -func (k *Auth) authenticateToken(w http.ResponseWriter, r *http.Request, token string, data map[string]interface{}) *userInfo { - user, authenticated, err := k.authn.AuthenticateToken(token) +func (k *Auth) authenticateToken(ctx context.Context, w http.ResponseWriter, r *http.Request, token string, data map[string]interface{}) *userInfo { + user, authenticated, err := k.authn.AuthenticateToken(ctx, token) klog.V(4).Infof("authenticateToken : %v, %v, %v\n", token, user, err) if !authenticated { @@ -405,8 +406,8 @@ func (k *Auth) authorizeToken(w http.ResponseWriter, r *http.Request, data map[s } // NewKeystoneAuth returns a new KeystoneAuth controller -func NewKeystoneAuth(c *Config) (*Auth, error) { - keystoneClient, err := createKeystoneClient(c.KeystoneURL, c.KeystoneCA) +func NewKeystoneAuth(ctx context.Context, c *Config) (*Auth, error) { + keystoneClient, err := createKeystoneClient(ctx, c.KeystoneURL, c.KeystoneCA) if err != nil { return nil, fmt.Errorf("failed 
to initialize keystone client: %v", err) } @@ -424,7 +425,7 @@ func NewKeystoneAuth(c *Config) (*Auth, error) { // is possible that both are not provided, in this case, the keystone webhook authorization will always return deny. var policy policyList if c.PolicyConfigMapName != "" { - cm, err := k8sClient.CoreV1().ConfigMaps(cmNamespace).Get(context.TODO(), c.PolicyConfigMapName, metav1.GetOptions{}) + cm, err := k8sClient.CoreV1().ConfigMaps(cmNamespace).Get(ctx, c.PolicyConfigMapName, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("failed to get configmap %s: %v", c.PolicyConfigMapName, err) } @@ -454,7 +455,7 @@ func NewKeystoneAuth(c *Config) (*Auth, error) { // is possible that both are not provided, in this case, the keystone webhook authenticator will not synchronize data. var sc *syncConfig if c.SyncConfigMapName != "" { - cm, err := k8sClient.CoreV1().ConfigMaps(cmNamespace).Get(context.TODO(), c.SyncConfigMapName, metav1.GetOptions{}) + cm, err := k8sClient.CoreV1().ConfigMaps(cmNamespace).Get(ctx, c.SyncConfigMapName, metav1.GetOptions{}) if err != nil { klog.Errorf("configmap get err #%v ", err) return nil, fmt.Errorf("failed to get configmap %s: %v", c.SyncConfigMapName, err) @@ -529,7 +530,7 @@ func getField(data map[string]interface{}, name string) string { } // Construct a Keystone v3 client, bail out if we cannot find the v3 API endpoint -func createIdentityV3Provider(options gophercloud.AuthOptions, transport http.RoundTripper) (*gophercloud.ProviderClient, error) { +func createIdentityV3Provider(ctx context.Context, options gophercloud.AuthOptions, transport http.RoundTripper) (*gophercloud.ProviderClient, error) { client, err := openstack.NewClient(options.IdentityEndpoint) if err != nil { return nil, err @@ -542,7 +543,7 @@ func createIdentityV3Provider(options gophercloud.AuthOptions, transport http.Ro versions := []*utils.Version{ {ID: "v3", Priority: 30, Suffix: "/v3/"}, } - chosen, _, err := utils.ChooseVersion(context.TODO(), client, versions) + chosen, _, err := utils.ChooseVersion(ctx, client, versions) if err != nil { return nil, fmt.Errorf("unable to find identity API v3 version : %v", err) } @@ -578,7 +579,7 @@ func createKubernetesClient(kubeConfig string) (*kubernetes.Clientset, error) { return client, nil } -func createKeystoneClient(authURL string, caFile string) (*gophercloud.ServiceClient, error) { +func createKeystoneClient(ctx context.Context, authURL string, caFile string) (*gophercloud.ServiceClient, error) { // FIXME: Enable this check later //if !strings.HasPrefix(authURL, "https") { // return nil, errors.New("Auth URL should be secure and start with https") @@ -597,7 +598,7 @@ func createKeystoneClient(authURL string, caFile string) (*gophercloud.ServiceCl transport = netutil.SetOldTransportDefaults(&http.Transport{TLSClientConfig: config}) } opts := gophercloud.AuthOptions{IdentityEndpoint: authURL} - provider, err := createIdentityV3Provider(opts, transport) + provider, err := createIdentityV3Provider(ctx, opts, transport) if err != nil { return nil, err } diff --git a/pkg/identity/keystone/mock_IKeystone.go b/pkg/identity/keystone/mock_IKeystone.go index 6f1d457423..4478f23a9d 100644 --- a/pkg/identity/keystone/mock_IKeystone.go +++ b/pkg/identity/keystone/mock_IKeystone.go @@ -2,7 +2,11 @@ package keystone -import mock "github.com/stretchr/testify/mock" +import ( + "context" + + "github.com/stretchr/testify/mock" +) // MockIKeystone is an autogenerated mock type for the IKeystone type type MockIKeystone struct { @@ -10,7 +14,7 @@ 
type MockIKeystone struct { } // GetGroups provides a mock function with given fields: _a0, _a1 -func (_m *MockIKeystone) GetGroups(_a0 string, _a1 string) ([]string, error) { +func (_m *MockIKeystone) GetGroups(_ context.Context, _a0 string, _a1 string) ([]string, error) { ret := _m.Called(_a0, _a1) var r0 []string @@ -33,7 +37,7 @@ func (_m *MockIKeystone) GetGroups(_a0 string, _a1 string) ([]string, error) { } // GetTokenInfo provides a mock function with given fields: _a0 -func (_m *MockIKeystone) GetTokenInfo(_a0 string) (*tokenInfo, error) { +func (_m *MockIKeystone) GetTokenInfo(_ context.Context, _a0 string) (*tokenInfo, error) { ret := _m.Called(_a0) var r0 *tokenInfo diff --git a/pkg/identity/keystone/sync.go b/pkg/identity/keystone/sync.go index 610aaffbad..1e8bfac2bd 100644 --- a/pkg/identity/keystone/sync.go +++ b/pkg/identity/keystone/sync.go @@ -154,7 +154,7 @@ type Syncer struct { mu sync.Mutex } -func (s *Syncer) syncData(u *userInfo) error { +func (s *Syncer) syncData(ctx context.Context, u *userInfo) error { s.mu.Lock() defer s.mu.Unlock() @@ -185,7 +185,7 @@ func (s *Syncer) syncData(u *userInfo) error { // sync project data first for _, dataType := range s.syncConfig.DataTypesToSync { if dataType == Projects { - err := s.syncProjectData(u, namespaceName) + err := s.syncProjectData(ctx, u, namespaceName) if err != nil { return err } @@ -194,7 +194,7 @@ func (s *Syncer) syncData(u *userInfo) error { for _, dataType := range s.syncConfig.DataTypesToSync { if dataType == RoleAssignments { - err := s.syncRoleAssignmentsData(u, namespaceName) + err := s.syncRoleAssignmentsData(ctx, u, namespaceName) if err != nil { return err } @@ -204,8 +204,8 @@ func (s *Syncer) syncData(u *userInfo) error { return nil } -func (s *Syncer) syncProjectData(u *userInfo, namespaceName string) error { - _, err := s.k8sClient.CoreV1().Namespaces().Get(context.TODO(), namespaceName, metav1.GetOptions{}) +func (s *Syncer) syncProjectData(ctx context.Context, u *userInfo, namespaceName string) error { + _, err := s.k8sClient.CoreV1().Namespaces().Get(ctx, namespaceName, metav1.GetOptions{}) if k8serrors.IsNotFound(err) { // The required namespace is not found. Create it then. 
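// Illustrative sketch, not part of the patch: the hunk above threads the caller's context
// into the namespace get-or-create flow. In isolation, and with a hypothetical helper name
// (ensureNamespace), the pattern looks roughly like this; client-go's Get/Create calls take
// a context.Context, so cancelling the caller's ctx cancels the API requests instead of
// leaving them running on a detached context.TODO().

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// ensureNamespace returns nil when the namespace already exists or was just created.
func ensureNamespace(ctx context.Context, client kubernetes.Interface, name string) error {
	_, err := client.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{})
	if err == nil {
		return nil // already present, nothing to do
	}
	if !k8serrors.IsNotFound(err) {
		return err // unexpected API error, surface it to the caller
	}
	ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}}
	_, err = client.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
	return err
}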
@@ -214,7 +214,7 @@ func (s *Syncer) syncProjectData(u *userInfo, namespaceName string) error { Name: namespaceName, }, } - _, err := s.k8sClient.CoreV1().Namespaces().Create(context.TODO(), namespace, metav1.CreateOptions{}) + _, err := s.k8sClient.CoreV1().Namespaces().Create(ctx, namespace, metav1.CreateOptions{}) if err != nil { klog.Warningf("Cannot create a namespace for the user: %v", err) return errors.New("internal server error") @@ -228,9 +228,9 @@ func (s *Syncer) syncProjectData(u *userInfo, namespaceName string) error { return nil } -func (s *Syncer) syncRoleAssignmentsData(u *userInfo, namespaceName string) error { +func (s *Syncer) syncRoleAssignmentsData(ctx context.Context, u *userInfo, namespaceName string) error { // TODO(mfedosin): add a field separator to filter out unnecessary roles bindings at an early stage - roleBindings, err := s.k8sClient.RbacV1().RoleBindings(namespaceName).List(context.TODO(), metav1.ListOptions{}) + roleBindings, err := s.k8sClient.RbacV1().RoleBindings(namespaceName).List(ctx, metav1.ListOptions{}) if err != nil { klog.Warningf("Cannot get a list of role bindings from the server: %v", err) return errors.New("internal server error") @@ -253,7 +253,7 @@ func (s *Syncer) syncRoleAssignmentsData(u *userInfo, namespaceName string) erro } } if !keepRoleBinding { - err = s.k8sClient.RbacV1().RoleBindings(namespaceName).Delete(context.TODO(), roleBinding.Name, metav1.DeleteOptions{}) + err = s.k8sClient.RbacV1().RoleBindings(namespaceName).Delete(ctx, roleBinding.Name, metav1.DeleteOptions{}) if err != nil { klog.Warningf("Cannot delete a role binding from the server: %v", err) return errors.New("internal server error") @@ -295,7 +295,7 @@ func (s *Syncer) syncRoleAssignmentsData(u *userInfo, namespaceName string) erro Name: roleName, }, } - _, err := s.k8sClient.RbacV1().RoleBindings(namespaceName).Create(context.TODO(), roleBinding, metav1.CreateOptions{}) + _, err := s.k8sClient.RbacV1().RoleBindings(namespaceName).Create(ctx, roleBinding, metav1.CreateOptions{}) if err != nil { klog.Warningf("Cannot create a role binding for the user: %v", err) return errors.New("internal server error") diff --git a/pkg/identity/keystone/token_getter.go b/pkg/identity/keystone/token_getter.go index 68dd774e4f..7b0e5d14d5 100644 --- a/pkg/identity/keystone/token_getter.go +++ b/pkg/identity/keystone/token_getter.go @@ -41,7 +41,7 @@ type Options struct { } // GetToken creates a token by authenticate with keystone. -func GetToken(options Options) (*tokens3.Token, error) { +func GetToken(ctx context.Context, options Options) (*tokens3.Token, error) { var token *tokens3.Token var setTransport bool @@ -113,7 +113,7 @@ func GetToken(options Options) (*tokens3.Token, error) { } // Issue new unscoped token - result := tokens3.Create(context.TODO(), v3Client, &options.AuthOptions) + result := tokens3.Create(ctx, v3Client, &options.AuthOptions) if result.Err != nil { return token, result.Err } diff --git a/pkg/identity/keystone/token_getter_test.go b/pkg/identity/keystone/token_getter_test.go index 6562da7545..86b80a8ab2 100644 --- a/pkg/identity/keystone/token_getter_test.go +++ b/pkg/identity/keystone/token_getter_test.go @@ -14,6 +14,7 @@ limitations under the License. 
package keystone import ( + "context" "encoding/json" "fmt" "io" @@ -89,7 +90,7 @@ func TestTokenGetter(t *testing.T) { }, } - token, err := GetToken(options) + token, err := GetToken(context.TODO(), options) th.AssertNoErr(t, err) th.AssertEquals(t, "0123456789", token.ID) th.AssertEquals(t, "2015-11-09 01:42:57.527363 +0000 UTC", token.ExpiresAt.String()) @@ -97,7 +98,7 @@ func TestTokenGetter(t *testing.T) { // Incorrect password options.AuthOptions.Password = "wrongpw" - _, err = GetToken(options) + _, err = GetToken(context.TODO(), options) if !gophercloud.ResponseCodeIs(err, http.StatusUnauthorized) { t.FailNow() } @@ -105,6 +106,6 @@ func TestTokenGetter(t *testing.T) { // Invalid auth data options.AuthOptions.Password = "" - _, err = GetToken(options) + _, err = GetToken(context.TODO(), options) th.AssertEquals(t, "You must provide a password to authenticate", err.Error()) } diff --git a/pkg/ingress/cmd/root.go b/pkg/ingress/cmd/root.go index f89542a44d..3a02ac8428 100644 --- a/pkg/ingress/cmd/root.go +++ b/pkg/ingress/cmd/root.go @@ -17,6 +17,7 @@ limitations under the License. package cmd import ( + "context" "fmt" "os" "os/signal" @@ -47,12 +48,13 @@ var rootCmd = &cobra.Command{ Long: `Ingress controller for OpenStack`, Run: func(cmd *cobra.Command, args []string) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() osIngress := controller.NewController(conf) - osIngress.Start() + osIngress.Start(ctx) sigterm := make(chan os.Signal, 1) - signal.Notify(sigterm, syscall.SIGTERM) - signal.Notify(sigterm, syscall.SIGINT) + signal.Notify(sigterm, syscall.SIGTERM, syscall.SIGINT) <-sigterm }, Version: version.Version, diff --git a/pkg/ingress/controller/controller.go b/pkg/ingress/controller/controller.go index c7471b0b07..09144a318c 100644 --- a/pkg/ingress/controller/controller.go +++ b/pkg/ingress/controller/controller.go @@ -158,7 +158,6 @@ type Event struct { // Controller ... type Controller struct { - stopCh chan struct{} knownNodes []*apiv1.Node queue workqueue.TypedRateLimitingInterface[any] informer informers.SharedInformerFactory @@ -322,7 +321,6 @@ func NewController(conf config.Config) *Controller { controller := &Controller{ config: conf, queue: queue, - stopCh: make(chan struct{}), informer: kubeInformerFactory, recorder: recorder, serviceLister: serviceInformer.Lister(), @@ -417,16 +415,15 @@ func NewController(conf config.Config) *Controller { } // Start starts the openstack ingress controller. -func (c *Controller) Start() { - defer close(c.stopCh) +func (c *Controller) Start(ctx context.Context) { defer utilruntime.HandleCrash() defer c.queue.ShutDown() log.Debug("starting Ingress controller") - go c.informer.Start(c.stopCh) + go c.informer.Start(ctx.Done()) // wait for the caches to synchronize before starting the worker - if !cache.WaitForCacheSync(c.stopCh, c.ingressListerSynced, c.serviceListerSynced, c.nodeListerSynced) { + if !cache.WaitForCacheSync(ctx.Done(), c.ingressListerSynced, c.serviceListerSynced, c.nodeListerSynced) { utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) return } @@ -440,22 +437,22 @@ func (c *Controller) Start() { c.knownNodes = readyWorkerNodes // Get subnet CIDR. The subnet CIDR will be used as source IP range for related security group rules. 
- subnet, err := c.osClient.GetSubnet(c.config.Octavia.SubnetID) + subnet, err := c.osClient.GetSubnet(ctx, c.config.Octavia.SubnetID) if err != nil { log.Errorf("Failed to retrieve the subnet %s: %v", c.config.Octavia.SubnetID, err) return } c.subnetCIDR = subnet.CIDR - go wait.Until(c.runWorker, time.Second, c.stopCh) - go wait.Until(c.nodeSyncLoop, 60*time.Second, c.stopCh) + go wait.UntilWithContext(ctx, c.runWorker, time.Second) + go wait.UntilWithContext(ctx, c.nodeSyncLoop, 60*time.Second) - <-c.stopCh + <-ctx.Done() } // nodeSyncLoop handles updating the hosts pointed to by all load // balancers whenever the set of nodes in the cluster changes. -func (c *Controller) nodeSyncLoop() { +func (c *Controller) nodeSyncLoop(ctx context.Context) { readyWorkerNodes, err := listWithPredicate(c.nodeLister, getNodeConditionPredicate()) if err != nil { log.Errorf("Failed to retrieve current set of nodes from node lister: %v", err) @@ -477,7 +474,7 @@ func (c *Controller) nodeSyncLoop() { var ings *nwv1.IngressList // NOTE(lingxiankong): only take ingresses without ip address into consideration opts := apimetav1.ListOptions{} - if ings, err = c.kubeClient.NetworkingV1().Ingresses("").List(context.TODO(), opts); err != nil { + if ings, err = c.kubeClient.NetworkingV1().Ingresses("").List(ctx, opts); err != nil { log.Errorf("Failed to retrieve current set of ingresses: %v", err) return } @@ -491,7 +488,7 @@ func (c *Controller) nodeSyncLoop() { log.WithFields(log.Fields{"ingress": ing.Name, "namespace": ing.Namespace}).Debug("Starting to handle ingress") lbName := utils.GetResourceName(ing.Namespace, ing.Name, c.config.ClusterName) - loadbalancer, err := openstackutil.GetLoadbalancerByName(c.osClient.Octavia, lbName) + loadbalancer, err := openstackutil.GetLoadbalancerByName(ctx, c.osClient.Octavia, lbName) if err != nil { if err != cpoerrors.ErrNotFound { log.WithFields(log.Fields{"name": lbName}).Errorf("Failed to retrieve loadbalancer from OpenStack: %v", err) @@ -501,7 +498,7 @@ func (c *Controller) nodeSyncLoop() { continue } - if err = c.osClient.UpdateLoadbalancerMembers(loadbalancer.ID, readyWorkerNodes); err != nil { + if err = c.osClient.UpdateLoadbalancerMembers(ctx, loadbalancer.ID, readyWorkerNodes); err != nil { log.WithFields(log.Fields{"ingress": ing.Name}).Error("Failed to handle ingress") continue } @@ -514,13 +511,13 @@ func (c *Controller) nodeSyncLoop() { log.Info("Finished to handle node change") } -func (c *Controller) runWorker() { - for c.processNextItem() { +func (c *Controller) runWorker(ctx context.Context) { + for c.processNextItem(ctx) { // continue looping } } -func (c *Controller) processNextItem() bool { +func (c *Controller) processNextItem(ctx context.Context) bool { obj, quit := c.queue.Get() if quit { @@ -528,7 +525,7 @@ func (c *Controller) processNextItem() bool { } defer c.queue.Done(obj) - err := c.processItem(obj.(Event)) + err := c.processItem(ctx, obj.(Event)) if err == nil { // No error, reset the ratelimit counters c.queue.Forget(obj) @@ -545,7 +542,7 @@ func (c *Controller) processNextItem() bool { return true } -func (c *Controller) processItem(event Event) error { +func (c *Controller) processItem(ctx context.Context, event Event) error { ing := event.Obj.(*nwv1.Ingress) key := fmt.Sprintf("%s/%s", ing.Namespace, ing.Name) logger := log.WithFields(log.Fields{"ingress": key}) @@ -554,7 +551,7 @@ func (c *Controller) processItem(event Event) error { case CreateEvent: logger.Info("creating ingress") - if err := c.ensureIngress(ing); err != nil { + if 
err := c.ensureIngress(ctx, ing); err != nil { utilruntime.HandleError(fmt.Errorf("failed to create openstack resources for ingress %s: %v", key, err)) c.recorder.Event(ing, apiv1.EventTypeWarning, "Failed", fmt.Sprintf("Failed to create openstack resources for ingress %s: %v", key, err)) } else { @@ -563,7 +560,7 @@ func (c *Controller) processItem(event Event) error { case UpdateEvent: logger.Info("updating ingress") - if err := c.ensureIngress(ing); err != nil { + if err := c.ensureIngress(ctx, ing); err != nil { utilruntime.HandleError(fmt.Errorf("failed to update openstack resources for ingress %s: %v", key, err)) c.recorder.Event(ing, apiv1.EventTypeWarning, "Failed", fmt.Sprintf("Failed to update openstack resources for ingress %s: %v", key, err)) } else { @@ -572,7 +569,7 @@ func (c *Controller) processItem(event Event) error { case DeleteEvent: logger.Info("deleting ingress") - if err := c.deleteIngress(ing); err != nil { + if err := c.deleteIngress(ctx, ing); err != nil { utilruntime.HandleError(fmt.Errorf("failed to delete openstack resources for ingress %s: %v", key, err)) c.recorder.Event(ing, apiv1.EventTypeWarning, "Failed", fmt.Sprintf("Failed to delete openstack resources for ingress %s: %v", key, err)) } else { @@ -583,13 +580,13 @@ func (c *Controller) processItem(event Event) error { return nil } -func (c *Controller) deleteIngress(ing *nwv1.Ingress) error { +func (c *Controller) deleteIngress(ctx context.Context, ing *nwv1.Ingress) error { key := fmt.Sprintf("%s/%s", ing.Namespace, ing.Name) lbName := utils.GetResourceName(ing.Namespace, ing.Name, c.config.ClusterName) logger := log.WithFields(log.Fields{"ingress": key}) // If load balancer doesn't exist, assume it's already deleted. - loadbalancer, err := openstackutil.GetLoadbalancerByName(c.osClient.Octavia, lbName) + loadbalancer, err := openstackutil.GetLoadbalancerByName(ctx, c.osClient.Octavia, lbName) if err != nil { if err != cpoerrors.ErrNotFound { return fmt.Errorf("error getting loadbalancer %s: %v", ing.Name, err) @@ -611,7 +608,7 @@ func (c *Controller) deleteIngress(ing *nwv1.Ingress) error { // any floating IPs associated with the load balancer VIP port. 
logger.WithFields(log.Fields{"lbID": loadbalancer.ID, "VIP": loadbalancer.VipAddress}).Info("deleting floating IPs associated with the load balancer VIP port") - if _, err = c.osClient.EnsureFloatingIP(true, loadbalancer.VipPortID, "", "", ""); err != nil { + if _, err = c.osClient.EnsureFloatingIP(ctx, true, loadbalancer.VipPortID, "", "", ""); err != nil { return fmt.Errorf("failed to delete floating IP: %v", err) } @@ -623,7 +620,7 @@ func (c *Controller) deleteIngress(ing *nwv1.Ingress) error { sgTags := []string{IngressControllerTag, fmt.Sprintf("%s_%s", ing.Namespace, ing.Name)} tagString := strings.Join(sgTags, ",") opts := groups.ListOpts{Tags: tagString} - sgs, err := c.osClient.GetSecurityGroups(opts) + sgs, err := c.osClient.GetSecurityGroups(ctx, opts) if err != nil { return fmt.Errorf("failed to get security groups for ingress %s: %v", key, err) } @@ -634,10 +631,10 @@ func (c *Controller) deleteIngress(ing *nwv1.Ingress) error { } for _, sg := range sgs { - if err = c.osClient.EnsurePortSecurityGroup(true, sg.ID, nodes); err != nil { + if err = c.osClient.EnsurePortSecurityGroup(ctx, true, sg.ID, nodes); err != nil { return fmt.Errorf("failed to operate on the port security groups for ingress %s: %v", key, err) } - if _, err = c.osClient.EnsureSecurityGroup(true, "", "", sgTags); err != nil { + if _, err = c.osClient.EnsureSecurityGroup(ctx, true, "", "", sgTags); err != nil { return fmt.Errorf("failed to delete the security groups for ingress %s: %v", key, err) } } @@ -645,7 +642,7 @@ func (c *Controller) deleteIngress(ing *nwv1.Ingress) error { logger.WithFields(log.Fields{"lbID": loadbalancer.ID}).Info("security group deleted") } - err = openstackutil.DeleteLoadbalancer(c.osClient.Octavia, loadbalancer.ID, true) + err = openstackutil.DeleteLoadbalancer(ctx, c.osClient.Octavia, loadbalancer.ID, true) if err != nil { logger.WithFields(log.Fields{"lbID": loadbalancer.ID}).Infof("loadbalancer delete failed: %v", err) } else { @@ -655,7 +652,7 @@ func (c *Controller) deleteIngress(ing *nwv1.Ingress) error { // Delete Barbican secrets if c.osClient.Barbican != nil && ing.Spec.TLS != nil { nameFilter := fmt.Sprintf("kube_ingress_%s_%s_%s", c.config.ClusterName, ing.Namespace, ing.Name) - if err := openstackutil.DeleteSecrets(c.osClient.Barbican, nameFilter); err != nil { + if err := openstackutil.DeleteSecrets(ctx, c.osClient.Barbican, nameFilter); err != nil { return fmt.Errorf("failed to remove Barbican secrets: %v", err) } @@ -665,8 +662,8 @@ func (c *Controller) deleteIngress(ing *nwv1.Ingress) error { return err } -func (c *Controller) toBarbicanSecret(name string, namespace string, toSecretName string) (string, error) { - secret, err := c.kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, apimetav1.GetOptions{}) +func (c *Controller) toBarbicanSecret(ctx context.Context, name string, namespace string, toSecretName string) (string, error) { + secret, err := c.kubeClient.CoreV1().Secrets(namespace).Get(ctx, name, apimetav1.GetOptions{}) if err != nil { // TODO(lingxiankong): Creating secret on the fly not supported yet. 
return "", err @@ -704,10 +701,10 @@ func (c *Controller) toBarbicanSecret(name string, namespace string, toSecretNam } encoded := base64.StdEncoding.EncodeToString(pfxData) - return openstackutil.EnsureSecret(c.osClient.Barbican, toSecretName, "application/octet-stream", encoded) + return openstackutil.EnsureSecret(ctx, c.osClient.Barbican, toSecretName, "application/octet-stream", encoded) } -func (c *Controller) ensureIngress(ing *nwv1.Ingress) error { +func (c *Controller) ensureIngress(ctx context.Context, ing *nwv1.Ingress) error { ingName := ing.ObjectMeta.Name ingNamespace := ing.ObjectMeta.Namespace clusterName := c.config.ClusterName @@ -719,7 +716,7 @@ func (c *Controller) ensureIngress(ing *nwv1.Ingress) error { return fmt.Errorf("TLS Ingress not supported because of Key Manager service unavailable") } - lb, err := c.osClient.EnsureLoadBalancer(resName, c.config.Octavia.SubnetID, ingNamespace, ingName, clusterName, c.config.Octavia.FlavorID) + lb, err := c.osClient.EnsureLoadBalancer(ctx, resName, c.config.Octavia.SubnetID, ingNamespace, ingName, clusterName, c.config.Octavia.FlavorID) if err != nil { return err } @@ -739,7 +736,7 @@ func (c *Controller) ensureIngress(ing *nwv1.Ingress) error { sgDescription := fmt.Sprintf("Security group created for Ingress %s from cluster %s", ingfullName, clusterName) sgTags := []string{IngressControllerTag, fmt.Sprintf("%s_%s", ingNamespace, ingName)} - sgID, err = c.osClient.EnsureSecurityGroup(false, resName, sgDescription, sgTags) + sgID, err = c.osClient.EnsureSecurityGroup(ctx, false, resName, sgDescription, sgTags) if err != nil { return fmt.Errorf("failed to prepare the security group for the ingress %s: %v", ingfullName, err) } @@ -751,7 +748,7 @@ func (c *Controller) ensureIngress(ing *nwv1.Ingress) error { var secretRefs []string for _, tls := range ing.Spec.TLS { secretName := fmt.Sprintf(BarbicanSecretNameTemplate, clusterName, ingNamespace, ingName, tls.SecretName) - secretRef, err := c.toBarbicanSecret(tls.SecretName, ingNamespace, secretName) + secretRef, err := c.toBarbicanSecret(ctx, tls.SecretName, ingNamespace, secretName) if err != nil { return fmt.Errorf("failed to create Barbican secret: %v", err) } @@ -773,7 +770,7 @@ func (c *Controller) ensureIngress(ing *nwv1.Ingress) error { timeoutTCPInspect := maybeGetIntFromIngressAnnotation(ing, IngressAnnotationTimeoutTCPInspect) listenerAllowedCIDRs := strings.Split(sourceRanges, ",") - listener, err := c.osClient.EnsureListener(resName, lb.ID, secretRefs, listenerAllowedCIDRs, timeoutClientData, timeoutMemberData, timeoutTCPInspect, timeoutMemberConnect) + listener, err := c.osClient.EnsureListener(ctx, resName, lb.ID, secretRefs, listenerAllowedCIDRs, timeoutClientData, timeoutMemberData, timeoutTCPInspect, timeoutMemberConnect) if err != nil { return err } @@ -808,12 +805,12 @@ func (c *Controller) ensureIngress(ing *nwv1.Ingress) error { var newPolicies []openstack.IngPolicy var oldPolicies []openstack.ExistingPolicy - existingPolicies, err := openstackutil.GetL7policies(c.osClient.Octavia, listener.ID) + existingPolicies, err := openstackutil.GetL7policies(ctx, c.osClient.Octavia, listener.ID) if err != nil { return fmt.Errorf("failed to get l7 policies for listener %s", listener.ID) } for _, policy := range existingPolicies { - rules, err := openstackutil.GetL7Rules(c.osClient.Octavia, policy.ID) + rules, err := openstackutil.GetL7Rules(ctx, c.osClient.Octavia, policy.ID) if err != nil { return fmt.Errorf("failed to get l7 rules for policy %s", policy.ID) } @@ -823,7 
+820,7 @@ func (c *Controller) ensureIngress(ing *nwv1.Ingress) error { }) } - existingPools, err := openstackutil.GetPools(c.osClient.Octavia, lb.ID) + existingPools, err := openstackutil.GetPools(ctx, c.osClient.Octavia, lb.ID) if err != nil { return fmt.Errorf("failed to get pools from load balancer %s, error: %v", lb.ID, err) } @@ -923,21 +920,21 @@ func (c *Controller) ensureIngress(ing *nwv1.Ingress) error { // Reconcile octavia resources. rt := openstack.NewResourceTracker(ingfullName, c.osClient.Octavia, lb.ID, listener.ID, newPools, newPolicies, existingPools, oldPolicies) - if err := rt.CreateResources(); err != nil { + if err := rt.CreateResources(ctx); err != nil { return err } - if err := rt.CleanupResources(); err != nil { + if err := rt.CleanupResources(ctx); err != nil { return err } if c.config.Octavia.ManageSecurityGroups { logger.WithFields(log.Fields{"sgID": sgID}).Info("ensuring security group rules") - if err := c.osClient.EnsureSecurityGroupRules(sgID, c.subnetCIDR, nodePorts); err != nil { + if err := c.osClient.EnsureSecurityGroupRules(ctx, sgID, c.subnetCIDR, nodePorts); err != nil { return fmt.Errorf("failed to ensure security group rules for Ingress %s: %v", ingName, err) } - if err := c.osClient.EnsurePortSecurityGroup(false, sgID, nodeObjs); err != nil { + if err := c.osClient.EnsurePortSecurityGroup(ctx, false, sgID, nodeObjs); err != nil { return fmt.Errorf("failed to operate port security group for Ingress %s: %v", ingName, err) } @@ -966,7 +963,7 @@ func (c *Controller) ensureIngress(ing *nwv1.Ingress) error { } else { logger.Info("creating new floating IP") } - address, err = c.osClient.EnsureFloatingIP(false, lb.VipPortID, floatingIPSetting, c.config.Octavia.FloatingIPNetwork, description) + address, err = c.osClient.EnsureFloatingIP(ctx, false, lb.VipPortID, floatingIPSetting, c.config.Octavia.FloatingIPNetwork, description) if err != nil { return fmt.Errorf("failed to use provided floating IP %s : %v", floatingIPSetting, err) } @@ -974,7 +971,7 @@ func (c *Controller) ensureIngress(ing *nwv1.Ingress) error { } // Update ingress status - newIng, err := c.updateIngressStatus(ing, address) + newIng, err := c.updateIngressStatus(ctx, ing, address) if err != nil { return err } @@ -982,7 +979,7 @@ func (c *Controller) ensureIngress(ing *nwv1.Ingress) error { // Add ingress resource version to the load balancer description newDes := fmt.Sprintf("Kubernetes Ingress %s in namespace %s from cluster %s, version: %s", ingName, ingNamespace, clusterName, newIng.ResourceVersion) - if err = c.osClient.UpdateLoadBalancerDescription(lb.ID, newDes); err != nil { + if err = c.osClient.UpdateLoadBalancerDescription(ctx, lb.ID, newDes); err != nil { return err } @@ -991,13 +988,13 @@ func (c *Controller) ensureIngress(ing *nwv1.Ingress) error { return nil } -func (c *Controller) updateIngressStatus(ing *nwv1.Ingress, vip string) (*nwv1.Ingress, error) { +func (c *Controller) updateIngressStatus(ctx context.Context, ing *nwv1.Ingress, vip string) (*nwv1.Ingress, error) { newState := new(nwv1.IngressLoadBalancerStatus) newState.Ingress = []nwv1.IngressLoadBalancerIngress{{IP: vip}} newIng := ing.DeepCopy() newIng.Status.LoadBalancer = *newState - newObj, err := c.kubeClient.NetworkingV1().Ingresses(newIng.Namespace).UpdateStatus(context.TODO(), newIng, apimetav1.UpdateOptions{}) + newObj, err := c.kubeClient.NetworkingV1().Ingresses(newIng.Namespace).UpdateStatus(ctx, newIng, apimetav1.UpdateOptions{}) if err != nil { return nil, err } diff --git 
a/pkg/ingress/controller/openstack/neutron.go b/pkg/ingress/controller/openstack/neutron.go index 8e89646a16..df275eba05 100644 --- a/pkg/ingress/controller/openstack/neutron.go +++ b/pkg/ingress/controller/openstack/neutron.go @@ -35,8 +35,8 @@ import ( "k8s.io/cloud-provider-openstack/pkg/ingress/utils" ) -func (os *OpenStack) getFloatingIPs(listOpts floatingips.ListOpts) ([]floatingips.FloatingIP, error) { - allPages, err := floatingips.List(os.neutron, listOpts).AllPages(context.TODO()) +func (os *OpenStack) getFloatingIPs(ctx context.Context, listOpts floatingips.ListOpts) ([]floatingips.FloatingIP, error) { + allPages, err := floatingips.List(os.neutron, listOpts).AllPages(ctx) if err != nil { return []floatingips.FloatingIP{}, err } @@ -48,36 +48,36 @@ func (os *OpenStack) getFloatingIPs(listOpts floatingips.ListOpts) ([]floatingip return allFIPs, nil } -func (os *OpenStack) createFloatingIP(portID string, floatingNetworkID string, description string) (*floatingips.FloatingIP, error) { +func (os *OpenStack) createFloatingIP(ctx context.Context, portID string, floatingNetworkID string, description string) (*floatingips.FloatingIP, error) { floatIPOpts := floatingips.CreateOpts{ PortID: portID, FloatingNetworkID: floatingNetworkID, Description: description, } - return floatingips.Create(context.TODO(), os.neutron, floatIPOpts).Extract() + return floatingips.Create(ctx, os.neutron, floatIPOpts).Extract() } // associateFloatingIP associate an unused floating IP to a given Port -func (os *OpenStack) associateFloatingIP(fip *floatingips.FloatingIP, portID string, description string) (*floatingips.FloatingIP, error) { +func (os *OpenStack) associateFloatingIP(ctx context.Context, fip *floatingips.FloatingIP, portID string, description string) (*floatingips.FloatingIP, error) { updateOpts := floatingips.UpdateOpts{ PortID: &portID, Description: &description, } - return floatingips.Update(context.TODO(), os.neutron, fip.ID, updateOpts).Extract() + return floatingips.Update(ctx, os.neutron, fip.ID, updateOpts).Extract() } // disassociateFloatingIP disassociate a floating IP from a port -func (os *OpenStack) disassociateFloatingIP(fip *floatingips.FloatingIP, description string) (*floatingips.FloatingIP, error) { +func (os *OpenStack) disassociateFloatingIP(ctx context.Context, fip *floatingips.FloatingIP, description string) (*floatingips.FloatingIP, error) { updateDisassociateOpts := floatingips.UpdateOpts{ PortID: new(string), Description: &description, } - return floatingips.Update(context.TODO(), os.neutron, fip.ID, updateDisassociateOpts).Extract() + return floatingips.Update(ctx, os.neutron, fip.ID, updateDisassociateOpts).Extract() } // GetSubnet get a subnet by the given ID. -func (os *OpenStack) GetSubnet(subnetID string) (*subnets.Subnet, error) { - subnet, err := subnets.Get(context.TODO(), os.neutron, subnetID).Extract() +func (os *OpenStack) GetSubnet(ctx context.Context, subnetID string) (*subnets.Subnet, error) { + subnet, err := subnets.Get(ctx, os.neutron, subnetID).Extract() if err != nil { return nil, err } @@ -85,8 +85,8 @@ func (os *OpenStack) GetSubnet(subnetID string) (*subnets.Subnet, error) { } // getPorts gets all the filtered ports. 
-func (os *OpenStack) getPorts(listOpts ports.ListOpts) ([]ports.Port, error) { - allPages, err := ports.List(os.neutron, listOpts).AllPages(context.TODO()) +func (os *OpenStack) getPorts(ctx context.Context, listOpts ports.ListOpts) ([]ports.Port, error) { + allPages, err := ports.List(os.neutron, listOpts).AllPages(ctx) if err != nil { return []ports.Port{}, err } @@ -99,9 +99,9 @@ func (os *OpenStack) getPorts(listOpts ports.ListOpts) ([]ports.Port, error) { } // EnsureFloatingIP makes sure a floating IP is allocated for the port -func (os *OpenStack) EnsureFloatingIP(needDelete bool, portID string, existingfloatingIP string, floatingIPNetwork string, description string) (string, error) { +func (os *OpenStack) EnsureFloatingIP(ctx context.Context, needDelete bool, portID string, existingfloatingIP string, floatingIPNetwork string, description string) (string, error) { listOpts := floatingips.ListOpts{PortID: portID} - fips, err := os.getFloatingIPs(listOpts) + fips, err := os.getFloatingIPs(ctx, listOpts) if err != nil { return "", fmt.Errorf("unable to get floating ips: %w", err) } @@ -109,7 +109,7 @@ func (os *OpenStack) EnsureFloatingIP(needDelete bool, portID string, existingfl // If needed, delete the floating IPs and return. if needDelete { for _, fip := range fips { - if err := floatingips.Delete(context.TODO(), os.neutron, fip.ID).ExtractErr(); err != nil { + if err := floatingips.Delete(ctx, os.neutron, fip.ID).ExtractErr(); err != nil { return "", err } } @@ -127,7 +127,7 @@ func (os *OpenStack) EnsureFloatingIP(needDelete bool, portID string, existingfl if len(fips) == 1 { fip = &fips[0] } else { - fip, err = os.createFloatingIP(portID, floatingIPNetwork, description) + fip, err = os.createFloatingIP(ctx, portID, floatingIPNetwork, description) if err != nil { return "", err } @@ -139,7 +139,7 @@ func (os *OpenStack) EnsureFloatingIP(needDelete bool, portID string, existingfl FloatingIP: existingfloatingIP, FloatingNetworkID: floatingIPNetwork, } - osFips, err := os.getFloatingIPs(opts) + osFips, err := os.getFloatingIPs(ctx, opts) if err != nil { return "", err } @@ -158,7 +158,7 @@ func (os *OpenStack) EnsureFloatingIP(needDelete bool, portID string, existingfl // if port don't have fip if len(fips) == 0 { - fip, err = os.associateFloatingIP(&osFips[0], portID, description) + fip, err = os.associateFloatingIP(ctx, &osFips[0], portID, description) if err != nil { return "", err } @@ -168,12 +168,12 @@ func (os *OpenStack) EnsureFloatingIP(needDelete bool, portID string, existingfl // "Cannot associate floating IP with port using fixed // IP, as that fixed IP already has a floating IP on // external network" - _, err = os.disassociateFloatingIP(&fips[0], "") + _, err = os.disassociateFloatingIP(ctx, &fips[0], "") if err != nil { return "", err } // associate new fip - fip, err = os.associateFloatingIP(&osFips[0], portID, description) + fip, err = os.associateFloatingIP(ctx, &osFips[0], portID, description) if err != nil { return "", err } @@ -186,8 +186,8 @@ func (os *OpenStack) EnsureFloatingIP(needDelete bool, portID string, existingfl } // GetSecurityGroups gets all the filtered security groups. 
-func (os *OpenStack) GetSecurityGroups(listOpts groups.ListOpts) ([]groups.SecGroup, error) { - allPages, err := groups.List(os.neutron, listOpts).AllPages(context.TODO()) +func (os *OpenStack) GetSecurityGroups(ctx context.Context, listOpts groups.ListOpts) ([]groups.SecGroup, error) { + allPages, err := groups.List(os.neutron, listOpts).AllPages(ctx) if err != nil { return []groups.SecGroup{}, err } @@ -201,10 +201,10 @@ func (os *OpenStack) GetSecurityGroups(listOpts groups.ListOpts) ([]groups.SecGr // EnsureSecurityGroup make sure the security group with given tags exists or not according to need_delete param. // Make sure the EnsurePortSecurityGroup function is called before EnsureSecurityGroup if you want to delete the security group. -func (os *OpenStack) EnsureSecurityGroup(needDelete bool, name string, description string, tags []string) (string, error) { +func (os *OpenStack) EnsureSecurityGroup(ctx context.Context, needDelete bool, name string, description string, tags []string) (string, error) { tagsString := strings.Join(tags, ",") listOpts := groups.ListOpts{Tags: tagsString} - allGroups, err := os.GetSecurityGroups(listOpts) + allGroups, err := os.GetSecurityGroups(ctx, listOpts) if err != nil { return "", err } @@ -212,7 +212,7 @@ func (os *OpenStack) EnsureSecurityGroup(needDelete bool, name string, descripti // If needed, delete the security groups and return. if needDelete { for _, group := range allGroups { - if err := groups.Delete(context.TODO(), os.neutron, group.ID).ExtractErr(); err != nil { + if err := groups.Delete(ctx, os.neutron, group.ID).ExtractErr(); err != nil { return "", err } } @@ -230,7 +230,7 @@ func (os *OpenStack) EnsureSecurityGroup(needDelete bool, name string, descripti Name: name, Description: description, } - group, err = groups.Create(context.TODO(), os.neutron, createOpts).Extract() + group, err = groups.Create(ctx, os.neutron, createOpts).Extract() if err != nil { return "", err } @@ -242,7 +242,7 @@ func (os *OpenStack) EnsureSecurityGroup(needDelete bool, name string, descripti //} for _, t := range tags { - if err := neutrontags.Add(context.TODO(), os.neutron, "security_groups", group.ID, t).ExtractErr(); err != nil { + if err := neutrontags.Add(ctx, os.neutron, "security_groups", group.ID, t).ExtractErr(); err != nil { return "", fmt.Errorf("failed to add tag %s to security group %s: %v", t, group.ID, err) } } @@ -254,13 +254,13 @@ func (os *OpenStack) EnsureSecurityGroup(needDelete bool, name string, descripti } // EnsureSecurityGroupRules ensures the only dstPorts are allowed in the given security group. -func (os *OpenStack) EnsureSecurityGroupRules(sgID string, sourceIP string, dstPorts []int) error { +func (os *OpenStack) EnsureSecurityGroupRules(ctx context.Context, sgID string, sourceIP string, dstPorts []int) error { listOpts := rules.ListOpts{ Protocol: "tcp", SecGroupID: sgID, RemoteIPPrefix: sourceIP, } - allPages, err := rules.List(os.neutron, listOpts).AllPages(context.TODO()) + allPages, err := rules.List(os.neutron, listOpts).AllPages(ctx) if err != nil { return err } @@ -273,7 +273,7 @@ func (os *OpenStack) EnsureSecurityGroupRules(sgID string, sourceIP string, dstP // Delete all the rules and return. 
for _, rule := range allRules { - if err := rules.Delete(context.TODO(), os.neutron, rule.ID).ExtractErr(); err != nil { + if err := rules.Delete(ctx, os.neutron, rule.ID).ExtractErr(); err != nil { return err } } @@ -292,7 +292,7 @@ func (os *OpenStack) EnsureSecurityGroupRules(sgID string, sourceIP string, dstP for _, rule := range allRules { if !dstPortsSet.Has(strconv.Itoa(rule.PortRangeMin)) { // Delete the rule - if err := rules.Delete(context.TODO(), os.neutron, rule.ID).ExtractErr(); err != nil { + if err := rules.Delete(ctx, os.neutron, rule.ID).ExtractErr(); err != nil { return err } } else { @@ -316,7 +316,7 @@ func (os *OpenStack) EnsureSecurityGroupRules(sgID string, sourceIP string, dstP RemoteIPPrefix: sourceIP, SecGroupID: sgID, } - if _, err := rules.Create(context.TODO(), os.neutron, createOpts).Extract(); err != nil { + if _, err := rules.Create(ctx, os.neutron, createOpts).Extract(); err != nil { return err } } @@ -326,14 +326,14 @@ func (os *OpenStack) EnsureSecurityGroupRules(sgID string, sourceIP string, dstP // EnsurePortSecurityGroup ensures the security group is attached to all the node ports or detached from all the ports // according to needDelete param. -func (os *OpenStack) EnsurePortSecurityGroup(needDelete bool, sgID string, nodes []*v1.Node) error { +func (os *OpenStack) EnsurePortSecurityGroup(ctx context.Context, needDelete bool, sgID string, nodes []*v1.Node) error { for _, node := range nodes { instanceID, err := utils.GetNodeID(node) if err != nil { return err } listOpts := ports.ListOpts{DeviceID: instanceID} - allPorts, err := os.getPorts(listOpts) + allPorts, err := os.getPorts(ctx, listOpts) if err != nil { return err } @@ -346,7 +346,7 @@ func (os *OpenStack) EnsurePortSecurityGroup(needDelete bool, sgID string, nodes sgSet.Delete(sgID) newSGs := sets.List(sgSet) updateOpts := ports.UpdateOpts{SecurityGroups: &newSGs} - if _, err := ports.Update(context.TODO(), os.neutron, port.ID, updateOpts).Extract(); err != nil { + if _, err := ports.Update(ctx, os.neutron, port.ID, updateOpts).Extract(); err != nil { return err } @@ -358,7 +358,7 @@ func (os *OpenStack) EnsurePortSecurityGroup(needDelete bool, sgID string, nodes sgSet.Insert(sgID) newSGs := sets.List(sgSet) updateOpts := ports.UpdateOpts{SecurityGroups: &newSGs} - if _, err := ports.Update(context.TODO(), os.neutron, port.ID, updateOpts).Extract(); err != nil { + if _, err := ports.Update(ctx, os.neutron, port.ID, updateOpts).Extract(); err != nil { return err } diff --git a/pkg/ingress/controller/openstack/octavia.go b/pkg/ingress/controller/openstack/octavia.go index c846c1e5fc..c0d356bb60 100644 --- a/pkg/ingress/controller/openstack/octavia.go +++ b/pkg/ingress/controller/openstack/octavia.go @@ -153,7 +153,7 @@ func NewResourceTracker(ingressName string, client *gophercloud.ServiceClient, l } // createResources only creates resources when necessary. -func (rt *ResourceTracker) CreateResources() error { +func (rt *ResourceTracker) CreateResources(ctx context.Context) error { poolMapping := make(map[string]string) for _, pool := range rt.newPools { // Different ingress paths may configure the same service, but we only need to create one pool. 
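A minimal caller-side sketch of how the reworked Neutron helpers above can now be bounded by a single deadline. Nothing below is part of the patch: the wrapper function, the literal tag, description, CIDR and port values are placeholders, and the ingress controller's openstack package plus the standard context/time imports are assumed.

    func reconcileIngressNetworking(osClient *openstack.OpenStack, portID, floatingNetworkID string) (string, error) {
    	// One deadline now bounds every Neutron call made by the helpers below.
    	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
    	defer cancel()

    	// Allocate (or reuse) a floating IP for the ingress port.
    	vip, err := osClient.EnsureFloatingIP(ctx, false, portID, "", floatingNetworkID, "managed by octavia-ingress-controller")
    	if err != nil {
    		return "", err
    	}

    	// Reconcile the security group and its rules under the same context.
    	sgID, err := osClient.EnsureSecurityGroup(ctx, false, "k8s-ingress-sg", "ingress security group", []string{"octavia.ingress.kubernetes.io"})
    	if err != nil {
    		return "", err
    	}
    	if err := osClient.EnsureSecurityGroupRules(ctx, sgID, "0.0.0.0/0", []int{80, 443}); err != nil {
    		return "", err
    	}
    	return vip, nil
    }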
@@ -164,7 +164,7 @@ func (rt *ResourceTracker) CreateResources() error { poolID, isPresent := rt.oldPoolMapping[pool.Name] if !isPresent { rt.logger.WithFields(log.Fields{"poolName": pool.Name}).Info("creating pool") - newPool, err := openstackutil.CreatePool(rt.client, pool.Opts, rt.lbID) + newPool, err := openstackutil.CreatePool(ctx, rt.client, pool.Opts, rt.lbID) if err != nil { return fmt.Errorf("failed to create pool %s, error: %v", pool.Name, err) } @@ -176,7 +176,7 @@ func (rt *ResourceTracker) CreateResources() error { poolMapping[pool.Name] = poolID rt.logger.WithFields(log.Fields{"poolName": pool.Name, "poolID": poolID}).Info("updating pool members") - if err := openstackutil.BatchUpdatePoolMembers(rt.client, rt.lbID, poolID, pool.PoolMembers); err != nil { + if err := openstackutil.BatchUpdatePoolMembers(ctx, rt.client, rt.lbID, poolID, pool.PoolMembers); err != nil { return fmt.Errorf("failed to update pool members, error: %v", err) } rt.logger.WithFields(log.Fields{"poolName": pool.Name, "poolID": poolID}).Info("pool members updated ") @@ -202,7 +202,7 @@ func (rt *ResourceTracker) CreateResources() error { // Create new policy with rules rt.logger.WithFields(log.Fields{"listenerID": rt.listenerID, "poolID": poolID}).Info("creating l7 policy") policy.Opts.RedirectPoolID = poolID - newPolicy, err := openstackutil.CreateL7Policy(rt.client, policy.Opts, rt.lbID) + newPolicy, err := openstackutil.CreateL7Policy(ctx, rt.client, policy.Opts, rt.lbID) if err != nil { return fmt.Errorf("failed to create l7policy, error: %v", err) } @@ -210,7 +210,7 @@ func (rt *ResourceTracker) CreateResources() error { rt.logger.WithFields(log.Fields{"listenerID": rt.listenerID, "policyID": newPolicy.ID}).Info("creating l7 rules") for _, opt := range policy.RulesOpts { - if err := openstackutil.CreateL7Rule(rt.client, newPolicy.ID, opt, rt.lbID); err != nil { + if err := openstackutil.CreateL7Rule(ctx, rt.client, newPolicy.ID, opt, rt.lbID); err != nil { return fmt.Errorf("failed to create l7 rules for policy %s, error: %v", newPolicy.ID, err) } } @@ -225,13 +225,13 @@ func (rt *ResourceTracker) CreateResources() error { return nil } -func (rt *ResourceTracker) CleanupResources() error { +func (rt *ResourceTracker) CleanupResources(ctx context.Context) error { for key, oldPolicy := range rt.oldPolicyMapping { poolID, isPresent := rt.newPolicyRuleMapping[key] if !isPresent || poolID != oldPolicy.Policy.RedirectPoolID { // Delete invalid policy rt.logger.WithFields(log.Fields{"policyID": oldPolicy.Policy.ID}).Info("deleting policy") - if err := openstackutil.DeleteL7policy(rt.client, oldPolicy.Policy.ID, rt.lbID); err != nil { + if err := openstackutil.DeleteL7policy(ctx, rt.client, oldPolicy.Policy.ID, rt.lbID); err != nil { return fmt.Errorf("failed to delete l7 policy %s, error: %v", oldPolicy.Policy.ID, err) } rt.logger.WithFields(log.Fields{"policyID": oldPolicy.Policy.ID}).Info("policy deleted") @@ -242,7 +242,7 @@ func (rt *ResourceTracker) CleanupResources() error { if !rt.newPoolNames.Has(pool.Name) { // Delete unused pool rt.logger.WithFields(log.Fields{"poolID": pool.ID}).Info("deleting pool") - if err := openstackutil.DeletePool(rt.client, pool.ID, rt.lbID); err != nil { + if err := openstackutil.DeletePool(ctx, rt.client, pool.ID, rt.lbID); err != nil { return fmt.Errorf("failed to delete pool %s, error: %v", pool.ID, err) } rt.logger.WithFields(log.Fields{"poolID": pool.ID}).Info("pool deleted") @@ -252,7 +252,7 @@ func (rt *ResourceTracker) CleanupResources() error { return nil } -func 
(os *OpenStack) waitLoadbalancerActiveProvisioningStatus(loadbalancerID string) (string, error) { +func (os *OpenStack) waitLoadbalancerActiveProvisioningStatus(ctx context.Context, loadbalancerID string) (string, error) { backoff := wait.Backoff{ Duration: loadbalancerActiveInitDealy, Factor: loadbalancerActiveFactor, @@ -261,7 +261,7 @@ func (os *OpenStack) waitLoadbalancerActiveProvisioningStatus(loadbalancerID str var provisioningStatus string err := wait.ExponentialBackoff(backoff, func() (bool, error) { - loadbalancer, err := loadbalancers.Get(context.TODO(), os.Octavia, loadbalancerID).Extract() + loadbalancer, err := loadbalancers.Get(ctx, os.Octavia, loadbalancerID).Extract() if err != nil { return false, err } @@ -283,10 +283,10 @@ func (os *OpenStack) waitLoadbalancerActiveProvisioningStatus(loadbalancerID str } // EnsureLoadBalancer creates a loadbalancer in octavia if it does not exist, wait for the loadbalancer to be ACTIVE. -func (os *OpenStack) EnsureLoadBalancer(name string, subnetID string, ingNamespace string, ingName string, clusterName string, flavorId string) (*loadbalancers.LoadBalancer, error) { +func (os *OpenStack) EnsureLoadBalancer(ctx context.Context, name string, subnetID string, ingNamespace string, ingName string, clusterName string, flavorId string) (*loadbalancers.LoadBalancer, error) { logger := log.WithFields(log.Fields{"ingress": fmt.Sprintf("%s/%s", ingNamespace, ingName)}) - loadbalancer, err := openstackutil.GetLoadbalancerByName(os.Octavia, name) + loadbalancer, err := openstackutil.GetLoadbalancerByName(ctx, os.Octavia, name) if err != nil { if err != cpoerrors.ErrNotFound { return nil, fmt.Errorf("error getting loadbalancer %s: %v", name, err) @@ -299,7 +299,7 @@ func (os *OpenStack) EnsureLoadBalancer(name string, subnetID string, ingNamespa Provider: os.config.Octavia.Provider, FlavorID: flavorId, } - loadbalancer, err = loadbalancers.Create(context.TODO(), os.Octavia, createOpts).Extract() + loadbalancer, err = loadbalancers.Create(ctx, os.Octavia, createOpts).Extract() if err != nil { return nil, fmt.Errorf("error creating loadbalancer %v: %v", createOpts, err) } @@ -309,7 +309,7 @@ func (os *OpenStack) EnsureLoadBalancer(name string, subnetID string, ingNamespa logger.WithFields(log.Fields{"lbName": name, "lbID": loadbalancer.ID}).Debug("loadbalancer exists") } - _, err = os.waitLoadbalancerActiveProvisioningStatus(loadbalancer.ID) + _, err = os.waitLoadbalancerActiveProvisioningStatus(ctx, loadbalancer.ID) if err != nil { return nil, fmt.Errorf("loadbalancer %s not in ACTIVE status, error: %v", loadbalancer.ID, err) } @@ -318,8 +318,8 @@ func (os *OpenStack) EnsureLoadBalancer(name string, subnetID string, ingNamespa } // UpdateLoadBalancerDescription updates the load balancer description field. -func (os *OpenStack) UpdateLoadBalancerDescription(lbID string, newDescription string) error { - _, err := loadbalancers.Update(context.TODO(), os.Octavia, lbID, loadbalancers.UpdateOpts{ +func (os *OpenStack) UpdateLoadBalancerDescription(ctx context.Context, lbID string, newDescription string) error { + _, err := loadbalancers.Update(ctx, os.Octavia, lbID, loadbalancers.UpdateOpts{ Description: &newDescription, }).Extract() if err != nil { @@ -331,8 +331,8 @@ func (os *OpenStack) UpdateLoadBalancerDescription(lbID string, newDescription s } // EnsureListener creates a loadbalancer listener in octavia if it does not exist, wait for the loadbalancer to be ACTIVE. 
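waitLoadbalancerActiveProvisioningStatus now receives a context, but the polling itself still goes through wait.ExponentialBackoff, so cancellation is only observed inside each Octavia GET. A possible follow-up, sketched here and not part of this change, is the context-aware variant; it assumes an apimachinery version whose ExponentialBackoffWithContext passes the context to the condition callback, and spells the status as a literal where the real code presumably keeps its existing constant.

    var provisioningStatus string
    err := wait.ExponentialBackoffWithContext(ctx, backoff, func(ctx context.Context) (bool, error) {
    	lb, err := loadbalancers.Get(ctx, os.Octavia, loadbalancerID).Extract()
    	if err != nil {
    		return false, err
    	}
    	provisioningStatus = lb.ProvisioningStatus
    	// Stop polling once Octavia reports the load balancer as ACTIVE.
    	return provisioningStatus == "ACTIVE", nil
    })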
-func (os *OpenStack) EnsureListener(name string, lbID string, secretRefs []string, listenerAllowedCIDRs []string, timeoutClientData, timeoutMemberData, timeoutTCPInspect, timeoutMemberConnect *int) (*listeners.Listener, error) { - listener, err := openstackutil.GetListenerByName(os.Octavia, name, lbID) +func (os *OpenStack) EnsureListener(ctx context.Context, name string, lbID string, secretRefs []string, listenerAllowedCIDRs []string, timeoutClientData, timeoutMemberData, timeoutTCPInspect, timeoutMemberConnect *int) (*listeners.Listener, error) { + listener, err := openstackutil.GetListenerByName(ctx, os.Octavia, name, lbID) if err != nil { if err != cpoerrors.ErrNotFound { return nil, fmt.Errorf("error getting listener %s: %v", name, err) @@ -359,7 +359,7 @@ func (os *OpenStack) EnsureListener(name string, lbID string, secretRefs []strin if len(listenerAllowedCIDRs) > 0 { opts.AllowedCIDRs = listenerAllowedCIDRs } - listener, err = listeners.Create(context.TODO(), os.Octavia, opts).Extract() + listener, err = listeners.Create(ctx, os.Octavia, opts).Extract() if err != nil { return nil, fmt.Errorf("error creating listener: %v", err) } @@ -382,7 +382,7 @@ func (os *OpenStack) EnsureListener(name string, lbID string, secretRefs []strin } if updateOpts != (listeners.UpdateOpts{}) { - _, err := listeners.Update(context.TODO(), os.Octavia, listener.ID, updateOpts).Extract() + _, err := listeners.Update(ctx, os.Octavia, listener.ID, updateOpts).Extract() if err != nil { return nil, fmt.Errorf("failed to update listener options: %v", err) } @@ -391,7 +391,7 @@ func (os *OpenStack) EnsureListener(name string, lbID string, secretRefs []strin } } - _, err = os.waitLoadbalancerActiveProvisioningStatus(lbID) + _, err = os.waitLoadbalancerActiveProvisioningStatus(ctx, lbID) if err != nil { return nil, fmt.Errorf("loadbalancer %s not in ACTIVE status after creating listener, error: %v", lbID, err) } @@ -400,11 +400,11 @@ func (os *OpenStack) EnsureListener(name string, lbID string, secretRefs []strin } // EnsurePoolMembers ensure the pool and its members exist if deleted flag is not set, delete the pool and all its members otherwise. 
-func (os *OpenStack) EnsurePoolMembers(deleted bool, poolName string, lbID string, listenerID string, nodePort *int, nodes []*apiv1.Node) (*string, error) { +func (os *OpenStack) EnsurePoolMembers(ctx context.Context, deleted bool, poolName string, lbID string, listenerID string, nodePort *int, nodes []*apiv1.Node) (*string, error) { logger := log.WithFields(log.Fields{"lbID": lbID, "listenerID": listenerID, "poolName": poolName}) if deleted { - pool, err := openstackutil.GetPoolByName(os.Octavia, poolName, lbID) + pool, err := openstackutil.GetPoolByName(ctx, os.Octavia, poolName, lbID) if err != nil { if err != cpoerrors.ErrNotFound { return nil, fmt.Errorf("error getting pool %s: %v", poolName, err) @@ -413,12 +413,12 @@ func (os *OpenStack) EnsurePoolMembers(deleted bool, poolName string, lbID strin } // Delete the existing pool, members are deleted automatically - err = pools.Delete(context.TODO(), os.Octavia, pool.ID).ExtractErr() + err = pools.Delete(ctx, os.Octavia, pool.ID).ExtractErr() if err != nil && !cpoerrors.IsNotFound(err) { return nil, fmt.Errorf("error deleting pool %s: %v", pool.ID, err) } - _, err = os.waitLoadbalancerActiveProvisioningStatus(lbID) + _, err = os.waitLoadbalancerActiveProvisioningStatus(ctx, lbID) if err != nil { return nil, fmt.Errorf("error waiting for loadbalancer %s to be active: %v", lbID, err) } @@ -426,7 +426,7 @@ func (os *OpenStack) EnsurePoolMembers(deleted bool, poolName string, lbID strin return nil, nil } - pool, err := openstackutil.GetPoolByName(os.Octavia, poolName, lbID) + pool, err := openstackutil.GetPoolByName(ctx, os.Octavia, poolName, lbID) if err != nil { if err != cpoerrors.ErrNotFound { return nil, fmt.Errorf("error getting pool %s: %v", poolName, err) @@ -453,7 +453,7 @@ func (os *OpenStack) EnsurePoolMembers(deleted bool, poolName string, lbID strin Persistence: nil, } } - pool, err = pools.Create(context.TODO(), os.Octavia, opts).Extract() + pool, err = pools.Create(ctx, os.Octavia, opts).Extract() if err != nil { return nil, fmt.Errorf("error creating pool: %v", err) } @@ -462,7 +462,7 @@ func (os *OpenStack) EnsurePoolMembers(deleted bool, poolName string, lbID strin } - _, err = os.waitLoadbalancerActiveProvisioningStatus(lbID) + _, err = os.waitLoadbalancerActiveProvisioningStatus(ctx, lbID) if err != nil { return nil, fmt.Errorf("error waiting for loadbalancer %s to be active: %v", lbID, err) } @@ -470,11 +470,11 @@ func (os *OpenStack) EnsurePoolMembers(deleted bool, poolName string, lbID strin if os.config.Octavia.ProviderRequiresSerialAPICalls { logger.Info("updating pool members using serial API calls") // Serially update pool members - err = openstackutil.SeriallyReconcilePoolMembers(os.Octavia, pool, *nodePort, lbID, nodes) + err = openstackutil.SeriallyReconcilePoolMembers(ctx, os.Octavia, pool, *nodePort, lbID, nodes) if err != nil { return nil, fmt.Errorf("error reconciling pool members for pool %s: %v", pool.ID, err) } - _, err = os.waitLoadbalancerActiveProvisioningStatus(lbID) + _, err = os.waitLoadbalancerActiveProvisioningStatus(ctx, lbID) if err != nil { return nil, fmt.Errorf("error waiting for loadbalancer %s to be active: %v", lbID, err) } @@ -506,10 +506,10 @@ func (os *OpenStack) EnsurePoolMembers(deleted bool, poolName string, lbID strin return nil, fmt.Errorf("error because no members in pool: %s", pool.ID) } - if err := pools.BatchUpdateMembers(context.TODO(), os.Octavia, pool.ID, members).ExtractErr(); err != nil { + if err := pools.BatchUpdateMembers(ctx, os.Octavia, pool.ID, 
members).ExtractErr(); err != nil { return nil, fmt.Errorf("error batch updating members for pool %s: %v", pool.ID, err) } - _, err = os.waitLoadbalancerActiveProvisioningStatus(lbID) + _, err = os.waitLoadbalancerActiveProvisioningStatus(ctx, lbID) if err != nil { return nil, fmt.Errorf("error waiting for loadbalancer %s to be active: %v", lbID, err) } @@ -520,8 +520,8 @@ func (os *OpenStack) EnsurePoolMembers(deleted bool, poolName string, lbID strin } // UpdateLoadbalancerMembers update members for all the pools in the specified load balancer. -func (os *OpenStack) UpdateLoadbalancerMembers(lbID string, nodes []*apiv1.Node) error { - lbPools, err := openstackutil.GetPools(os.Octavia, lbID) +func (os *OpenStack) UpdateLoadbalancerMembers(ctx context.Context, lbID string, nodes []*apiv1.Node) error { + lbPools, err := openstackutil.GetPools(ctx, os.Octavia, lbID) if err != nil { return err } @@ -529,7 +529,7 @@ func (os *OpenStack) UpdateLoadbalancerMembers(lbID string, nodes []*apiv1.Node) for _, pool := range lbPools { log.WithFields(log.Fields{"poolID": pool.ID}).Debug("Starting to update pool members") - members, err := openstackutil.GetMembersbyPool(os.Octavia, pool.ID) + members, err := openstackutil.GetMembersbyPool(ctx, os.Octavia, pool.ID) if err != nil { log.WithFields(log.Fields{"poolID": pool.ID}).Errorf("Failed to get pool members: %v", err) continue @@ -538,7 +538,7 @@ func (os *OpenStack) UpdateLoadbalancerMembers(lbID string, nodes []*apiv1.Node) // Members have the same ProtocolPort nodePort := members[0].ProtocolPort - if _, err = os.EnsurePoolMembers(false, pool.Name, lbID, "", &nodePort, nodes); err != nil { + if _, err = os.EnsurePoolMembers(ctx, false, pool.Name, lbID, "", &nodePort, nodes); err != nil { return err } diff --git a/pkg/kms/barbican/barbican.go b/pkg/kms/barbican/barbican.go index f56b9a77df..ab36c8c3c8 100644 --- a/pkg/kms/barbican/barbican.go +++ b/pkg/kms/barbican/barbican.go @@ -38,12 +38,12 @@ func NewBarbicanClient(cfg Config) (*gophercloud.ServiceClient, error) { } // GetSecret gets unencrypted secret -func (barbican *Barbican) GetSecret(keyID string) ([]byte, error) { +func (barbican *Barbican) GetSecret(ctx context.Context, keyID string) ([]byte, error) { opts := secrets.GetPayloadOpts{ PayloadContentType: "application/octet-stream", } - key, err := secrets.GetPayload(context.TODO(), barbican.Client, keyID, opts).Extract() + key, err := secrets.GetPayload(ctx, barbican.Client, keyID, opts).Extract() if err != nil { return nil, err } diff --git a/pkg/kms/barbican/fake_barbican.go b/pkg/kms/barbican/fake_barbican.go index 4b2772cdb4..5efb6de0da 100644 --- a/pkg/kms/barbican/fake_barbican.go +++ b/pkg/kms/barbican/fake_barbican.go @@ -1,10 +1,13 @@ package barbican -import "encoding/hex" +import ( + "context" + "encoding/hex" +) type FakeBarbican struct { } -func (client *FakeBarbican) GetSecret(keyID string) ([]byte, error) { +func (client *FakeBarbican) GetSecret(_ context.Context, keyID string) ([]byte, error) { return hex.DecodeString("6368616e676520746869732070617373") } diff --git a/pkg/kms/server/server.go b/pkg/kms/server/server.go index b3f0242032..ff78744cab 100644 --- a/pkg/kms/server/server.go +++ b/pkg/kms/server/server.go @@ -22,7 +22,7 @@ const ( ) type BarbicanService interface { - GetSecret(keyID string) ([]byte, error) + GetSecret(ctx context.Context, keyID string) ([]byte, error) } // KMSserver struct @@ -116,7 +116,7 @@ func (s *KMSserver) Decrypt(ctx context.Context, req *pb.DecryptRequest) (*pb.De klog.V(4).Infof("Decrypt 
Request by Kubernetes api server") // TODO: consider using req.KeyId - key, err := s.barbican.GetSecret(s.cfg.KeyManager.KeyID) + key, err := s.barbican.GetSecret(ctx, s.cfg.KeyManager.KeyID) if err != nil { klog.V(4).Infof("Failed to get key %v: ", err) return nil, err @@ -135,7 +135,7 @@ func (s *KMSserver) Decrypt(ctx context.Context, req *pb.DecryptRequest) (*pb.De func (s *KMSserver) Encrypt(ctx context.Context, req *pb.EncryptRequest) (*pb.EncryptResponse, error) { klog.V(4).Infof("Encrypt Request by Kubernetes api server") - key, err := s.barbican.GetSecret(s.cfg.KeyManager.KeyID) + key, err := s.barbican.GetSecret(ctx, s.cfg.KeyManager.KeyID) if err != nil { klog.V(4).Infof("Failed to get key %v: ", err) diff --git a/pkg/openstack/loadbalancer.go b/pkg/openstack/loadbalancer.go index 6083fbc955..03afff336f 100644 --- a/pkg/openstack/loadbalancer.go +++ b/pkg/openstack/loadbalancer.go @@ -154,13 +154,13 @@ type listenerKey struct { } // getLoadbalancerByName get the load balancer which is in valid status by the given name/legacy name. -func getLoadbalancerByName(client *gophercloud.ServiceClient, name string, legacyName string) (*loadbalancers.LoadBalancer, error) { +func getLoadbalancerByName(ctx context.Context, client *gophercloud.ServiceClient, name string, legacyName string) (*loadbalancers.LoadBalancer, error) { var validLBs []loadbalancers.LoadBalancer opts := loadbalancers.ListOpts{ Name: name, } - allLoadbalancers, err := openstackutil.GetLoadBalancers(client, opts) + allLoadbalancers, err := openstackutil.GetLoadBalancers(ctx, client, opts) if err != nil { return nil, err } @@ -171,7 +171,7 @@ func getLoadbalancerByName(client *gophercloud.ServiceClient, name string, legac opts := loadbalancers.ListOpts{ Name: legacyName, } - allLoadbalancers, err = openstackutil.GetLoadBalancers(client, opts) + allLoadbalancers, err = openstackutil.GetLoadBalancers(ctx, client, opts) if err != nil { return nil, err } @@ -277,8 +277,8 @@ func (lbaas *LbaasV2) createOctaviaLoadBalancer(ctx context.Context, name, clust if !lbaas.opts.ProviderRequiresSerialAPICalls { for portIndex, port := range service.Spec.Ports { - listenerCreateOpt := lbaas.buildListenerCreateOpt(port, svcConf, cpoutil.Sprintf255(listenerFormat, portIndex, name)) - members, newMembers, err := lbaas.buildCreateMemberOpts(port, nodes, svcConf) + listenerCreateOpt := lbaas.buildListenerCreateOpt(ctx, port, svcConf, cpoutil.Sprintf255(listenerFormat, portIndex, name)) + members, newMembers, err := lbaas.buildCreateMemberOpts(ctx, port, nodes, svcConf) if err != nil { return nil, err } @@ -287,7 +287,7 @@ func (lbaas *LbaasV2) createOctaviaLoadBalancer(ctx context.Context, name, clust // Pool name must be provided to create fully populated loadbalancer var withHealthMonitor string if svcConf.enableMonitor { - opts := lbaas.buildMonitorCreateOpts(svcConf, port, cpoutil.Sprintf255(monitorFormat, portIndex, name)) + opts := lbaas.buildMonitorCreateOpts(ctx, svcConf, port, cpoutil.Sprintf255(monitorFormat, portIndex, name)) poolCreateOpt.Monitor = &opts withHealthMonitor = " with healthmonitor" } @@ -313,10 +313,10 @@ func (lbaas *LbaasV2) createOctaviaLoadBalancer(ctx context.Context, name, clust svcConf.lbMemberSubnetID = loadbalancer.VipSubnetID } - if loadbalancer, err = openstackutil.WaitActiveAndGetLoadBalancer(lbaas.lb, loadbalancer.ID); err != nil { + if loadbalancer, err = openstackutil.WaitActiveAndGetLoadBalancer(ctx, lbaas.lb, loadbalancer.ID); err != nil { if loadbalancer != nil && loadbalancer.ProvisioningStatus == 
errorStatus { // If LB landed in ERROR state we should delete it and retry the creation later. - if err = lbaas.deleteLoadBalancer(loadbalancer, service, svcConf, true); err != nil { + if err = lbaas.deleteLoadBalancer(ctx, loadbalancer, service, svcConf, true); err != nil { return nil, fmt.Errorf("loadbalancer %s is in ERROR state and there was an error when removing it: %v", loadbalancer.ID, err) } return nil, fmt.Errorf("loadbalancer %s has gone into ERROR state, please check Octavia for details. Load balancer was "+ @@ -337,9 +337,9 @@ func (lbaas *LbaasV2) GetLoadBalancer(ctx context.Context, clusterName string, s var err error if lbID != "" { - loadbalancer, err = openstackutil.GetLoadbalancerByID(lbaas.lb, lbID) + loadbalancer, err = openstackutil.GetLoadbalancerByID(ctx, lbaas.lb, lbID) } else { - loadbalancer, err = getLoadbalancerByName(lbaas.lb, name, legacyName) + loadbalancer, err = getLoadbalancerByName(ctx, lbaas.lb, name, legacyName) } if err != nil && cpoerrors.IsNotFound(err) { return nil, false, nil @@ -520,9 +520,9 @@ func getSubnetIDForLB(ctx context.Context, network *gophercloud.ServiceClient, n } // isPortMember returns true if IP and subnetID are one of the FixedIPs on the port -func isPortMember(port PortWithPortSecurity, IP string, subnetID string) bool { +func isPortMember(port PortWithPortSecurity, ip string, subnetID string) bool { for _, fixedIP := range port.FixedIPs { - if (subnetID == "" || subnetID == fixedIP.SubnetID) && IP == fixedIP.IPAddress { + if (subnetID == "" || subnetID == fixedIP.SubnetID) && ip == fixedIP.IPAddress { return true } } @@ -530,24 +530,24 @@ func isPortMember(port PortWithPortSecurity, IP string, subnetID string) bool { } // deleteListeners deletes listeners and its default pool. -func (lbaas *LbaasV2) deleteListeners(lbID string, listenerList []listeners.Listener) error { +func (lbaas *LbaasV2) deleteListeners(ctx context.Context, lbID string, listenerList []listeners.Listener) error { for _, listener := range listenerList { klog.InfoS("Deleting listener", "listenerID", listener.ID, "lbID", lbID) - pool, err := openstackutil.GetPoolByListener(lbaas.lb, lbID, listener.ID) + pool, err := openstackutil.GetPoolByListener(ctx, lbaas.lb, lbID, listener.ID) if err != nil && err != cpoerrors.ErrNotFound { return fmt.Errorf("error getting pool for obsolete listener %s: %v", listener.ID, err) } if pool != nil { klog.InfoS("Deleting pool", "poolID", pool.ID, "listenerID", listener.ID, "lbID", lbID) // Delete pool automatically deletes all its members. - if err := openstackutil.DeletePool(lbaas.lb, pool.ID, lbID); err != nil { + if err := openstackutil.DeletePool(ctx, lbaas.lb, pool.ID, lbID); err != nil { return err } klog.InfoS("Deleted pool", "poolID", pool.ID, "listenerID", listener.ID, "lbID", lbID) } - if err := openstackutil.DeleteListener(lbaas.lb, listener.ID, lbID); err != nil { + if err := openstackutil.DeleteListener(ctx, lbaas.lb, listener.ID, lbID); err != nil { return err } klog.InfoS("Deleted listener", "listenerID", listener.ID, "lbID", lbID) @@ -557,13 +557,13 @@ func (lbaas *LbaasV2) deleteListeners(lbID string, listenerList []listeners.List } // deleteOctaviaListeners is used not simply for deleting listeners but only deleting listeners used to be created by the Service. 
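Restated as a single predicate, the ownership rule described above and checked in the function below (nothing new, just the same condition spelled out):

    // A listener is considered owned by this Service when the Service owns the LB
    // and the listener predates tagging (no tags), or when it carries this LB name as a tag.
    owned := (isLBOwner && len(listener.Tags) == 0) || slices.Contains(listener.Tags, lbName)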
-func (lbaas *LbaasV2) deleteOctaviaListeners(lbID string, listenerList []listeners.Listener, isLBOwner bool, lbName string) error { +func (lbaas *LbaasV2) deleteOctaviaListeners(ctx context.Context, lbID string, listenerList []listeners.Listener, isLBOwner bool, lbName string) error { for _, listener := range listenerList { // If the listener was created by this Service before or after supporting shared LB. if (isLBOwner && len(listener.Tags) == 0) || slices.Contains(listener.Tags, lbName) { klog.InfoS("Deleting listener", "listenerID", listener.ID, "lbID", lbID) - pool, err := openstackutil.GetPoolByListener(lbaas.lb, lbID, listener.ID) + pool, err := openstackutil.GetPoolByListener(ctx, lbaas.lb, lbID, listener.ID) if err != nil && err != cpoerrors.ErrNotFound { return fmt.Errorf("error getting pool for listener %s: %v", listener.ID, err) } @@ -571,13 +571,13 @@ func (lbaas *LbaasV2) deleteOctaviaListeners(lbID string, listenerList []listene klog.InfoS("Deleting pool", "poolID", pool.ID, "listenerID", listener.ID, "lbID", lbID) // Delete pool automatically deletes all its members. - if err := openstackutil.DeletePool(lbaas.lb, pool.ID, lbID); err != nil { + if err := openstackutil.DeletePool(ctx, lbaas.lb, pool.ID, lbID); err != nil { return err } klog.InfoS("Deleted pool", "poolID", pool.ID, "listenerID", listener.ID, "lbID", lbID) } - if err := openstackutil.DeleteListener(lbaas.lb, listener.ID, lbID); err != nil { + if err := openstackutil.DeleteListener(ctx, lbaas.lb, listener.ID, lbID); err != nil { return err } @@ -770,7 +770,7 @@ func (lbaas *LbaasV2) ensureFloatingIP(ctx context.Context, clusterName string, return lb.VipAddress, nil } -func (lbaas *LbaasV2) ensureOctaviaHealthMonitor(lbID string, name string, pool *v2pools.Pool, port corev1.ServicePort, svcConf *serviceConfig) error { +func (lbaas *LbaasV2) ensureOctaviaHealthMonitor(ctx context.Context, lbID string, name string, pool *v2pools.Pool, port corev1.ServicePort, svcConf *serviceConfig) error { monitorID := pool.MonitorID if monitorID == "" { @@ -781,31 +781,31 @@ func (lbaas *LbaasV2) ensureOctaviaHealthMonitor(lbID string, name string, pool // a new monitor must be created klog.V(2).Infof("Creating monitor for pool %s", pool.ID) - createOpts := lbaas.buildMonitorCreateOpts(svcConf, port, name) - return lbaas.createOctaviaHealthMonitor(createOpts, pool.ID, lbID) + createOpts := lbaas.buildMonitorCreateOpts(ctx, svcConf, port, name) + return lbaas.createOctaviaHealthMonitor(ctx, createOpts, pool.ID, lbID) } // an existing monitor must be deleted if !svcConf.enableMonitor { klog.Infof("Deleting health monitor %s for pool %s", monitorID, pool.ID) - return openstackutil.DeleteHealthMonitor(lbaas.lb, monitorID, lbID) + return openstackutil.DeleteHealthMonitor(ctx, lbaas.lb, monitorID, lbID) } // get an existing monitor status - monitor, err := openstackutil.GetHealthMonitor(lbaas.lb, monitorID) + monitor, err := openstackutil.GetHealthMonitor(ctx, lbaas.lb, monitorID) if err != nil { // return err on 404 is ok, since we get monitorID dynamically from the pool return err } // recreate health monitor with a new type - createOpts := lbaas.buildMonitorCreateOpts(svcConf, port, name) + createOpts := lbaas.buildMonitorCreateOpts(ctx, svcConf, port, name) if createOpts.Type != monitor.Type { klog.InfoS("Recreating health monitor for the pool", "pool", pool.ID, "oldMonitor", monitorID) - if err := openstackutil.DeleteHealthMonitor(lbaas.lb, monitorID, lbID); err != nil { + if err := openstackutil.DeleteHealthMonitor(ctx, 
lbaas.lb, monitorID, lbID); err != nil { return err } - return lbaas.createOctaviaHealthMonitor(createOpts, pool.ID, lbID) + return lbaas.createOctaviaHealthMonitor(ctx, createOpts, pool.ID, lbID) } // update new monitor parameters @@ -822,13 +822,13 @@ func (lbaas *LbaasV2) ensureOctaviaHealthMonitor(lbID string, name string, pool MaxRetriesDown: svcConf.healthMonitorMaxRetriesDown, } klog.Infof("Updating health monitor %s updateOpts %+v", monitorID, updateOpts) - return openstackutil.UpdateHealthMonitor(lbaas.lb, monitorID, updateOpts, lbID) + return openstackutil.UpdateHealthMonitor(ctx, lbaas.lb, monitorID, updateOpts, lbID) } return nil } -func (lbaas *LbaasV2) canUseHTTPMonitor(port corev1.ServicePort) bool { +func (lbaas *LbaasV2) canUseHTTPMonitor(ctx context.Context, port corev1.ServicePort) bool { if lbaas.opts.LBProvider == "ovn" { // ovn-octavia-provider doesn't support HTTP monitors at all. We got to avoid creating it with ovn. return false @@ -836,14 +836,14 @@ func (lbaas *LbaasV2) canUseHTTPMonitor(port corev1.ServicePort) bool { if port.Protocol == corev1.ProtocolUDP { // Older Octavia versions or OVN provider doesn't support HTTP monitors on UDP pools. We got to check if that's the case. - return openstackutil.IsOctaviaFeatureSupported(lbaas.lb, openstackutil.OctaviaFeatureHTTPMonitorsOnUDP, lbaas.opts.LBProvider) + return openstackutil.IsOctaviaFeatureSupported(ctx, lbaas.lb, openstackutil.OctaviaFeatureHTTPMonitorsOnUDP, lbaas.opts.LBProvider) } return true } // buildMonitorCreateOpts returns a v2monitors.CreateOpts without PoolID for consumption of both, fully popuplated Loadbalancers and Monitors. -func (lbaas *LbaasV2) buildMonitorCreateOpts(svcConf *serviceConfig, port corev1.ServicePort, name string) v2monitors.CreateOpts { +func (lbaas *LbaasV2) buildMonitorCreateOpts(ctx context.Context, svcConf *serviceConfig, port corev1.ServicePort, name string) v2monitors.CreateOpts { opts := v2monitors.CreateOpts{ Name: name, Type: string(port.Protocol), @@ -855,7 +855,7 @@ func (lbaas *LbaasV2) buildMonitorCreateOpts(svcConf *serviceConfig, port corev1 if port.Protocol == corev1.ProtocolUDP { opts.Type = "UDP-CONNECT" } - if svcConf.healthCheckNodePort > 0 && lbaas.canUseHTTPMonitor(port) { + if svcConf.healthCheckNodePort > 0 && lbaas.canUseHTTPMonitor(ctx, port) { opts.Type = "HTTP" opts.URLPath = "/healthz" opts.HTTPMethod = "GET" @@ -864,10 +864,10 @@ func (lbaas *LbaasV2) buildMonitorCreateOpts(svcConf *serviceConfig, port corev1 return opts } -func (lbaas *LbaasV2) createOctaviaHealthMonitor(createOpts v2monitors.CreateOpts, poolID, lbID string) error { +func (lbaas *LbaasV2) createOctaviaHealthMonitor(ctx context.Context, createOpts v2monitors.CreateOpts, poolID, lbID string) error { // populate PoolID, attribute is omitted for consumption of the createOpts for fully populated Loadbalancer createOpts.PoolID = poolID - monitor, err := openstackutil.CreateHealthMonitor(lbaas.lb, createOpts, lbID) + monitor, err := openstackutil.CreateHealthMonitor(ctx, lbaas.lb, createOpts, lbID) if err != nil { return err } @@ -877,8 +877,8 @@ func (lbaas *LbaasV2) createOctaviaHealthMonitor(createOpts v2monitors.CreateOpt } // Make sure the pool is created for the Service, nodes are added as pool members. 
-func (lbaas *LbaasV2) ensureOctaviaPool(lbID string, name string, listener *listeners.Listener, service *corev1.Service, port corev1.ServicePort, nodes []*corev1.Node, svcConf *serviceConfig) (*v2pools.Pool, error) { - pool, err := openstackutil.GetPoolByListener(lbaas.lb, lbID, listener.ID) +func (lbaas *LbaasV2) ensureOctaviaPool(ctx context.Context, lbID string, name string, listener *listeners.Listener, service *corev1.Service, port corev1.ServicePort, nodes []*corev1.Node, svcConf *serviceConfig) (*v2pools.Pool, error) { + pool, err := openstackutil.GetPoolByListener(ctx, lbaas.lb, lbID, listener.ID) if err != nil && err != cpoerrors.ErrNotFound { return nil, fmt.Errorf("error getting pool for listener %s: %v", listener.ID, err) } @@ -896,7 +896,7 @@ func (lbaas *LbaasV2) ensureOctaviaPool(lbID string, name string, listener *list klog.InfoS("Deleting unused pool", "poolID", pool.ID, "listenerID", listener.ID, "lbID", lbID) // Delete pool automatically deletes all its members. - if err := openstackutil.DeletePool(lbaas.lb, pool.ID, lbID); err != nil { + if err := openstackutil.DeletePool(ctx, lbaas.lb, pool.ID, lbID); err != nil { return nil, err } pool = nil @@ -912,7 +912,7 @@ func (lbaas *LbaasV2) ensureOctaviaPool(lbID string, name string, listener *list } if pool != nil && pool.LBMethod != poolLbMethod { klog.InfoS("Updating LoadBalancer LBMethod", "poolID", pool.ID, "listenerID", listener.ID, "lbID", lbID) - err = openstackutil.UpdatePool(lbaas.lb, lbID, pool.ID, v2pools.UpdateOpts{LBMethod: v2pools.LBMethod(poolLbMethod)}) + err = openstackutil.UpdatePool(ctx, lbaas.lb, lbID, pool.ID, v2pools.UpdateOpts{LBMethod: v2pools.LBMethod(poolLbMethod)}) if err != nil { err = PreserveGopherError(err) msg := fmt.Sprintf("Error updating LB method for LoadBalancer: %v", err) @@ -928,7 +928,7 @@ func (lbaas *LbaasV2) ensureOctaviaPool(lbID string, name string, listener *list createOpt.ListenerID = listener.ID klog.InfoS("Creating pool", "listenerID", listener.ID, "protocol", createOpt.Protocol) - pool, err = openstackutil.CreatePool(lbaas.lb, createOpt, lbID) + pool, err = openstackutil.CreatePool(ctx, lbaas.lb, createOpt, lbID) if err != nil { return nil, err } @@ -939,14 +939,14 @@ func (lbaas *LbaasV2) ensureOctaviaPool(lbID string, name string, listener *list klog.V(2).Infof("Using serial API calls to update members for pool %s", pool.ID) var nodePort int = int(port.NodePort) - if err := openstackutil.SeriallyReconcilePoolMembers(lbaas.lb, pool, nodePort, lbID, nodes); err != nil { + if err := openstackutil.SeriallyReconcilePoolMembers(ctx, lbaas.lb, pool, nodePort, lbID, nodes); err != nil { return nil, err } return pool, nil } curMembers := sets.New[string]() - poolMembers, err := openstackutil.GetMembersbyPool(lbaas.lb, pool.ID) + poolMembers, err := openstackutil.GetMembersbyPool(ctx, lbaas.lb, pool.ID) if err != nil { klog.Errorf("failed to get members in the pool %s: %v", pool.ID, err) } @@ -954,14 +954,14 @@ func (lbaas *LbaasV2) ensureOctaviaPool(lbID string, name string, listener *list curMembers.Insert(fmt.Sprintf("%s-%s-%d-%d", m.Name, m.Address, m.ProtocolPort, m.MonitorPort)) } - members, newMembers, err := lbaas.buildBatchUpdateMemberOpts(port, nodes, svcConf) + members, newMembers, err := lbaas.buildBatchUpdateMemberOpts(ctx, port, nodes, svcConf) if err != nil { return nil, err } if !curMembers.Equal(newMembers) { klog.V(2).Infof("Updating %d members for pool %s", len(members), pool.ID) - if err := openstackutil.BatchUpdatePoolMembers(lbaas.lb, lbID, pool.ID, members); 
err != nil { + if err := openstackutil.BatchUpdatePoolMembers(ctx, lbaas.lb, lbID, pool.ID, members); err != nil { return nil, err } klog.V(2).Infof("Successfully updated %d members for pool %s", len(members), pool.ID) @@ -1012,7 +1012,7 @@ func (lbaas *LbaasV2) buildPoolCreateOpt(listenerProtocol string, service *corev } // buildBatchUpdateMemberOpts returns v2pools.BatchUpdateMemberOpts array for Services and Nodes alongside a list of member names -func (lbaas *LbaasV2) buildBatchUpdateMemberOpts(port corev1.ServicePort, nodes []*corev1.Node, svcConf *serviceConfig) ([]v2pools.BatchUpdateMemberOpts, sets.Set[string], error) { +func (lbaas *LbaasV2) buildBatchUpdateMemberOpts(ctx context.Context, port corev1.ServicePort, nodes []*corev1.Node, svcConf *serviceConfig) ([]v2pools.BatchUpdateMemberOpts, sets.Set[string], error) { var members []v2pools.BatchUpdateMemberOpts newMembers := sets.New[string]() @@ -1040,7 +1040,7 @@ func (lbaas *LbaasV2) buildBatchUpdateMemberOpts(port corev1.ServicePort, nodes Name: &node.Name, SubnetID: memberSubnetID, } - if svcConf.healthCheckNodePort > 0 && lbaas.canUseHTTPMonitor(port) { + if svcConf.healthCheckNodePort > 0 && lbaas.canUseHTTPMonitor(ctx, port) { member.MonitorPort = &svcConf.healthCheckNodePort } members = append(members, member) @@ -1050,8 +1050,8 @@ func (lbaas *LbaasV2) buildBatchUpdateMemberOpts(port corev1.ServicePort, nodes return members, newMembers, nil } -func (lbaas *LbaasV2) buildCreateMemberOpts(port corev1.ServicePort, nodes []*corev1.Node, svcConf *serviceConfig) ([]v2pools.CreateMemberOpts, sets.Set[string], error) { - batchUpdateMemberOpts, newMembers, err := lbaas.buildBatchUpdateMemberOpts(port, nodes, svcConf) +func (lbaas *LbaasV2) buildCreateMemberOpts(ctx context.Context, port corev1.ServicePort, nodes []*corev1.Node, svcConf *serviceConfig) ([]v2pools.CreateMemberOpts, sets.Set[string], error) { + batchUpdateMemberOpts, newMembers, err := lbaas.buildBatchUpdateMemberOpts(ctx, port, nodes, svcConf) if err != nil { return nil, nil, err } @@ -1076,19 +1076,19 @@ func (lbaas *LbaasV2) buildCreateMemberOpts(port corev1.ServicePort, nodes []*co } // Make sure the listener is created for Service -func (lbaas *LbaasV2) ensureOctaviaListener(lbID string, name string, curListenerMapping map[listenerKey]*listeners.Listener, port corev1.ServicePort, svcConf *serviceConfig) (*listeners.Listener, error) { +func (lbaas *LbaasV2) ensureOctaviaListener(ctx context.Context, lbID string, name string, curListenerMapping map[listenerKey]*listeners.Listener, port corev1.ServicePort, svcConf *serviceConfig) (*listeners.Listener, error) { listener, isPresent := curListenerMapping[listenerKey{ Protocol: getListenerProtocol(port.Protocol, svcConf), Port: int(port.Port), }] if !isPresent { - listenerCreateOpt := lbaas.buildListenerCreateOpt(port, svcConf, name) + listenerCreateOpt := lbaas.buildListenerCreateOpt(ctx, port, svcConf, name) listenerCreateOpt.LoadbalancerID = lbID klog.V(2).Infof("Creating listener for port %d using protocol %s", int(port.Port), listenerCreateOpt.Protocol) var err error - listener, err = openstackutil.CreateListener(lbaas.lb, lbID, listenerCreateOpt) + listener, err = openstackutil.CreateListener(ctx, lbaas.lb, lbID, listenerCreateOpt) if err != nil { return nil, fmt.Errorf("failed to create listener for loadbalancer %s: %v", lbID, err) } @@ -1130,7 +1130,7 @@ func (lbaas *LbaasV2) ensureOctaviaListener(lbID string, name string, curListene updateOpts.DefaultTlsContainerRef = &svcConf.tlsContainerRef listenerChanged 
= true } - if openstackutil.IsOctaviaFeatureSupported(lbaas.lb, openstackutil.OctaviaFeatureTimeout, lbaas.opts.LBProvider) { + if openstackutil.IsOctaviaFeatureSupported(ctx, lbaas.lb, openstackutil.OctaviaFeatureTimeout, lbaas.opts.LBProvider) { if svcConf.timeoutClientData != listener.TimeoutClientData { updateOpts.TimeoutClientData = &svcConf.timeoutClientData listenerChanged = true @@ -1148,7 +1148,7 @@ func (lbaas *LbaasV2) ensureOctaviaListener(lbID string, name string, curListene listenerChanged = true } } - if openstackutil.IsOctaviaFeatureSupported(lbaas.lb, openstackutil.OctaviaFeatureVIPACL, lbaas.opts.LBProvider) { + if openstackutil.IsOctaviaFeatureSupported(ctx, lbaas.lb, openstackutil.OctaviaFeatureVIPACL, lbaas.opts.LBProvider) { if !cpoutil.StringListEqual(svcConf.allowedCIDR, listener.AllowedCIDRs) { updateOpts.AllowedCIDRs = &svcConf.allowedCIDR listenerChanged = true @@ -1157,7 +1157,7 @@ func (lbaas *LbaasV2) ensureOctaviaListener(lbID string, name string, curListene if listenerChanged { klog.InfoS("Updating listener", "listenerID", listener.ID, "lbID", lbID, "updateOpts", updateOpts) - if err := openstackutil.UpdateListener(lbaas.lb, lbID, listener.ID, updateOpts); err != nil { + if err := openstackutil.UpdateListener(ctx, lbaas.lb, lbID, listener.ID, updateOpts); err != nil { return nil, fmt.Errorf("failed to update listener %s of loadbalancer %s: %v", listener.ID, lbID, err) } klog.InfoS("Updated listener", "listenerID", listener.ID, "lbID", lbID) @@ -1168,7 +1168,7 @@ func (lbaas *LbaasV2) ensureOctaviaListener(lbID string, name string, curListene } // buildListenerCreateOpt returns listeners.CreateOpts for a specific Service port and configuration -func (lbaas *LbaasV2) buildListenerCreateOpt(port corev1.ServicePort, svcConf *serviceConfig, name string) listeners.CreateOpts { +func (lbaas *LbaasV2) buildListenerCreateOpt(ctx context.Context, port corev1.ServicePort, svcConf *serviceConfig, name string) listeners.CreateOpts { listenerCreateOpt := listeners.CreateOpts{ Name: name, Protocol: listeners.Protocol(port.Protocol), @@ -1180,7 +1180,7 @@ func (lbaas *LbaasV2) buildListenerCreateOpt(port corev1.ServicePort, svcConf *s listenerCreateOpt.Tags = []string{svcConf.lbName} } - if openstackutil.IsOctaviaFeatureSupported(lbaas.lb, openstackutil.OctaviaFeatureTimeout, lbaas.opts.LBProvider) { + if openstackutil.IsOctaviaFeatureSupported(ctx, lbaas.lb, openstackutil.OctaviaFeatureTimeout, lbaas.opts.LBProvider) { listenerCreateOpt.TimeoutClientData = &svcConf.timeoutClientData listenerCreateOpt.TimeoutMemberConnect = &svcConf.timeoutMemberConnect listenerCreateOpt.TimeoutMemberData = &svcConf.timeoutMemberData @@ -1204,7 +1204,7 @@ func (lbaas *LbaasV2) buildListenerCreateOpt(port corev1.ServicePort, svcConf *s listenerCreateOpt.Protocol = listeners.ProtocolHTTP } - if openstackutil.IsOctaviaFeatureSupported(lbaas.lb, openstackutil.OctaviaFeatureVIPACL, lbaas.opts.LBProvider) { + if openstackutil.IsOctaviaFeatureSupported(ctx, lbaas.lb, openstackutil.OctaviaFeatureVIPACL, lbaas.opts.LBProvider) { if len(svcConf.allowedCIDR) > 0 { listenerCreateOpt.AllowedCIDRs = svcConf.allowedCIDR } @@ -1339,12 +1339,12 @@ func (lbaas *LbaasV2) checkServiceUpdate(ctx context.Context, service *corev1.Se } } } - return lbaas.makeSvcConf(serviceName, service, svcConf) + return lbaas.makeSvcConf(ctx, serviceName, service, svcConf) } -func (lbaas *LbaasV2) checkServiceDelete(service *corev1.Service, svcConf *serviceConfig) error { +func (lbaas *LbaasV2) checkServiceDelete(ctx 
context.Context, service *corev1.Service, svcConf *serviceConfig) error { svcConf.lbID = getStringFromServiceAnnotation(service, ServiceAnnotationLoadBalancerID, "") - svcConf.supportLBTags = openstackutil.IsOctaviaFeatureSupported(lbaas.lb, openstackutil.OctaviaFeatureTags, lbaas.opts.LBProvider) + svcConf.supportLBTags = openstackutil.IsOctaviaFeatureSupported(ctx, lbaas.lb, openstackutil.OctaviaFeatureTags, lbaas.opts.LBProvider) // This affects the protocol of listener and pool svcConf.keepClientIP = getBoolFromServiceAnnotation(service, ServiceAnnotationLoadBalancerXForwardedFor, false) @@ -1533,14 +1533,13 @@ func (lbaas *LbaasV2) checkService(ctx context.Context, service *corev1.Service, } else { klog.V(4).Infof("Ensure an internal loadbalancer service.") } - return lbaas.makeSvcConf(serviceName, service, svcConf) + return lbaas.makeSvcConf(ctx, serviceName, service, svcConf) } -func (lbaas *LbaasV2) makeSvcConf(serviceName string, service *corev1.Service, svcConf *serviceConfig) error { +func (lbaas *LbaasV2) makeSvcConf(ctx context.Context, serviceName string, service *corev1.Service, svcConf *serviceConfig) error { svcConf.connLimit = getIntFromServiceAnnotation(service, ServiceAnnotationLoadBalancerConnLimit, -1) svcConf.lbID = getStringFromServiceAnnotation(service, ServiceAnnotationLoadBalancerID, "") - svcConf.poolLbMethod = getStringFromServiceAnnotation(service, ServiceAnnotationLoadBalancerLbMethod, "") - svcConf.supportLBTags = openstackutil.IsOctaviaFeatureSupported(lbaas.lb, openstackutil.OctaviaFeatureTags, lbaas.opts.LBProvider) + svcConf.supportLBTags = openstackutil.IsOctaviaFeatureSupported(ctx, lbaas.lb, openstackutil.OctaviaFeatureTags, lbaas.opts.LBProvider) // Get service node-selector annotations svcConf.nodeSelectors = getKeyValueFromServiceAnnotation(service, ServiceAnnotationLoadBalancerNodeSelector, lbaas.opts.NodeSelector) @@ -1559,7 +1558,7 @@ func (lbaas *LbaasV2) makeSvcConf(serviceName string, service *corev1.Service, s } svcConf.keepClientIP = keepClientIP - if openstackutil.IsOctaviaFeatureSupported(lbaas.lb, openstackutil.OctaviaFeatureTimeout, lbaas.opts.LBProvider) { + if openstackutil.IsOctaviaFeatureSupported(ctx, lbaas.lb, openstackutil.OctaviaFeatureTimeout, lbaas.opts.LBProvider) { svcConf.timeoutClientData = getIntFromServiceAnnotation(service, ServiceAnnotationLoadBalancerTimeoutClientData, 50000) svcConf.timeoutMemberConnect = getIntFromServiceAnnotation(service, ServiceAnnotationLoadBalancerTimeoutMemberConnect, 5000) svcConf.timeoutMemberData = getIntFromServiceAnnotation(service, ServiceAnnotationLoadBalancerTimeoutMemberData, 50000) @@ -1570,7 +1569,7 @@ func (lbaas *LbaasV2) makeSvcConf(serviceName string, service *corev1.Service, s if err != nil { return fmt.Errorf("failed to get source ranges for loadbalancer service %s: %v", serviceName, err) } - if openstackutil.IsOctaviaFeatureSupported(lbaas.lb, openstackutil.OctaviaFeatureVIPACL, lbaas.opts.LBProvider) { + if openstackutil.IsOctaviaFeatureSupported(ctx, lbaas.lb, openstackutil.OctaviaFeatureVIPACL, lbaas.opts.LBProvider) { klog.V(4).Info("LoadBalancerSourceRanges is suppported") svcConf.allowedCIDR = sourceRanges.StringSlice() } else if lbaas.opts.LBProvider == "ovn" && lbaas.opts.ManageSecurityGroups { @@ -1582,12 +1581,12 @@ func (lbaas *LbaasV2) makeSvcConf(serviceName string, service *corev1.Service, s klog.Warningf(msg, serviceName) } - if openstackutil.IsOctaviaFeatureSupported(lbaas.lb, openstackutil.OctaviaFeatureFlavors, lbaas.opts.LBProvider) { + if 
openstackutil.IsOctaviaFeatureSupported(ctx, lbaas.lb, openstackutil.OctaviaFeatureFlavors, lbaas.opts.LBProvider) { svcConf.flavorID = getStringFromServiceAnnotation(service, ServiceAnnotationLoadBalancerFlavorID, lbaas.opts.FlavorID) } availabilityZone := getStringFromServiceAnnotation(service, ServiceAnnotationLoadBalancerAvailabilityZone, lbaas.opts.AvailabilityZone) - if openstackutil.IsOctaviaFeatureSupported(lbaas.lb, openstackutil.OctaviaFeatureAvailabilityZones, lbaas.opts.LBProvider) { + if openstackutil.IsOctaviaFeatureSupported(ctx, lbaas.lb, openstackutil.OctaviaFeatureAvailabilityZones, lbaas.opts.LBProvider) { svcConf.availabilityZone = availabilityZone } else if availabilityZone != "" { msg := "LoadBalancer Availability Zones aren't supported. Please, upgrade Octavia API to version 2.14 or later (Ussuri release) to use them for Service %s" @@ -1688,7 +1687,7 @@ func (lbaas *LbaasV2) ensureOctaviaLoadBalancer(ctx context.Context, clusterName // Check the load balancer in the Service annotation. if svcConf.lbID != "" { - loadbalancer, err = openstackutil.GetLoadbalancerByID(lbaas.lb, svcConf.lbID) + loadbalancer, err = openstackutil.GetLoadbalancerByID(ctx, lbaas.lb, svcConf.lbID) if err != nil { return nil, fmt.Errorf("failed to get load balancer %s: %v", svcConf.lbID, err) } @@ -1698,7 +1697,7 @@ func (lbaas *LbaasV2) ensureOctaviaLoadBalancer(ctx context.Context, clusterName msg := "Loadbalancer %s has a name of %s with incorrect cluster-name component. Renaming it to %s." klog.Infof(msg, loadbalancer.ID, loadbalancer.Name, lbName) lbaas.eventRecorder.Eventf(service, corev1.EventTypeWarning, eventLBRename, msg, loadbalancer.ID, loadbalancer.Name, lbName) - loadbalancer, err = renameLoadBalancer(lbaas.lb, loadbalancer, lbName, clusterName) + loadbalancer, err = renameLoadBalancer(ctx, lbaas.lb, loadbalancer, lbName, clusterName) if err != nil { return nil, fmt.Errorf("failed to update load balancer %s with an updated name", svcConf.lbID) } @@ -1736,7 +1735,7 @@ func (lbaas *LbaasV2) ensureOctaviaLoadBalancer(ctx context.Context, clusterName } } else { legacyName := lbaas.getLoadBalancerLegacyName(service) - loadbalancer, err = getLoadbalancerByName(lbaas.lb, lbName, legacyName) + loadbalancer, err = getLoadbalancerByName(ctx, lbaas.lb, lbName, legacyName) if err != nil { if err != cpoerrors.ErrNotFound { return nil, fmt.Errorf("error getting loadbalancer for Service %s: %v", serviceName, err) @@ -1759,7 +1758,7 @@ func (lbaas *LbaasV2) ensureOctaviaLoadBalancer(ctx context.Context, clusterName return nil, fmt.Errorf("load balancer %s is not ACTIVE, current provisioning status: %s", loadbalancer.ID, loadbalancer.ProvisioningStatus) } - loadbalancer.Listeners, err = openstackutil.GetListenersByLoadBalancerID(lbaas.lb, loadbalancer.ID) + loadbalancer.Listeners, err = openstackutil.GetListenersByLoadBalancerID(ctx, lbaas.lb, loadbalancer.ID) if err != nil { return nil, err } @@ -1783,17 +1782,17 @@ func (lbaas *LbaasV2) ensureOctaviaLoadBalancer(ctx context.Context, clusterName } for portIndex, port := range service.Spec.Ports { - listener, err := lbaas.ensureOctaviaListener(loadbalancer.ID, cpoutil.Sprintf255(listenerFormat, portIndex, lbName), curListenerMapping, port, svcConf) + listener, err := lbaas.ensureOctaviaListener(ctx, loadbalancer.ID, cpoutil.Sprintf255(listenerFormat, portIndex, lbName), curListenerMapping, port, svcConf) if err != nil { return nil, err } - pool, err := lbaas.ensureOctaviaPool(loadbalancer.ID, cpoutil.Sprintf255(poolFormat, portIndex, lbName), 
listener, service, port, filteredNodes, svcConf) + pool, err := lbaas.ensureOctaviaPool(ctx, loadbalancer.ID, cpoutil.Sprintf255(poolFormat, portIndex, lbName), listener, service, port, filteredNodes, svcConf) if err != nil { return nil, err } - if err := lbaas.ensureOctaviaHealthMonitor(loadbalancer.ID, cpoutil.Sprintf255(monitorFormat, portIndex, lbName), pool, port, svcConf); err != nil { + if err := lbaas.ensureOctaviaHealthMonitor(ctx, loadbalancer.ID, cpoutil.Sprintf255(monitorFormat, portIndex, lbName), pool, port, svcConf); err != nil { return nil, err } @@ -1804,7 +1803,7 @@ func (lbaas *LbaasV2) ensureOctaviaLoadBalancer(ctx context.Context, clusterName } // Deal with the remaining listeners, delete the listener if it was created by this Service previously. - if err := lbaas.deleteOctaviaListeners(loadbalancer.ID, curListeners, isLBOwner, lbName); err != nil { + if err := lbaas.deleteOctaviaListeners(ctx, loadbalancer.ID, curListeners, isLBOwner, lbName); err != nil { return nil, err } } @@ -1831,7 +1830,7 @@ func (lbaas *LbaasV2) ensureOctaviaLoadBalancer(ctx context.Context, clusterName if !slices.Contains(lbTags, lbName) { lbTags = append(lbTags, lbName) klog.InfoS("Updating load balancer tags", "lbID", loadbalancer.ID, "tags", lbTags) - if err := openstackutil.UpdateLoadBalancerTags(lbaas.lb, loadbalancer.ID, lbTags); err != nil { + if err := openstackutil.UpdateLoadBalancerTags(ctx, lbaas.lb, loadbalancer.ID, lbTags); err != nil { return nil, err } } @@ -1903,7 +1902,7 @@ func (lbaas *LbaasV2) updateOctaviaLoadBalancer(ctx context.Context, clusterName // Get load balancer var loadbalancer *loadbalancers.LoadBalancer if svcConf.lbID != "" { - loadbalancer, err = openstackutil.GetLoadbalancerByID(lbaas.lb, svcConf.lbID) + loadbalancer, err = openstackutil.GetLoadbalancerByID(ctx, lbaas.lb, svcConf.lbID) if err != nil { return fmt.Errorf("failed to get load balancer %s: %v", svcConf.lbID, err) } @@ -1911,7 +1910,7 @@ func (lbaas *LbaasV2) updateOctaviaLoadBalancer(ctx context.Context, clusterName // This is a Service created before shared LB is supported. 
name := lbaas.GetLoadBalancerName(ctx, clusterName, service) legacyName := lbaas.getLoadBalancerLegacyName(service) - loadbalancer, err = getLoadbalancerByName(lbaas.lb, name, legacyName) + loadbalancer, err = getLoadbalancerByName(ctx, lbaas.lb, name, legacyName) if err != nil { return err } @@ -1920,7 +1919,7 @@ func (lbaas *LbaasV2) updateOctaviaLoadBalancer(ctx context.Context, clusterName return fmt.Errorf("load balancer %s is not ACTIVE, current provisioning status: %s", loadbalancer.ID, loadbalancer.ProvisioningStatus) } - loadbalancer.Listeners, err = openstackutil.GetListenersByLoadBalancerID(lbaas.lb, loadbalancer.ID) + loadbalancer.Listeners, err = openstackutil.GetListenersByLoadBalancerID(ctx, lbaas.lb, loadbalancer.ID) if err != nil { return err } @@ -1945,12 +1944,12 @@ func (lbaas *LbaasV2) updateOctaviaLoadBalancer(ctx context.Context, clusterName return fmt.Errorf("loadbalancer %s does not contain required listener for port %d and protocol %s", loadbalancer.ID, port.Port, port.Protocol) } - pool, err := lbaas.ensureOctaviaPool(loadbalancer.ID, cpoutil.Sprintf255(poolFormat, portIndex, loadbalancer.Name), &listener, service, port, filteredNodes, svcConf) + pool, err := lbaas.ensureOctaviaPool(ctx, loadbalancer.ID, cpoutil.Sprintf255(poolFormat, portIndex, loadbalancer.Name), &listener, service, port, filteredNodes, svcConf) if err != nil { return err } - err = lbaas.ensureOctaviaHealthMonitor(loadbalancer.ID, cpoutil.Sprintf255(monitorFormat, portIndex, loadbalancer.Name), pool, port, svcConf) + err = lbaas.ensureOctaviaHealthMonitor(ctx, loadbalancer.ID, cpoutil.Sprintf255(monitorFormat, portIndex, loadbalancer.Name), pool, port, svcConf) if err != nil { return err } @@ -2004,16 +2003,16 @@ func (lbaas *LbaasV2) deleteFIPIfCreatedByProvider(ctx context.Context, fip *flo } // deleteLoadBalancer removes the LB and its children either by using Octavia cascade deletion or manually -func (lbaas *LbaasV2) deleteLoadBalancer(loadbalancer *loadbalancers.LoadBalancer, service *corev1.Service, svcConf *serviceConfig, needDeleteLB bool) error { +func (lbaas *LbaasV2) deleteLoadBalancer(ctx context.Context, loadbalancer *loadbalancers.LoadBalancer, service *corev1.Service, svcConf *serviceConfig, needDeleteLB bool) error { if needDeleteLB && lbaas.opts.CascadeDelete { klog.InfoS("Deleting load balancer", "lbID", loadbalancer.ID, "service", klog.KObj(service)) - if err := openstackutil.DeleteLoadbalancer(lbaas.lb, loadbalancer.ID, true); err != nil { + if err := openstackutil.DeleteLoadbalancer(ctx, lbaas.lb, loadbalancer.ID, true); err != nil { return err } klog.InfoS("Deleted load balancer", "lbID", loadbalancer.ID, "service", klog.KObj(service)) } else { // get all listeners associated with this loadbalancer - listenerList, err := openstackutil.GetListenersByLoadBalancerID(lbaas.lb, loadbalancer.ID) + listenerList, err := openstackutil.GetListenersByLoadBalancerID(ctx, lbaas.lb, loadbalancer.ID) if err != nil { return fmt.Errorf("error getting LB %s listeners: %v", loadbalancer.ID, err) } @@ -2042,7 +2041,7 @@ func (lbaas *LbaasV2) deleteLoadBalancer(loadbalancer *loadbalancers.LoadBalance // get all pools (and health monitors) associated with this loadbalancer var monitorIDs []string for _, listener := range listenerList { - pool, err := openstackutil.GetPoolByListener(lbaas.lb, loadbalancer.ID, listener.ID) + pool, err := openstackutil.GetPoolByListener(ctx, lbaas.lb, loadbalancer.ID, listener.ID) if err != nil && err != cpoerrors.ErrNotFound { return fmt.Errorf("error getting 
pool for listener %s: %v", listener.ID, err) } @@ -2056,21 +2055,21 @@ func (lbaas *LbaasV2) deleteLoadBalancer(loadbalancer *loadbalancers.LoadBalance // delete monitors for _, monitorID := range monitorIDs { klog.InfoS("Deleting health monitor", "monitorID", monitorID, "lbID", loadbalancer.ID) - if err := openstackutil.DeleteHealthMonitor(lbaas.lb, monitorID, loadbalancer.ID); err != nil { + if err := openstackutil.DeleteHealthMonitor(ctx, lbaas.lb, monitorID, loadbalancer.ID); err != nil { return err } klog.InfoS("Deleted health monitor", "monitorID", monitorID, "lbID", loadbalancer.ID) } // delete listeners - if err := lbaas.deleteListeners(loadbalancer.ID, listenerList); err != nil { + if err := lbaas.deleteListeners(ctx, loadbalancer.ID, listenerList); err != nil { return err } if needDeleteLB { // delete the loadbalancer in old way, i.e. no cascading. klog.InfoS("Deleting load balancer", "lbID", loadbalancer.ID, "service", klog.KObj(service)) - if err := openstackutil.DeleteLoadbalancer(lbaas.lb, loadbalancer.ID, false); err != nil { + if err := openstackutil.DeleteLoadbalancer(ctx, lbaas.lb, loadbalancer.ID, false); err != nil { return err } klog.InfoS("Deleted load balancer", "lbID", loadbalancer.ID, "service", klog.KObj(service)) @@ -2090,16 +2089,16 @@ func (lbaas *LbaasV2) ensureLoadBalancerDeleted(ctx context.Context, clusterName isCreatedByOCCM := false svcConf := new(serviceConfig) - if err := lbaas.checkServiceDelete(service, svcConf); err != nil { + if err := lbaas.checkServiceDelete(ctx, service, svcConf); err != nil { return err } svcConf.lbName = lbName if svcConf.lbID != "" { - loadbalancer, err = openstackutil.GetLoadbalancerByID(lbaas.lb, svcConf.lbID) + loadbalancer, err = openstackutil.GetLoadbalancerByID(ctx, lbaas.lb, svcConf.lbID) } else { // This may happen when this Service creation was failed previously. - loadbalancer, err = getLoadbalancerByName(lbaas.lb, lbName, legacyName) + loadbalancer, err = getLoadbalancerByName(ctx, lbaas.lb, lbName, legacyName) } if err != nil && !cpoerrors.IsNotFound(err) { return err @@ -2153,7 +2152,7 @@ func (lbaas *LbaasV2) ensureLoadBalancerDeleted(ctx context.Context, clusterName } } - if err = lbaas.deleteLoadBalancer(loadbalancer, service, svcConf, needDeleteLB); err != nil { + if err = lbaas.deleteLoadBalancer(ctx, loadbalancer, service, svcConf, needDeleteLB); err != nil { return err } @@ -2170,7 +2169,7 @@ func (lbaas *LbaasV2) ensureLoadBalancerDeleted(ctx context.Context, clusterName newTags = []string{""} } klog.InfoS("Updating load balancer tags", "lbID", loadbalancer.ID, "tags", newTags) - if err := openstackutil.UpdateLoadBalancerTags(lbaas.lb, loadbalancer.ID, newTags); err != nil { + if err := openstackutil.UpdateLoadBalancerTags(ctx, lbaas.lb, loadbalancer.ID, newTags); err != nil { return err } klog.InfoS("Updated load balancer tags", "lbID", loadbalancer.ID) diff --git a/pkg/openstack/loadbalancer_rename.go b/pkg/openstack/loadbalancer_rename.go index 50eeb7fad5..8adda9a791 100644 --- a/pkg/openstack/loadbalancer_rename.go +++ b/pkg/openstack/loadbalancer_rename.go @@ -17,6 +17,7 @@ limitations under the License. package openstack import ( + "context" "fmt" "regexp" "strings" @@ -76,8 +77,8 @@ func replaceClusterName(oldClusterName, clusterName, objectName string) string { // renameLoadBalancer renames all the children and then the LB itself to match new lbName. // The purpose is handling a change of clusterName. 
-func renameLoadBalancer(client *gophercloud.ServiceClient, loadbalancer *loadbalancers.LoadBalancer, lbName, clusterName string) (*loadbalancers.LoadBalancer, error) { - lbListeners, err := openstackutil.GetListenersByLoadBalancerID(client, loadbalancer.ID) +func renameLoadBalancer(ctx context.Context, client *gophercloud.ServiceClient, loadbalancer *loadbalancers.LoadBalancer, lbName, clusterName string) (*loadbalancers.LoadBalancer, error) { + lbListeners, err := openstackutil.GetListenersByLoadBalancerID(ctx, client, loadbalancer.ID) if err != nil { return nil, err } @@ -91,7 +92,7 @@ func renameLoadBalancer(client *gophercloud.ServiceClient, loadbalancer *loadbal if oldClusterName != clusterName { // First let's handle pool which we assume is a child of the listener. Only one pool per one listener. - lbPool, err := openstackutil.GetPoolByListener(client, loadbalancer.ID, listener.ID) + lbPool, err := openstackutil.GetPoolByListener(ctx, client, loadbalancer.ID, listener.ID) if err != nil { return nil, err } @@ -99,14 +100,14 @@ func renameLoadBalancer(client *gophercloud.ServiceClient, loadbalancer *loadbal if oldClusterName != clusterName { if lbPool.MonitorID != "" { // If monitor exists, let's handle it first, as we treat it as child of the pool. - monitor, err := openstackutil.GetHealthMonitor(client, lbPool.MonitorID) + monitor, err := openstackutil.GetHealthMonitor(ctx, client, lbPool.MonitorID) if err != nil { return nil, err } oldClusterName := getClusterName(fmt.Sprintf("%s[0-9]+_", monitorPrefix), monitor.Name) if oldClusterName != clusterName { monitor.Name = replaceClusterName(oldClusterName, clusterName, monitor.Name) - err = openstackutil.UpdateHealthMonitor(client, monitor.ID, monitors.UpdateOpts{Name: &monitor.Name}, loadbalancer.ID) + err = openstackutil.UpdateHealthMonitor(ctx, client, monitor.ID, monitors.UpdateOpts{Name: &monitor.Name}, loadbalancer.ID) if err != nil { return nil, err } @@ -115,7 +116,7 @@ func renameLoadBalancer(client *gophercloud.ServiceClient, loadbalancer *loadbal // Monitor is handled, let's rename the pool. 
lbPool.Name = replaceClusterName(oldClusterName, clusterName, lbPool.Name) - err = openstackutil.UpdatePool(client, loadbalancer.ID, lbPool.ID, pools.UpdateOpts{Name: &lbPool.Name}) + err = openstackutil.UpdatePool(ctx, client, loadbalancer.ID, lbPool.ID, pools.UpdateOpts{Name: &lbPool.Name}) if err != nil { return nil, err } @@ -129,7 +130,7 @@ func renameLoadBalancer(client *gophercloud.ServiceClient, loadbalancer *loadbal } } listener.Name = replaceClusterName(oldClusterName, clusterName, listener.Name) - err = openstackutil.UpdateListener(client, loadbalancer.ID, listener.ID, listeners.UpdateOpts{Name: &listener.Name, Tags: &listener.Tags}) + err = openstackutil.UpdateListener(ctx, client, loadbalancer.ID, listener.ID, listeners.UpdateOpts{Name: &listener.Name, Tags: &listener.Tags}) if err != nil { return nil, err } @@ -145,5 +146,5 @@ func renameLoadBalancer(client *gophercloud.ServiceClient, loadbalancer *loadbal loadbalancer.Tags[i] = replaceClusterName(oldClusterNameTag, clusterName, tag) } } - return openstackutil.UpdateLoadBalancer(client, loadbalancer.ID, loadbalancers.UpdateOpts{Name: &lbName, Tags: &loadbalancer.Tags}) + return openstackutil.UpdateLoadBalancer(ctx, client, loadbalancer.ID, loadbalancers.UpdateOpts{Name: &lbName, Tags: &loadbalancer.Tags}) } diff --git a/pkg/openstack/loadbalancer_sg.go b/pkg/openstack/loadbalancer_sg.go index 0443c45566..8098e9f2fd 100644 --- a/pkg/openstack/loadbalancer_sg.go +++ b/pkg/openstack/loadbalancer_sg.go @@ -256,7 +256,7 @@ func (lbaas *LbaasV2) ensureAndUpdateOctaviaSecurityGroup(ctx context.Context, c cidrs = svcConf.allowedCIDR } - existingRules, err := openstackutil.GetSecurityGroupRules(lbaas.network, rules.ListOpts{SecGroupID: lbSecGroupID}) + existingRules, err := openstackutil.GetSecurityGroupRules(ctx, lbaas.network, rules.ListOpts{SecGroupID: lbSecGroupID}) if err != nil { return fmt.Errorf( "failed to find security group rules in %s: %v", lbSecGroupID, err) diff --git a/pkg/openstack/loadbalancer_test.go b/pkg/openstack/loadbalancer_test.go index 8592885bf4..b50c88941f 100644 --- a/pkg/openstack/loadbalancer_test.go +++ b/pkg/openstack/loadbalancer_test.go @@ -1988,7 +1988,7 @@ func TestBuildBatchUpdateMemberOpts(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { lbaas := &LbaasV2{} - members, newMembers, err := lbaas.buildBatchUpdateMemberOpts(tc.port, tc.nodes, tc.svcConf) + members, newMembers, err := lbaas.buildBatchUpdateMemberOpts(context.TODO(), tc.port, tc.nodes, tc.svcConf) assert.Len(t, members, tc.expectedLen) assert.NoError(t, err) @@ -2362,7 +2362,7 @@ func Test_buildMonitorCreateOpts(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := tt.testArg.lbaas.buildMonitorCreateOpts(tt.testArg.svcConf, tt.testArg.port, tt.name) + result := tt.testArg.lbaas.buildMonitorCreateOpts(context.TODO(), tt.testArg.svcConf, tt.testArg.port, tt.name) assert.Equal(t, tt.want, result) }) } @@ -2500,7 +2500,7 @@ func TestBuildListenerCreateOpt(t *testing.T) { }, }, } - createOpt := lbaas.buildListenerCreateOpt(tc.port, tc.svcConf, tc.name) + createOpt := lbaas.buildListenerCreateOpt(context.TODO(), tc.port, tc.svcConf, tc.name) assert.Equal(t, tc.expectedCreateOpt, createOpt) }) diff --git a/pkg/util/openstack/keymanager.go b/pkg/util/openstack/keymanager.go index 23a5fb0d76..9ac2b8d23f 100644 --- a/pkg/util/openstack/keymanager.go +++ b/pkg/util/openstack/keymanager.go @@ -28,12 +28,12 @@ import ( ) // EnsureSecret creates a secret if it doesn't exist. 
-func EnsureSecret(client *gophercloud.ServiceClient, name string, secretType string, payload string) (string, error) { - secret, err := GetSecret(client, name) +func EnsureSecret(ctx context.Context, client *gophercloud.ServiceClient, name string, secretType string, payload string) (string, error) { + secret, err := GetSecret(ctx, client, name) if err != nil { if err == cpoerrors.ErrNotFound { // Create a new one - return CreateSecret(client, name, secretType, payload) + return CreateSecret(ctx, client, name, secretType, payload) } return "", err @@ -43,12 +43,12 @@ func EnsureSecret(client *gophercloud.ServiceClient, name string, secretType str } // GetSecret returns the secret by name -func GetSecret(client *gophercloud.ServiceClient, name string) (*secrets.Secret, error) { +func GetSecret(ctx context.Context, client *gophercloud.ServiceClient, name string) (*secrets.Secret, error) { listOpts := secrets.ListOpts{ Name: name, } mc := metrics.NewMetricContext("secret", "list") - allPages, err := secrets.List(client, listOpts).AllPages(context.TODO()) + allPages, err := secrets.List(client, listOpts).AllPages(ctx) if mc.ObserveRequest(err) != nil { return nil, err } @@ -68,7 +68,7 @@ func GetSecret(client *gophercloud.ServiceClient, name string) (*secrets.Secret, } // CreateSecret creates a secret in Barbican, returns the secret url. -func CreateSecret(client *gophercloud.ServiceClient, name string, secretType string, payload string) (string, error) { +func CreateSecret(ctx context.Context, client *gophercloud.ServiceClient, name string, secretType string, payload string) (string, error) { createOpts := secrets.CreateOpts{ Name: name, Algorithm: "aes", @@ -80,7 +80,7 @@ func CreateSecret(client *gophercloud.ServiceClient, name string, secretType str SecretType: secrets.OpaqueSecret, } mc := metrics.NewMetricContext("secret", "create") - secret, err := secrets.Create(context.TODO(), client, createOpts).Extract() + secret, err := secrets.Create(ctx, client, createOpts).Extract() if mc.ObserveRequest(err) != nil { return "", err } @@ -98,12 +98,12 @@ func ParseSecretID(ref string) (string, error) { } // DeleteSecrets deletes all the secrets that including the name string. -func DeleteSecrets(client *gophercloud.ServiceClient, partName string) error { +func DeleteSecrets(ctx context.Context, client *gophercloud.ServiceClient, partName string) error { listOpts := secrets.ListOpts{ SecretType: secrets.OpaqueSecret, } mc := metrics.NewMetricContext("secret", "list") - allPages, err := secrets.List(client, listOpts).AllPages(context.TODO()) + allPages, err := secrets.List(client, listOpts).AllPages(ctx) if mc.ObserveRequest(err) != nil { return err } @@ -119,7 +119,7 @@ func DeleteSecrets(client *gophercloud.ServiceClient, partName string) error { return err } mc := metrics.NewMetricContext("secret", "delete") - err = secrets.Delete(context.TODO(), client, secretID).ExtractErr() + err = secrets.Delete(ctx, client, secretID).ExtractErr() if mc.ObserveRequest(err) != nil && !cpoerrors.IsNotFound(err) { return err } diff --git a/pkg/util/openstack/loadbalancer.go b/pkg/util/openstack/loadbalancer.go index 41654ab681..320e481499 100644 --- a/pkg/util/openstack/loadbalancer.go +++ b/pkg/util/openstack/loadbalancer.go @@ -61,14 +61,14 @@ var ( ) // getOctaviaVersion returns the current Octavia API version. 
-func getOctaviaVersion(client *gophercloud.ServiceClient) (string, error) { +func getOctaviaVersion(ctx context.Context, client *gophercloud.ServiceClient) (string, error) { if octaviaVersion != "" { return octaviaVersion, nil } var defaultVer = "0.0" mc := metrics.NewMetricContext("version", "list") - allPages, err := apiversions.List(client).AllPages(context.TODO()) + allPages, err := apiversions.List(client).AllPages(ctx) if mc.ObserveRequest(err) != nil { return defaultVer, err } @@ -90,8 +90,8 @@ func getOctaviaVersion(client *gophercloud.ServiceClient) (string, error) { } // IsOctaviaFeatureSupported returns true if the given feature is supported in the deployed Octavia version. -func IsOctaviaFeatureSupported(client *gophercloud.ServiceClient, feature int, lbProvider string) bool { - octaviaVer, err := getOctaviaVersion(client) +func IsOctaviaFeatureSupported(ctx context.Context, client *gophercloud.ServiceClient, feature int, lbProvider string) bool { + octaviaVer, err := getOctaviaVersion(ctx, client) if err != nil { klog.Warningf("Failed to get current Octavia API version: %v", err) return false @@ -163,7 +163,7 @@ func getTimeoutSteps(name string, steps int) int { } // WaitActiveAndGetLoadBalancer wait for LB active then return the LB object for further usage -func WaitActiveAndGetLoadBalancer(client *gophercloud.ServiceClient, loadbalancerID string) (*loadbalancers.LoadBalancer, error) { +func WaitActiveAndGetLoadBalancer(ctx context.Context, client *gophercloud.ServiceClient, loadbalancerID string) (*loadbalancers.LoadBalancer, error) { klog.InfoS("Waiting for load balancer ACTIVE", "lbID", loadbalancerID) steps := getTimeoutSteps("OCCM_WAIT_LB_ACTIVE_STEPS", waitLoadbalancerActiveSteps) backoff := wait.Backoff{ @@ -176,7 +176,7 @@ func WaitActiveAndGetLoadBalancer(client *gophercloud.ServiceClient, loadbalance err := wait.ExponentialBackoff(backoff, func() (bool, error) { mc := metrics.NewMetricContext("loadbalancer", "get") var err error - loadbalancer, err = loadbalancers.Get(context.TODO(), client, loadbalancerID).Extract() + loadbalancer, err = loadbalancers.Get(ctx, client, loadbalancerID).Extract() if mc.ObserveRequest(err) != nil { klog.Warningf("Failed to fetch loadbalancer status from OpenStack (lbID %q): %s", loadbalancerID, err) return false, nil @@ -200,9 +200,9 @@ func WaitActiveAndGetLoadBalancer(client *gophercloud.ServiceClient, loadbalance } // GetLoadBalancers returns all the filtered load balancer. 
-func GetLoadBalancers(client *gophercloud.ServiceClient, opts loadbalancers.ListOpts) ([]loadbalancers.LoadBalancer, error) { +func GetLoadBalancers(ctx context.Context, client *gophercloud.ServiceClient, opts loadbalancers.ListOpts) ([]loadbalancers.LoadBalancer, error) { mc := metrics.NewMetricContext("loadbalancer", "list") - allPages, err := loadbalancers.List(client, opts).AllPages(context.TODO()) + allPages, err := loadbalancers.List(client, opts).AllPages(ctx) if mc.ObserveRequest(err) != nil { return nil, err } @@ -215,9 +215,9 @@ func GetLoadBalancers(client *gophercloud.ServiceClient, opts loadbalancers.List } // GetLoadbalancerByID retrieves loadbalancer object -func GetLoadbalancerByID(client *gophercloud.ServiceClient, lbID string) (*loadbalancers.LoadBalancer, error) { +func GetLoadbalancerByID(ctx context.Context, client *gophercloud.ServiceClient, lbID string) (*loadbalancers.LoadBalancer, error) { mc := metrics.NewMetricContext("loadbalancer", "get") - lb, err := loadbalancers.Get(context.TODO(), client, lbID).Extract() + lb, err := loadbalancers.Get(ctx, client, lbID).Extract() if mc.ObserveRequest(err) != nil { return nil, err } @@ -226,12 +226,12 @@ func GetLoadbalancerByID(client *gophercloud.ServiceClient, lbID string) (*loadb } // GetLoadbalancerByName retrieves loadbalancer object -func GetLoadbalancerByName(client *gophercloud.ServiceClient, name string) (*loadbalancers.LoadBalancer, error) { +func GetLoadbalancerByName(ctx context.Context, client *gophercloud.ServiceClient, name string) (*loadbalancers.LoadBalancer, error) { opts := loadbalancers.ListOpts{ Name: name, } mc := metrics.NewMetricContext("loadbalancer", "list") - allPages, err := loadbalancers.List(client, opts).AllPages(context.TODO()) + allPages, err := loadbalancers.List(client, opts).AllPages(ctx) if mc.ObserveRequest(err) != nil { return nil, err } @@ -251,25 +251,25 @@ func GetLoadbalancerByName(client *gophercloud.ServiceClient, name string) (*loa } // UpdateLoadBalancerTags updates tags for the load balancer -func UpdateLoadBalancerTags(client *gophercloud.ServiceClient, lbID string, tags []string) error { +func UpdateLoadBalancerTags(ctx context.Context, client *gophercloud.ServiceClient, lbID string, tags []string) error { updateOpts := loadbalancers.UpdateOpts{ Tags: &tags, } - _, err := UpdateLoadBalancer(client, lbID, updateOpts) + _, err := UpdateLoadBalancer(ctx, client, lbID, updateOpts) return err } // UpdateLoadBalancer updates the load balancer -func UpdateLoadBalancer(client *gophercloud.ServiceClient, lbID string, updateOpts loadbalancers.UpdateOpts) (*loadbalancers.LoadBalancer, error) { +func UpdateLoadBalancer(ctx context.Context, client *gophercloud.ServiceClient, lbID string, updateOpts loadbalancers.UpdateOpts) (*loadbalancers.LoadBalancer, error) { mc := metrics.NewMetricContext("loadbalancer", "update") - _, err := loadbalancers.Update(context.TODO(), client, lbID, updateOpts).Extract() + _, err := loadbalancers.Update(ctx, client, lbID, updateOpts).Extract() if mc.ObserveRequest(err) != nil { return nil, err } - lb, err := WaitActiveAndGetLoadBalancer(client, lbID) + lb, err := WaitActiveAndGetLoadBalancer(ctx, client, lbID) if err != nil { return nil, fmt.Errorf("failed to wait for load balancer %s ACTIVE after updating: %v", lbID, err) } @@ -277,7 +277,7 @@ func UpdateLoadBalancer(client *gophercloud.ServiceClient, lbID string, updateOp return lb, nil } -func waitLoadbalancerDeleted(client *gophercloud.ServiceClient, loadbalancerID string) error { +func 
waitLoadbalancerDeleted(ctx context.Context, client *gophercloud.ServiceClient, loadbalancerID string) error { klog.V(4).InfoS("Waiting for load balancer deleted", "lbID", loadbalancerID) backoff := wait.Backoff{ Duration: waitLoadbalancerInitDelay, @@ -286,7 +286,7 @@ func waitLoadbalancerDeleted(client *gophercloud.ServiceClient, loadbalancerID s } err := wait.ExponentialBackoff(backoff, func() (bool, error) { mc := metrics.NewMetricContext("loadbalancer", "get") - _, err := loadbalancers.Get(context.TODO(), client, loadbalancerID).Extract() + _, err := loadbalancers.Get(ctx, client, loadbalancerID).Extract() if err != nil { if cpoerrors.IsNotFound(err) { klog.V(4).InfoS("Load balancer deleted", "lbID", loadbalancerID) @@ -305,21 +305,21 @@ func waitLoadbalancerDeleted(client *gophercloud.ServiceClient, loadbalancerID s } // DeleteLoadbalancer deletes a loadbalancer and wait for it's gone. -func DeleteLoadbalancer(client *gophercloud.ServiceClient, lbID string, cascade bool) error { +func DeleteLoadbalancer(ctx context.Context, client *gophercloud.ServiceClient, lbID string, cascade bool) error { opts := loadbalancers.DeleteOpts{} if cascade { opts.Cascade = true } mc := metrics.NewMetricContext("loadbalancer", "delete") - err := loadbalancers.Delete(context.TODO(), client, lbID, opts).ExtractErr() + err := loadbalancers.Delete(ctx, client, lbID, opts).ExtractErr() if err != nil && !cpoerrors.IsNotFound(err) { _ = mc.ObserveRequest(err) return fmt.Errorf("error deleting loadbalancer %s: %v", lbID, err) } _ = mc.ObserveRequest(nil) - if err := waitLoadbalancerDeleted(client, lbID); err != nil { + if err := waitLoadbalancerDeleted(ctx, client, lbID); err != nil { return err } @@ -327,14 +327,14 @@ func DeleteLoadbalancer(client *gophercloud.ServiceClient, lbID string, cascade } // UpdateListener updates a listener and wait for the lb active -func UpdateListener(client *gophercloud.ServiceClient, lbID string, listenerID string, opts listeners.UpdateOpts) error { +func UpdateListener(ctx context.Context, client *gophercloud.ServiceClient, lbID string, listenerID string, opts listeners.UpdateOpts) error { mc := metrics.NewMetricContext("loadbalancer_listener", "update") - _, err := listeners.Update(context.TODO(), client, listenerID, opts).Extract() + _, err := listeners.Update(ctx, client, listenerID, opts).Extract() if mc.ObserveRequest(err) != nil { return err } - if _, err := WaitActiveAndGetLoadBalancer(client, lbID); err != nil { + if _, err := WaitActiveAndGetLoadBalancer(ctx, client, lbID); err != nil { return fmt.Errorf("failed to wait for load balancer %s ACTIVE after updating listener: %v", lbID, err) } @@ -342,14 +342,14 @@ func UpdateListener(client *gophercloud.ServiceClient, lbID string, listenerID s } // CreateListener creates a new listener -func CreateListener(client *gophercloud.ServiceClient, lbID string, opts listeners.CreateOpts) (*listeners.Listener, error) { +func CreateListener(ctx context.Context, client *gophercloud.ServiceClient, lbID string, opts listeners.CreateOpts) (*listeners.Listener, error) { mc := metrics.NewMetricContext("loadbalancer_listener", "create") - listener, err := listeners.Create(context.TODO(), client, opts).Extract() + listener, err := listeners.Create(ctx, client, opts).Extract() if mc.ObserveRequest(err) != nil { return nil, err } - if _, err := WaitActiveAndGetLoadBalancer(client, lbID); err != nil { + if _, err := WaitActiveAndGetLoadBalancer(ctx, client, lbID); err != nil { return nil, fmt.Errorf("failed to wait for load balancer %s 
ACTIVE after creating listener: %v", lbID, err) } @@ -357,9 +357,9 @@ func CreateListener(client *gophercloud.ServiceClient, lbID string, opts listene } // DeleteListener deletes a listener. -func DeleteListener(client *gophercloud.ServiceClient, listenerID string, lbID string) error { +func DeleteListener(ctx context.Context, client *gophercloud.ServiceClient, listenerID string, lbID string) error { mc := metrics.NewMetricContext("loadbalancer_listener", "delete") - if err := listeners.Delete(context.TODO(), client, listenerID).ExtractErr(); mc.ObserveRequest(err) != nil { + if err := listeners.Delete(ctx, client, listenerID).ExtractErr(); mc.ObserveRequest(err) != nil { if cpoerrors.IsNotFound(err) { klog.V(2).Infof("Listener %s for load balancer %s was already deleted: %v", listenerID, lbID, err) } else { @@ -368,7 +368,7 @@ func DeleteListener(client *gophercloud.ServiceClient, listenerID string, lbID s } } - if _, err := WaitActiveAndGetLoadBalancer(client, lbID); err != nil { + if _, err := WaitActiveAndGetLoadBalancer(ctx, client, lbID); err != nil { return fmt.Errorf("failed to wait for load balancer %s ACTIVE after deleting listener: %v", lbID, err) } @@ -376,7 +376,7 @@ func DeleteListener(client *gophercloud.ServiceClient, listenerID string, lbID s } // GetListenerByName gets a listener by its name, raise error if not found or get multiple ones. -func GetListenerByName(client *gophercloud.ServiceClient, name string, lbID string) (*listeners.Listener, error) { +func GetListenerByName(ctx context.Context, client *gophercloud.ServiceClient, name string, lbID string) (*listeners.Listener, error) { opts := listeners.ListOpts{ Name: name, LoadbalancerID: lbID, @@ -385,7 +385,7 @@ func GetListenerByName(client *gophercloud.ServiceClient, name string, lbID stri pager := listeners.List(client, opts) var listenerList []listeners.Listener - err := pager.EachPage(context.TODO(), func(_ context.Context, page pagination.Page) (bool, error) { + err := pager.EachPage(ctx, func(_ context.Context, page pagination.Page) (bool, error) { v, err := listeners.ExtractListeners(page) if err != nil { return false, err @@ -411,11 +411,11 @@ func GetListenerByName(client *gophercloud.ServiceClient, name string, lbID stri } // GetListenersByLoadBalancerID returns listener list -func GetListenersByLoadBalancerID(client *gophercloud.ServiceClient, lbID string) ([]listeners.Listener, error) { +func GetListenersByLoadBalancerID(ctx context.Context, client *gophercloud.ServiceClient, lbID string) ([]listeners.Listener, error) { mc := metrics.NewMetricContext("loadbalancer_listener", "list") var lbListeners []listeners.Listener - allPages, err := listeners.List(client, listeners.ListOpts{LoadbalancerID: lbID}).AllPages(context.TODO()) + allPages, err := listeners.List(client, listeners.ListOpts{LoadbalancerID: lbID}).AllPages(ctx) if mc.ObserveRequest(err) != nil { return nil, err } @@ -428,14 +428,14 @@ func GetListenersByLoadBalancerID(client *gophercloud.ServiceClient, lbID string } // CreatePool creates a new pool. 
-func CreatePool(client *gophercloud.ServiceClient, opts pools.CreateOptsBuilder, lbID string) (*pools.Pool, error) { +func CreatePool(ctx context.Context, client *gophercloud.ServiceClient, opts pools.CreateOptsBuilder, lbID string) (*pools.Pool, error) { mc := metrics.NewMetricContext("loadbalancer_pool", "create") - pool, err := pools.Create(context.TODO(), client, opts).Extract() + pool, err := pools.Create(ctx, client, opts).Extract() if mc.ObserveRequest(err) != nil { return nil, err } - if _, err = WaitActiveAndGetLoadBalancer(client, lbID); err != nil { + if _, err = WaitActiveAndGetLoadBalancer(ctx, client, lbID); err != nil { return nil, fmt.Errorf("failed to wait for load balancer ACTIVE after creating pool: %v", err) } @@ -443,7 +443,7 @@ func CreatePool(client *gophercloud.ServiceClient, opts pools.CreateOptsBuilder, } // GetPoolByName gets a pool by its name, raise error if not found or get multiple ones. -func GetPoolByName(client *gophercloud.ServiceClient, name string, lbID string) (*pools.Pool, error) { +func GetPoolByName(ctx context.Context, client *gophercloud.ServiceClient, name string, lbID string) (*pools.Pool, error) { var listenerPools []pools.Pool opts := pools.ListOpts{ @@ -451,7 +451,7 @@ func GetPoolByName(client *gophercloud.ServiceClient, name string, lbID string) LoadbalancerID: lbID, } mc := metrics.NewMetricContext("loadbalancer_pool", "list") - err := pools.List(client, opts).EachPage(context.TODO(), func(_ context.Context, page pagination.Page) (bool, error) { + err := pools.List(client, opts).EachPage(ctx, func(_ context.Context, page pagination.Page) (bool, error) { v, err := pools.ExtractPools(page) if err != nil { return false, err @@ -480,10 +480,10 @@ func GetPoolByName(client *gophercloud.ServiceClient, name string, lbID string) // GetPoolByListener finds pool for a listener. // A listener always has exactly one pool. -func GetPoolByListener(client *gophercloud.ServiceClient, lbID, listenerID string) (*pools.Pool, error) { +func GetPoolByListener(ctx context.Context, client *gophercloud.ServiceClient, lbID, listenerID string) (*pools.Pool, error) { listenerPools := make([]pools.Pool, 0, 1) mc := metrics.NewMetricContext("loadbalancer_pool", "list") - err := pools.List(client, pools.ListOpts{LoadbalancerID: lbID}).EachPage(context.TODO(), func(_ context.Context, page pagination.Page) (bool, error) { + err := pools.List(client, pools.ListOpts{LoadbalancerID: lbID}).EachPage(ctx, func(_ context.Context, page pagination.Page) (bool, error) { poolsList, err := pools.ExtractPools(page) if err != nil { return false, err @@ -515,13 +515,13 @@ func GetPoolByListener(client *gophercloud.ServiceClient, lbID, listenerID strin } // GetPools retrieves the pools belong to the loadbalancer. -func GetPools(client *gophercloud.ServiceClient, lbID string) ([]pools.Pool, error) { +func GetPools(ctx context.Context, client *gophercloud.ServiceClient, lbID string) ([]pools.Pool, error) { var lbPools []pools.Pool opts := pools.ListOpts{ LoadbalancerID: lbID, } - allPages, err := pools.List(client, opts).AllPages(context.TODO()) + allPages, err := pools.List(client, opts).AllPages(ctx) if err != nil { return nil, err } @@ -535,11 +535,11 @@ func GetPools(client *gophercloud.ServiceClient, lbID string) ([]pools.Pool, err } // GetMembersbyPool get all the members in the pool. 
-func GetMembersbyPool(client *gophercloud.ServiceClient, poolID string) ([]pools.Member, error) { +func GetMembersbyPool(ctx context.Context, client *gophercloud.ServiceClient, poolID string) ([]pools.Member, error) { var members []pools.Member mc := metrics.NewMetricContext("loadbalancer_member", "list") - err := pools.ListMembers(client, poolID, pools.ListMembersOpts{}).EachPage(context.TODO(), func(_ context.Context, page pagination.Page) (bool, error) { + err := pools.ListMembers(client, poolID, pools.ListMembersOpts{}).EachPage(ctx, func(_ context.Context, page pagination.Page) (bool, error) { membersList, err := pools.ExtractMembers(page) if err != nil { return false, err @@ -556,14 +556,14 @@ func GetMembersbyPool(client *gophercloud.ServiceClient, poolID string) ([]pools } // UpdatePool updates a pool and wait for the lb active -func UpdatePool(client *gophercloud.ServiceClient, lbID string, poolID string, opts pools.UpdateOpts) error { +func UpdatePool(ctx context.Context, client *gophercloud.ServiceClient, lbID string, poolID string, opts pools.UpdateOpts) error { mc := metrics.NewMetricContext("loadbalancer_pool", "update") - _, err := pools.Update(context.TODO(), client, poolID, opts).Extract() + _, err := pools.Update(ctx, client, poolID, opts).Extract() if mc.ObserveRequest(err) != nil { return err } - if _, err := WaitActiveAndGetLoadBalancer(client, lbID); err != nil { + if _, err := WaitActiveAndGetLoadBalancer(ctx, client, lbID); err != nil { return fmt.Errorf("failed to wait for load balancer %s ACTIVE after updating pool: %v", lbID, err) } @@ -571,16 +571,16 @@ func UpdatePool(client *gophercloud.ServiceClient, lbID string, poolID string, o } // DeletePool deletes a pool. -func DeletePool(client *gophercloud.ServiceClient, poolID string, lbID string) error { +func DeletePool(ctx context.Context, client *gophercloud.ServiceClient, poolID string, lbID string) error { mc := metrics.NewMetricContext("loadbalancer_pool", "delete") - if err := pools.Delete(context.TODO(), client, poolID).ExtractErr(); mc.ObserveRequest(err) != nil { + if err := pools.Delete(ctx, client, poolID).ExtractErr(); mc.ObserveRequest(err) != nil { if cpoerrors.IsNotFound(err) { klog.V(2).Infof("Pool %s for load balancer %s was already deleted: %v", poolID, lbID, err) } else { return fmt.Errorf("error deleting pool %s for load balancer %s: %v", poolID, lbID, err) } } - if _, err := WaitActiveAndGetLoadBalancer(client, lbID); err != nil { + if _, err := WaitActiveAndGetLoadBalancer(ctx, client, lbID); err != nil { return fmt.Errorf("failed to wait for load balancer %s ACTIVE after deleting pool: %v", lbID, err) } @@ -588,14 +588,14 @@ func DeletePool(client *gophercloud.ServiceClient, poolID string, lbID string) e } // BatchUpdatePoolMembers updates pool members in batch. 
-func BatchUpdatePoolMembers(client *gophercloud.ServiceClient, lbID string, poolID string, opts []pools.BatchUpdateMemberOpts) error { +func BatchUpdatePoolMembers(ctx context.Context, client *gophercloud.ServiceClient, lbID string, poolID string, opts []pools.BatchUpdateMemberOpts) error { mc := metrics.NewMetricContext("loadbalancer_members", "update") - err := pools.BatchUpdateMembers(context.TODO(), client, poolID, opts).ExtractErr() + err := pools.BatchUpdateMembers(ctx, client, poolID, opts).ExtractErr() if mc.ObserveRequest(err) != nil { return err } - if _, err := WaitActiveAndGetLoadBalancer(client, lbID); err != nil { + if _, err := WaitActiveAndGetLoadBalancer(ctx, client, lbID); err != nil { return fmt.Errorf("failed to wait for load balancer %s ACTIVE after updating pool members for %s: %v", lbID, poolID, err) } @@ -603,12 +603,12 @@ func BatchUpdatePoolMembers(client *gophercloud.ServiceClient, lbID string, pool } // GetL7policies retrieves all l7 policies for the given listener. -func GetL7policies(client *gophercloud.ServiceClient, listenerID string) ([]l7policies.L7Policy, error) { +func GetL7policies(ctx context.Context, client *gophercloud.ServiceClient, listenerID string) ([]l7policies.L7Policy, error) { var policies []l7policies.L7Policy opts := l7policies.ListOpts{ ListenerID: listenerID, } - err := l7policies.List(client, opts).EachPage(context.TODO(), func(_ context.Context, page pagination.Page) (bool, error) { + err := l7policies.List(client, opts).EachPage(ctx, func(_ context.Context, page pagination.Page) (bool, error) { v, err := l7policies.ExtractL7Policies(page) if err != nil { return false, err @@ -624,14 +624,14 @@ func GetL7policies(client *gophercloud.ServiceClient, listenerID string) ([]l7po } // CreateL7Policy creates a l7 policy. -func CreateL7Policy(client *gophercloud.ServiceClient, opts l7policies.CreateOpts, lbID string) (*l7policies.L7Policy, error) { +func CreateL7Policy(ctx context.Context, client *gophercloud.ServiceClient, opts l7policies.CreateOpts, lbID string) (*l7policies.L7Policy, error) { mc := metrics.NewMetricContext("loadbalancer_l7policy", "create") - policy, err := l7policies.Create(context.TODO(), client, opts).Extract() + policy, err := l7policies.Create(ctx, client, opts).Extract() if mc.ObserveRequest(err) != nil { return nil, err } - if _, err = WaitActiveAndGetLoadBalancer(client, lbID); err != nil { + if _, err = WaitActiveAndGetLoadBalancer(ctx, client, lbID); err != nil { return nil, fmt.Errorf("failed to wait for load balancer ACTIVE after creating l7policy: %v", err) } @@ -639,13 +639,13 @@ func CreateL7Policy(client *gophercloud.ServiceClient, opts l7policies.CreateOpt } // DeleteL7policy deletes a l7 policy. 
-func DeleteL7policy(client *gophercloud.ServiceClient, policyID string, lbID string) error { +func DeleteL7policy(ctx context.Context, client *gophercloud.ServiceClient, policyID string, lbID string) error { mc := metrics.NewMetricContext("loadbalancer_l7policy", "delete") - if err := l7policies.Delete(context.TODO(), client, policyID).ExtractErr(); mc.ObserveRequest(err) != nil { + if err := l7policies.Delete(ctx, client, policyID).ExtractErr(); mc.ObserveRequest(err) != nil { return err } - if _, err := WaitActiveAndGetLoadBalancer(client, lbID); err != nil { + if _, err := WaitActiveAndGetLoadBalancer(ctx, client, lbID); err != nil { return fmt.Errorf("failed to wait for load balancer %s ACTIVE after deleting l7policy: %v", lbID, err) } @@ -653,9 +653,9 @@ func DeleteL7policy(client *gophercloud.ServiceClient, policyID string, lbID str } // GetL7Rules gets all the rules for a l7 policy -func GetL7Rules(client *gophercloud.ServiceClient, policyID string) ([]l7policies.Rule, error) { +func GetL7Rules(ctx context.Context, client *gophercloud.ServiceClient, policyID string) ([]l7policies.Rule, error) { listOpts := l7policies.ListRulesOpts{} - allPages, err := l7policies.ListRules(client, policyID, listOpts).AllPages(context.TODO()) + allPages, err := l7policies.ListRules(client, policyID, listOpts).AllPages(ctx) if err != nil { return nil, err } @@ -668,14 +668,14 @@ func GetL7Rules(client *gophercloud.ServiceClient, policyID string) ([]l7policie } // CreateL7Rule creates a l7 rule. -func CreateL7Rule(client *gophercloud.ServiceClient, policyID string, opts l7policies.CreateRuleOpts, lbID string) error { +func CreateL7Rule(ctx context.Context, client *gophercloud.ServiceClient, policyID string, opts l7policies.CreateRuleOpts, lbID string) error { mc := metrics.NewMetricContext("loadbalancer_l7rule", "create") - _, err := l7policies.CreateRule(context.TODO(), client, policyID, opts).Extract() + _, err := l7policies.CreateRule(ctx, client, policyID, opts).Extract() if mc.ObserveRequest(err) != nil { return err } - if _, err = WaitActiveAndGetLoadBalancer(client, lbID); err != nil { + if _, err = WaitActiveAndGetLoadBalancer(ctx, client, lbID); err != nil { return fmt.Errorf("failed to wait for load balancer ACTIVE after creating l7policy rule: %v", err) } @@ -683,14 +683,14 @@ func CreateL7Rule(client *gophercloud.ServiceClient, policyID string, opts l7pol } // UpdateHealthMonitor updates a health monitor. -func UpdateHealthMonitor(client *gophercloud.ServiceClient, monitorID string, opts monitors.UpdateOpts, lbID string) error { +func UpdateHealthMonitor(ctx context.Context, client *gophercloud.ServiceClient, monitorID string, opts monitors.UpdateOpts, lbID string) error { mc := metrics.NewMetricContext("loadbalancer_healthmonitor", "update") - _, err := monitors.Update(context.TODO(), client, monitorID, opts).Extract() + _, err := monitors.Update(ctx, client, monitorID, opts).Extract() if mc.ObserveRequest(err) != nil { return fmt.Errorf("failed to update healthmonitor: %v", err) } - if _, err := WaitActiveAndGetLoadBalancer(client, lbID); err != nil { + if _, err := WaitActiveAndGetLoadBalancer(ctx, client, lbID); err != nil { return fmt.Errorf("failed to wait for load balancer %s ACTIVE after updating healthmonitor: %v", lbID, err) } @@ -698,14 +698,14 @@ func UpdateHealthMonitor(client *gophercloud.ServiceClient, monitorID string, op } // DeleteHealthMonitor deletes a health monitor. 
-func DeleteHealthMonitor(client *gophercloud.ServiceClient, monitorID string, lbID string) error { +func DeleteHealthMonitor(ctx context.Context, client *gophercloud.ServiceClient, monitorID string, lbID string) error { mc := metrics.NewMetricContext("loadbalancer_healthmonitor", "delete") - err := monitors.Delete(context.TODO(), client, monitorID).ExtractErr() + err := monitors.Delete(ctx, client, monitorID).ExtractErr() if err != nil && !cpoerrors.IsNotFound(err) { return mc.ObserveRequest(err) } _ = mc.ObserveRequest(nil) - if _, err := WaitActiveAndGetLoadBalancer(client, lbID); err != nil { + if _, err := WaitActiveAndGetLoadBalancer(ctx, client, lbID); err != nil { return fmt.Errorf("failed to wait for load balancer %s ACTIVE after deleting healthmonitor: %v", lbID, err) } @@ -713,14 +713,14 @@ func DeleteHealthMonitor(client *gophercloud.ServiceClient, monitorID string, lb } // CreateHealthMonitor creates a health monitor in a pool. -func CreateHealthMonitor(client *gophercloud.ServiceClient, opts monitors.CreateOpts, lbID string) (*monitors.Monitor, error) { +func CreateHealthMonitor(ctx context.Context, client *gophercloud.ServiceClient, opts monitors.CreateOpts, lbID string) (*monitors.Monitor, error) { mc := metrics.NewMetricContext("loadbalancer_healthmonitor", "create") - monitor, err := monitors.Create(context.TODO(), client, opts).Extract() + monitor, err := monitors.Create(ctx, client, opts).Extract() if mc.ObserveRequest(err) != nil { return nil, fmt.Errorf("failed to create healthmonitor: %v", err) } - if _, err := WaitActiveAndGetLoadBalancer(client, lbID); err != nil { + if _, err := WaitActiveAndGetLoadBalancer(ctx, client, lbID); err != nil { return nil, fmt.Errorf("failed to wait for load balancer %s ACTIVE after creating healthmonitor: %v", lbID, err) } @@ -728,9 +728,9 @@ func CreateHealthMonitor(client *gophercloud.ServiceClient, opts monitors.Create } // GetHealthMonitor gets details about loadbalancer health monitor. 
-func GetHealthMonitor(client *gophercloud.ServiceClient, monitorID string) (*monitors.Monitor, error) { +func GetHealthMonitor(ctx context.Context, client *gophercloud.ServiceClient, monitorID string) (*monitors.Monitor, error) { mc := metrics.NewMetricContext("loadbalancer_healthmonitor", "get") - monitor, err := monitors.Get(context.TODO(), client, monitorID).Extract() + monitor, err := monitors.Get(ctx, client, monitorID).Extract() if mc.ObserveRequest(err) != nil { return nil, fmt.Errorf("failed to get healthmonitor: %v", err) } diff --git a/pkg/util/openstack/loadbalancer_serial.go b/pkg/util/openstack/loadbalancer_serial.go index 4538e48ae2..a609ce77b0 100644 --- a/pkg/util/openstack/loadbalancer_serial.go +++ b/pkg/util/openstack/loadbalancer_serial.go @@ -49,9 +49,9 @@ func getNodeAddressForLB(node *apiv1.Node) (string, error) { return addrs[0].Address, nil } -func SeriallyReconcilePoolMembers(client *gophercloud.ServiceClient, pool *pools.Pool, nodePort int, lbID string, nodes []*apiv1.Node) error { +func SeriallyReconcilePoolMembers(ctx context.Context, client *gophercloud.ServiceClient, pool *pools.Pool, nodePort int, lbID string, nodes []*apiv1.Node) error { - members, err := GetMembersbyPool(client, pool.ID) + members, err := GetMembersbyPool(ctx, client, pool.ID) if err != nil && !cpoerrors.IsNotFound(err) { return fmt.Errorf("error getting pool members %s: %v", pool.ID, err) } @@ -69,7 +69,7 @@ func SeriallyReconcilePoolMembers(client *gophercloud.ServiceClient, pool *pools } if !memberExists(members, addr, nodePort) { klog.V(2).Infof("Creating member for pool %s", pool.ID) - _, err := pools.CreateMember(context.TODO(), client, pool.ID, pools.CreateMemberOpts{ + _, err := pools.CreateMember(ctx, client, pool.ID, pools.CreateMemberOpts{ Name: cpoutil.CutString255(fmt.Sprintf("member_%s_%s_%d", node.Name, addr, nodePort)), ProtocolPort: nodePort, Address: addr, @@ -77,7 +77,7 @@ func SeriallyReconcilePoolMembers(client *gophercloud.ServiceClient, pool *pools if err != nil { return fmt.Errorf("error creating LB pool member for node: %s, %v", node.Name, err) } - if _, err := WaitActiveAndGetLoadBalancer(client, lbID); err != nil { + if _, err := WaitActiveAndGetLoadBalancer(ctx, client, lbID); err != nil { return err } } else { @@ -88,11 +88,11 @@ func SeriallyReconcilePoolMembers(client *gophercloud.ServiceClient, pool *pools } for _, member := range members { klog.V(2).Infof("Deleting obsolete member %s for pool %s address %s", member.ID, pool.ID, member.Address) - err := pools.DeleteMember(context.TODO(), client, pool.ID, member.ID).ExtractErr() + err := pools.DeleteMember(ctx, client, pool.ID, member.ID).ExtractErr() if err != nil && !cpoerrors.IsNotFound(err) { return fmt.Errorf("error deleting obsolete member %s for pool %s address %s: %v", member.ID, pool.ID, member.Address, err) } - if _, err := WaitActiveAndGetLoadBalancer(client, lbID); err != nil { + if _, err := WaitActiveAndGetLoadBalancer(ctx, client, lbID); err != nil { return err } } diff --git a/pkg/util/openstack/security_group.go b/pkg/util/openstack/security_group.go index 476d3d1b85..caee66697f 100644 --- a/pkg/util/openstack/security_group.go +++ b/pkg/util/openstack/security_group.go @@ -24,9 +24,9 @@ import ( "k8s.io/cloud-provider-openstack/pkg/metrics" ) -func GetSecurityGroupRules(client *gophercloud.ServiceClient, opts rules.ListOpts) ([]rules.SecGroupRule, error) { +func GetSecurityGroupRules(ctx context.Context, client *gophercloud.ServiceClient, opts rules.ListOpts) ([]rules.SecGroupRule, error) { 
mc := metrics.NewMetricContext("security_group_rule", "list") - page, err := rules.List(client, opts).AllPages(context.TODO()) + page, err := rules.List(client, opts).AllPages(ctx) if mc.ObserveRequest(err) != nil { return nil, err } diff --git a/tests/sanity/cinder/fakecloud.go b/tests/sanity/cinder/fakecloud.go index 85d8216402..e366a312b3 100644 --- a/tests/sanity/cinder/fakecloud.go +++ b/tests/sanity/cinder/fakecloud.go @@ -1,6 +1,7 @@ package sanity import ( + "context" "fmt" "math/rand" "strconv" @@ -35,7 +36,7 @@ func getfakecloud() *cloud { var _ openstack.IOpenStack = &cloud{} // Fake Cloud -func (cloud *cloud) CreateVolume(opts *volumes.CreateOpts, _ volumes.SchedulerHintOptsBuilder) (*volumes.Volume, error) { +func (cloud *cloud) CreateVolume(_ context.Context, opts *volumes.CreateOpts, _ volumes.SchedulerHintOptsBuilder) (*volumes.Volume, error) { vol := &volumes.Volume{ ID: randString(10), Name: opts.Name, @@ -52,14 +53,14 @@ func (cloud *cloud) CreateVolume(opts *volumes.CreateOpts, _ volumes.SchedulerHi return vol, nil } -func (cloud *cloud) DeleteVolume(volumeID string) error { +func (cloud *cloud) DeleteVolume(_ context.Context, volumeID string) error { // delete the volume from cloud struct delete(cloud.volumes, volumeID) return nil } -func (cloud *cloud) AttachVolume(instanceID, volumeID string) (string, error) { +func (cloud *cloud) AttachVolume(_ context.Context, instanceID, volumeID string) (string, error) { // update the volume with attachment vol, ok := cloud.volumes[volumeID] @@ -78,7 +79,7 @@ func (cloud *cloud) AttachVolume(instanceID, volumeID string) (string, error) { return "", notFoundError() } -func (cloud *cloud) ListVolumes(limit int, marker string) ([]volumes.Volume, string, error) { +func (cloud *cloud) ListVolumes(_ context.Context, limit int, marker string) ([]volumes.Volume, string, error) { var vollist []volumes.Volume @@ -102,27 +103,27 @@ func (cloud *cloud) ListVolumes(limit int, marker string) ([]volumes.Volume, str return vollist, retToken, nil } -func (cloud *cloud) WaitDiskAttached(instanceID string, volumeID string) error { +func (cloud *cloud) WaitDiskAttached(_ context.Context, instanceID string, volumeID string) error { return nil } -func (cloud *cloud) DetachVolume(instanceID, volumeID string) error { +func (cloud *cloud) DetachVolume(_ context.Context, instanceID, volumeID string) error { return nil } -func (cloud *cloud) WaitDiskDetached(instanceID string, volumeID string) error { +func (cloud *cloud) WaitDiskDetached(_ context.Context, instanceID string, volumeID string) error { return nil } -func (cloud *cloud) WaitVolumeTargetStatus(volumeID string, tStatus []string) error { +func (cloud *cloud) WaitVolumeTargetStatus(_ context.Context, volumeID string, tStatus []string) error { return nil } -func (cloud *cloud) GetAttachmentDiskPath(instanceID, volumeID string) (string, error) { +func (cloud *cloud) GetAttachmentDiskPath(_ context.Context, instanceID, volumeID string) (string, error) { return cinder.FakeDevicePath, nil } -func (cloud *cloud) GetVolumesByName(name string) ([]volumes.Volume, error) { +func (cloud *cloud) GetVolumesByName(_ context.Context, name string) ([]volumes.Volume, error) { var vlist []volumes.Volume for _, v := range cloud.volumes { if v.Name == name { @@ -134,8 +135,8 @@ func (cloud *cloud) GetVolumesByName(name string) ([]volumes.Volume, error) { return vlist, nil } -func (cloud *cloud) GetVolumeByName(n string) (*volumes.Volume, error) { - vols, err := cloud.GetVolumesByName(n) +func (cloud *cloud) 
GetVolumeByName(ctx context.Context, n string) (*volumes.Volume, error) { + vols, err := cloud.GetVolumesByName(ctx, n) if err != nil { return nil, err } @@ -151,7 +152,7 @@ func (cloud *cloud) GetVolumeByName(n string) (*volumes.Volume, error) { return &vols[0], nil } -func (cloud *cloud) GetVolume(volumeID string) (*volumes.Volume, error) { +func (cloud *cloud) GetVolume(_ context.Context, volumeID string) (*volumes.Volume, error) { vol, ok := cloud.volumes[volumeID] if !ok { @@ -169,7 +170,7 @@ func invalidError() error { return gophercloud.ErrUnexpectedResponseCode{Actual: 400} } -func (cloud *cloud) CreateSnapshot(name, volID string, tags map[string]string) (*snapshots.Snapshot, error) { +func (cloud *cloud) CreateSnapshot(_ context.Context, name, volID string, tags map[string]string) (*snapshots.Snapshot, error) { snap := &snapshots.Snapshot{ ID: randString(10), @@ -183,7 +184,7 @@ func (cloud *cloud) CreateSnapshot(name, volID string, tags map[string]string) ( return snap, nil } -func (cloud *cloud) ListSnapshots(filters map[string]string) ([]snapshots.Snapshot, string, error) { +func (cloud *cloud) ListSnapshots(_ context.Context, filters map[string]string) ([]snapshots.Snapshot, string, error) { var snaplist []snapshots.Snapshot startingToken := filters["Marker"] limitfilter := filters["Limit"] @@ -222,14 +223,14 @@ func (cloud *cloud) ListSnapshots(filters map[string]string) ([]snapshots.Snapsh return snaplist, retToken, nil } -func (cloud *cloud) DeleteSnapshot(snapID string) error { +func (cloud *cloud) DeleteSnapshot(_ context.Context, snapID string) error { delete(cloud.snapshots, snapID) return nil } -func (cloud *cloud) GetSnapshotByID(snapshotID string) (*snapshots.Snapshot, error) { +func (cloud *cloud) GetSnapshotByID(_ context.Context, snapshotID string) (*snapshots.Snapshot, error) { snap, ok := cloud.snapshots[snapshotID] @@ -240,11 +241,11 @@ func (cloud *cloud) GetSnapshotByID(snapshotID string) (*snapshots.Snapshot, err return snap, nil } -func (cloud *cloud) WaitSnapshotReady(snapshotID string) (string, error) { +func (cloud *cloud) WaitSnapshotReady(_ context.Context, snapshotID string) (string, error) { return "available", nil } -func (cloud *cloud) CreateBackup(name, volID, snapshotID, availabilityZone string, tags map[string]string) (*backups.Backup, error) { +func (cloud *cloud) CreateBackup(_ context.Context, name, volID, snapshotID, availabilityZone string, tags map[string]string) (*backups.Backup, error) { backup := &backups.Backup{ ID: randString(10), @@ -260,7 +261,7 @@ func (cloud *cloud) CreateBackup(name, volID, snapshotID, availabilityZone strin return backup, nil } -func (cloud *cloud) ListBackups(filters map[string]string) ([]backups.Backup, error) { +func (cloud *cloud) ListBackups(_ context.Context, filters map[string]string) ([]backups.Backup, error) { var backuplist []backups.Backup startingToken := filters["Marker"] limitfilter := filters["Limit"] @@ -295,13 +296,13 @@ func (cloud *cloud) ListBackups(filters map[string]string) ([]backups.Backup, er return backuplist, nil } -func (cloud *cloud) DeleteBackup(backupID string) error { +func (cloud *cloud) DeleteBackup(_ context.Context, backupID string) error { delete(cloud.backups, backupID) return nil } -func (cloud *cloud) GetBackupByID(backupID string) (*backups.Backup, error) { +func (cloud *cloud) GetBackupByID(_ context.Context, backupID string) (*backups.Backup, error) { backup, ok := cloud.backups[backupID] if !ok { @@ -315,7 +316,7 @@ func (cloud *cloud) BackupsAreEnabled() (bool, error) 
{ return true, nil } -func (cloud *cloud) WaitBackupReady(backupID string, snapshotSize int, backupMaxDurationSecondsPerGB int) (string, error) { +func (cloud *cloud) WaitBackupReady(_ context.Context, backupID string, snapshotSize int, backupMaxDurationSecondsPerGB int) (string, error) { return "", nil } @@ -328,7 +329,7 @@ func randString(n int) string { return string(b) } -func (cloud *cloud) GetInstanceByID(instanceID string) (*servers.Server, error) { +func (cloud *cloud) GetInstanceByID(_ context.Context, instanceID string) (*servers.Server, error) { if _, ok := cloud.instances[cinder.FakeInstanceID]; !ok { cloud.instances[cinder.FakeInstanceID] = &servers.Server{} } @@ -341,7 +342,7 @@ func (cloud *cloud) GetInstanceByID(instanceID string) (*servers.Server, error) return inst, nil } -func (cloud *cloud) ExpandVolume(volumeID string, status string, size int) error { +func (cloud *cloud) ExpandVolume(_ context.Context, volumeID string, status string, size int) error { return nil } @@ -359,6 +360,6 @@ func (cloud *cloud) GetBlockStorageOpts() openstack.BlockStorageOpts { return openstack.BlockStorageOpts{} } -func (cloud *cloud) ResolveVolumeListToUUIDs(v string) (string, error) { +func (cloud *cloud) ResolveVolumeListToUUIDs(_ context.Context, v string) (string, error) { return v, nil } diff --git a/tests/sanity/manila/fakemanilaclient.go b/tests/sanity/manila/fakemanilaclient.go index 1d86fae09c..a1531485bb 100644 --- a/tests/sanity/manila/fakemanilaclient.go +++ b/tests/sanity/manila/fakemanilaclient.go @@ -17,6 +17,7 @@ limitations under the License. package sanity import ( + "context" "fmt" "strconv" @@ -41,7 +42,7 @@ var ( type fakeManilaClientBuilder struct{} -func (b fakeManilaClientBuilder) New(o *client.AuthOpts) (manilaclient.Interface, error) { +func (b fakeManilaClientBuilder) New(ctx context.Context, o *client.AuthOpts) (manilaclient.Interface, error) { return &fakeManilaClient{}, nil } @@ -73,7 +74,7 @@ func (c fakeManilaClient) GetMicroversion() string { func (c fakeManilaClient) SetMicroversion(_ string) { } -func (c fakeManilaClient) GetShareByID(shareID string) (*shares.Share, error) { +func (c fakeManilaClient) GetShareByID(_ context.Context, shareID string) (*shares.Share, error) { s, ok := fakeShares[strToInt(shareID)] if !ok { return nil, gophercloud.ErrResourceNotFound{} @@ -82,7 +83,7 @@ func (c fakeManilaClient) GetShareByID(shareID string) (*shares.Share, error) { return s, nil } -func (c fakeManilaClient) GetShareByName(shareName string) (*shares.Share, error) { +func (c fakeManilaClient) GetShareByName(ctx context.Context, shareName string) (*shares.Share, error) { var shareID string for _, share := range fakeShares { if share.Name == shareName { @@ -95,10 +96,10 @@ func (c fakeManilaClient) GetShareByName(shareName string) (*shares.Share, error return nil, gophercloud.ErrResourceNotFound{} } - return c.GetShareByID(shareID) + return c.GetShareByID(ctx, shareID) } -func (c fakeManilaClient) CreateShare(opts shares.CreateOptsBuilder) (*shares.Share, error) { +func (c fakeManilaClient) CreateShare(_ context.Context, opts shares.CreateOptsBuilder) (*shares.Share, error) { var res shares.CreateResult res.Body = opts @@ -118,7 +119,7 @@ func (c fakeManilaClient) CreateShare(opts shares.CreateOptsBuilder) (*shares.Sh return share, nil } -func (c fakeManilaClient) DeleteShare(shareID string) error { +func (c fakeManilaClient) DeleteShare(_ context.Context, shareID string) error { id := strToInt(shareID) if _, ok := fakeShares[id]; !ok { return 
gophercloud.ErrResourceNotFound{} @@ -128,8 +129,8 @@ func (c fakeManilaClient) DeleteShare(shareID string) error { return nil } -func (c fakeManilaClient) ExtendShare(shareID string, opts shares.ExtendOptsBuilder) error { - share, err := c.GetShareByID(shareID) +func (c fakeManilaClient) ExtendShare(ctx context.Context, shareID string, opts shares.ExtendOptsBuilder) error { + share, err := c.GetShareByID(ctx, shareID) if err != nil { return err } @@ -151,7 +152,7 @@ func (c fakeManilaClient) ExtendShare(shareID string, opts shares.ExtendOptsBuil return nil } -func (c fakeManilaClient) GetExportLocations(shareID string) ([]shares.ExportLocation, error) { +func (c fakeManilaClient) GetExportLocations(_ context.Context, shareID string) ([]shares.ExportLocation, error) { if !shareExists(shareID) { return nil, gophercloud.ErrResourceNotFound{} } @@ -159,15 +160,15 @@ func (c fakeManilaClient) GetExportLocations(shareID string) ([]shares.ExportLoc return []shares.ExportLocation{{Path: "fake-server:/fake-path"}}, nil } -func (c fakeManilaClient) SetShareMetadata(shareID string, opts shares.SetMetadataOptsBuilder) (map[string]string, error) { +func (c fakeManilaClient) SetShareMetadata(_ context.Context, shareID string, opts shares.SetMetadataOptsBuilder) (map[string]string, error) { return nil, nil } -func (c fakeManilaClient) GetExtraSpecs(shareTypeID string) (sharetypes.ExtraSpecs, error) { +func (c fakeManilaClient) GetExtraSpecs(_ context.Context, shareTypeID string) (sharetypes.ExtraSpecs, error) { return map[string]interface{}{"snapshot_support": "True", "create_share_from_snapshot_support": "True"}, nil } -func (c fakeManilaClient) GetShareTypes() ([]sharetypes.ShareType, error) { +func (c fakeManilaClient) GetShareTypes(_ context.Context) ([]sharetypes.ShareType, error) { return []sharetypes.ShareType{ { ID: "914dbaad-7242-4c34-a9ee-aa3831189972", @@ -179,11 +180,11 @@ func (c fakeManilaClient) GetShareTypes() ([]sharetypes.ShareType, error) { }, nil } -func (c fakeManilaClient) GetShareTypeIDFromName(shareTypeName string) (string, error) { +func (c fakeManilaClient) GetShareTypeIDFromName(_ context.Context, shareTypeName string) (string, error) { return "", nil } -func (c fakeManilaClient) GetAccessRights(shareID string) ([]shares.AccessRight, error) { +func (c fakeManilaClient) GetAccessRights(_ context.Context, shareID string) ([]shares.AccessRight, error) { if !shareExists(shareID) { return nil, gophercloud.ErrResourceNotFound{} } @@ -198,7 +199,7 @@ func (c fakeManilaClient) GetAccessRights(shareID string) ([]shares.AccessRight, return accessRights, nil } -func (c fakeManilaClient) GrantAccess(shareID string, opts shares.GrantAccessOptsBuilder) (*shares.AccessRight, error) { +func (c fakeManilaClient) GrantAccess(_ context.Context, shareID string, opts shares.GrantAccessOptsBuilder) (*shares.AccessRight, error) { if !shareExists(shareID) { return nil, gophercloud.ErrResourceNotFound{} } @@ -221,7 +222,7 @@ func (c fakeManilaClient) GrantAccess(shareID string, opts shares.GrantAccessOpt return accessRight, nil } -func (c fakeManilaClient) GetSnapshotByID(snapID string) (*snapshots.Snapshot, error) { +func (c fakeManilaClient) GetSnapshotByID(_ context.Context, snapID string) (*snapshots.Snapshot, error) { s, ok := fakeSnapshots[strToInt(snapID)] if !ok { return nil, gophercloud.ErrUnexpectedResponseCode{Actual: 404} @@ -230,7 +231,7 @@ func (c fakeManilaClient) GetSnapshotByID(snapID string) (*snapshots.Snapshot, e return s, nil } -func (c fakeManilaClient) GetSnapshotByName(snapName 
string) (*snapshots.Snapshot, error) { +func (c fakeManilaClient) GetSnapshotByName(ctx context.Context, snapName string) (*snapshots.Snapshot, error) { var snapID string for _, snap := range fakeSnapshots { if snap.Name == snapName { @@ -243,10 +244,10 @@ func (c fakeManilaClient) GetSnapshotByName(snapName string) (*snapshots.Snapsho return nil, gophercloud.ErrResourceNotFound{Name: snapName, ResourceType: "snapshot"} } - return c.GetSnapshotByID(snapID) + return c.GetSnapshotByID(ctx, snapID) } -func (c fakeManilaClient) CreateSnapshot(opts snapshots.CreateOptsBuilder) (*snapshots.Snapshot, error) { +func (c fakeManilaClient) CreateSnapshot(_ context.Context, opts snapshots.CreateOptsBuilder) (*snapshots.Snapshot, error) { var res snapshots.CreateResult res.Body = opts @@ -268,7 +269,7 @@ func (c fakeManilaClient) CreateSnapshot(opts snapshots.CreateOptsBuilder) (*sna return snap, nil } -func (c fakeManilaClient) DeleteSnapshot(snapID string) error { +func (c fakeManilaClient) DeleteSnapshot(_ context.Context, snapID string) error { id := strToInt(snapID) if _, ok := fakeSnapshots[id]; !ok { return gophercloud.ErrResourceNotFound{} @@ -278,6 +279,6 @@ func (c fakeManilaClient) DeleteSnapshot(snapID string) error { return nil } -func (c fakeManilaClient) GetUserMessages(opts messages.ListOptsBuilder) ([]messages.Message, error) { +func (c fakeManilaClient) GetUserMessages(_ context.Context, opts messages.ListOptsBuilder) ([]messages.Message, error) { return nil, nil }
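The change above is mechanical but easy to get wrong when extending it, so here is a minimal, self-contained sketch of the pattern this patch applies throughout the OpenStack helpers: accept the caller's context.Context as the first parameter and hand it to every outbound call, instead of minting a fresh context.TODO() at each call site. Every name in the sketch (fakeAPI, GetThing, EnsureThing) is a hypothetical stand-in, not code from this repository.

// Sketch of the context-threading pattern, under the assumptions stated above.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// fakeAPI stands in for an SDK client such as *gophercloud.ServiceClient.
type fakeAPI struct{}

// GetThing honours the cancellation or deadline carried by ctx.
func (fakeAPI) GetThing(ctx context.Context, id string) (string, error) {
	select {
	case <-ctx.Done():
		return "", ctx.Err()
	case <-time.After(10 * time.Millisecond):
		return "thing-" + id, nil
	}
}

// Before: EnsureThing(c, id) called c.GetThing(context.TODO(), id).
// After: the caller's ctx is threaded through, so one cancellation
// stops the whole call chain.
func EnsureThing(ctx context.Context, c fakeAPI, id string) (string, error) {
	thing, err := c.GetThing(ctx, id)
	if err != nil {
		return "", fmt.Errorf("getting thing %s: %w", id, err)
	}
	return thing, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	if thing, err := EnsureThing(ctx, fakeAPI{}, "42"); err == nil {
		fmt.Println("got", thing)
	}

	// A cancelled context now propagates all the way down.
	cancelled, stop := context.WithCancel(context.Background())
	stop()
	if _, err := EnsureThing(cancelled, fakeAPI{}, "42"); errors.Is(err, context.Canceled) {
		fmt.Println("call chain observed cancellation")
	}
}

The payoff is that a deadline or cancellation set once at the entry point (for example in ensureLoadBalancerDeleted or updateOctaviaLoadBalancer) now reaches every downstream request made on its behalf.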
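One subtlety worth noting in WaitActiveAndGetLoadBalancer and waitLoadbalancerDeleted: the patch passes ctx into the Get call inside the wait.ExponentialBackoff condition, so each individual request is now cancellable, but the sleep between attempts is still governed only by the backoff settings. The hand-rolled loop below is only a sketch of what a fully context-aware wait could look like (getStatus and waitDeleted are assumed names, not code from this patch); the k8s.io/apimachinery wait package also ships context-aware variants such as ExponentialBackoffWithContext that serve the same purpose.

// Sketch of a wait loop that aborts both the request and the backoff sleep
// when the context is done; an illustration, not the patched helper.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errStillActive = errors.New("load balancer still present")

// getStatus stands in for loadbalancers.Get(ctx, client, id).Extract().
func getStatus(ctx context.Context, attempt int) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	if attempt < 3 {
		return errStillActive
	}
	return nil // pretend the LB is gone after a few attempts
}

// waitDeleted polls until getStatus succeeds, backing off between attempts.
func waitDeleted(ctx context.Context) error {
	delay := 50 * time.Millisecond
	for attempt := 1; ; attempt++ {
		if err := getStatus(ctx, attempt); err == nil {
			return nil
		} else if !errors.Is(err, errStillActive) {
			return err // e.g. ctx cancelled inside the request
		}
		select {
		case <-ctx.Done():
			return ctx.Err() // abort the wait itself, not only the request
		case <-time.After(delay):
			delay *= 2
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	fmt.Println("wait result:", waitDeleted(ctx))
}

The select on ctx.Done() between attempts is the piece the stock ExponentialBackoff loop does not provide on its own.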
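On the interface side (openstack.IOpenStack, manilaclient.Interface), the same change shows up as a context.Context first parameter on every method. The sketch below, built around a hypothetical trimmed-down Volumes interface and fakeCloud type, mirrors how the fakes under tests/sanity keep satisfying their interfaces: implementations that do no I/O ignore the context with the blank identifier, and test callers pass context.TODO() or context.Background().

// Sketch of the interface-plus-fake pattern, under the assumptions above.
package main

import (
	"context"
	"fmt"
)

// Volumes is a hypothetical, trimmed-down cloud interface after the context
// plumbing: ctx is always the first parameter.
type Volumes interface {
	CreateVolume(ctx context.Context, name string) (string, error)
	DeleteVolume(ctx context.Context, id string) error
}

// fakeCloud satisfies Volumes for tests; it performs no I/O, so it ignores ctx.
type fakeCloud struct {
	volumes map[string]string
}

func (f *fakeCloud) CreateVolume(_ context.Context, name string) (string, error) {
	id := fmt.Sprintf("vol-%d", len(f.volumes)+1)
	f.volumes[id] = name
	return id, nil
}

func (f *fakeCloud) DeleteVolume(_ context.Context, id string) error {
	delete(f.volumes, id)
	return nil
}

func main() {
	var cloud Volumes = &fakeCloud{volumes: map[string]string{}}

	// Test code with no meaningful caller context typically passes
	// context.TODO(), as the updated unit tests in this patch do.
	id, err := cloud.CreateVolume(context.TODO(), "scratch")
	if err != nil {
		panic(err)
	}
	fmt.Println("created", id)
	_ = cloud.DeleteVolume(context.TODO(), id)
}

Using the blank identifier documents that the fake intentionally ignores cancellation, while real implementations forward ctx to the SDK calls.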