diff --git a/commands/cmd_show_restore_points.go b/commands/cmd_show_restore_points.go index 0a4b865..62cf500 100644 --- a/commands/cmd_show_restore_points.go +++ b/commands/cmd_show_restore_points.go @@ -45,6 +45,17 @@ func makeCmdShowRestorePoints() *CmdShowRestorePoints { showRestorePointsOptions.ConfigDirectory = newCmd.parser.String("config-directory", "", util.GetOptionalFlagMsg("Directory where "+vclusterops.ConfigFileName+" is located")) + showRestorePointsOptions.FilterOptions.ArchiveName = newCmd.parser.String("restore-point-archive", "", + util.GetOptionalFlagMsg("Archive name to filter restore points with")) + showRestorePointsOptions.FilterOptions.ArchiveID = newCmd.parser.String("restore-point-id", "", + util.GetOptionalFlagMsg("ID to filter restore points with")) + showRestorePointsOptions.FilterOptions.ArchiveIndex = newCmd.parser.String("restore-point-index", "", + util.GetOptionalFlagMsg("Index to filter restore points with")) + showRestorePointsOptions.FilterOptions.StartTimestamp = newCmd.parser.String("start-timestamp", "", + util.GetOptionalFlagMsg("Only show restore points created no earlier than this")) + showRestorePointsOptions.FilterOptions.EndTimestamp = newCmd.parser.String("end-timestamp", "", + util.GetOptionalFlagMsg("Only show restore points created no later than this")) + newCmd.showRestorePointsOptions = &showRestorePointsOptions return newCmd diff --git a/commands/cmd_stop_db.go b/commands/cmd_stop_db.go index 244c3e8..75e306b 100644 --- a/commands/cmd_stop_db.go +++ b/commands/cmd_stop_db.go @@ -17,6 +17,7 @@ package commands import ( "flag" + "fmt" "strconv" "github.com/vertica/vcluster/vclusterops" @@ -58,6 +59,10 @@ func makeCmdStopDB() *CmdStopDB { util.GetOptionalFlagMsg("Forcefully use the user's input instead of reading the options from "+vclusterops.ConfigFileName)) stopDBOptions.ConfigDirectory = newCmd.parser.String("config-directory", "", util.GetOptionalFlagMsg("Directory where "+vclusterops.ConfigFileName+" is located")) + stopDBOptions.Sandbox = newCmd.parser.String("sandbox", "", + util.GetOptionalFlagMsg("Name of the sandbox in which the database should be stopped")) + stopDBOptions.MainCluster = newCmd.parser.Bool("main-cluster-only", false, util.GetOptionalFlagMsg("Stop the database only on the main cluster."+ + " Use this option when sandboxes are involved")) // Eon flags newCmd.isEon = newCmd.parser.Bool("eon-mode", false, util.GetEonFlagMsg("indicate if the database is an Eon db."+ @@ -110,7 +115,6 @@ func (c *CmdStopDB) Parse(inputArgv []string, logger vlog.Printer) error { if !util.IsOptionSet(c.parser, "config-directory") { c.stopDBOptions.ConfigDirectory = nil } - return c.validateParse(logger) } @@ -142,7 +146,17 @@ func (c *CmdStopDB) Run(vcc vclusterops.VClusterCommands) error { vcc.Log.Error(err, "failed to stop the database") return err } - - vcc.Log.PrintInfo("Stopped a database with name %s", *options.DBName) + msg := fmt.Sprintf("Stopped a database with name %s", *options.DBName) + if *options.Sandbox != "" { + sandboxMsg := fmt.Sprintf(" on sandbox %s", *options.Sandbox) + vcc.Log.PrintInfo(msg + sandboxMsg) + return nil + } + if *options.MainCluster { + stopMsg := " on main cluster" + vcc.Log.PrintInfo(msg + stopMsg) + return nil + } + vcc.Log.PrintInfo(msg) return nil } diff --git a/vclusterops/cluster_op_engine_context.go b/vclusterops/cluster_op_engine_context.go index 0e11d07..a2ef138 100644 --- a/vclusterops/cluster_op_engine_context.go +++ b/vclusterops/cluster_op_engine_context.go @@ -27,7 +27,7 @@ type opEngineExecContext struct
{ // This field is specifically used for sandboxing // as sandboxing requires all nodes in the subcluster to be sandboxed to be UP. upScInfo map[string]string // map with UP hosts as keys and their subcluster names as values. - sandboxingHosts []string // List of hosts that should run sandboxing command + upHostsToSandboxes map[string]string // map with UP hosts as keys and their corresponding sandbox names as values. defaultSCName string // store the default subcluster name of the database hostsWithLatestCatalog []string primaryHostsWithLatestCatalog []string diff --git a/vclusterops/https_check_db_running_op.go b/vclusterops/https_check_db_running_op.go index d5fe284..17797c7 100644 --- a/vclusterops/https_check_db_running_op.go +++ b/vclusterops/https_check_db_running_op.go @@ -34,6 +34,8 @@ const ( StopDB StartDB ReviveDB + + opName = "HTTPSCheckDBRunningOp" ) func (op opType) String() string { @@ -66,7 +68,9 @@ func (e *DBIsRunningError) Error() string { type httpsCheckRunningDBOp struct { opBase opHTTPSBase - opType opType + opType opType + sandbox string // check if DB is running on specified sandbox + mainCluster bool // check if DB is running on the main cluster. } func makeHTTPSCheckRunningDBOp(logger vlog.Printer, hosts []string, @@ -74,11 +78,32 @@ func makeHTTPSCheckRunningDBOp(logger vlog.Printer, hosts []string, httpsPassword *string, operationType opType, ) (httpsCheckRunningDBOp, error) { op := httpsCheckRunningDBOp{} - op.name = "HTTPSCheckDBRunningOp" + op.name = opName op.logger = logger.WithName(op.name) op.hosts = hosts op.useHTTPPassword = useHTTPPassword + err := util.ValidateUsernameAndPassword(op.name, useHTTPPassword, userName) + if err != nil { + return op, err + } + op.userName = userName + op.httpsPassword = httpsPassword + op.opType = operationType + return op, nil +} + +func makeHTTPSCheckRunningDBWithSandboxOp(logger vlog.Printer, hosts []string, + useHTTPPassword bool, userName string, sandbox string, mainCluster bool, + httpsPassword *string, operationType opType, ) (httpsCheckRunningDBOp, error) { op := httpsCheckRunningDBOp{} + op.name = opName + op.logger = logger.WithName(op.name) + op.hosts = hosts + op.useHTTPPassword = useHTTPPassword + op.sandbox = sandbox // check if DB is running on specified sandbox + op.mainCluster = mainCluster // check if DB is running on the main cluster err := util.ValidateUsernameAndPassword(op.name, useHTTPPassword, userName) if err != nil { return op, err @@ -198,6 +223,25 @@ func (op *httpsCheckRunningDBOp) isDBRunningOnHost(host string, return status, msg, nil } +func (op *httpsCheckRunningDBOp) accumulateSandboxedAndMainHosts(sandboxingHosts map[string]string, + mainClusterHosts map[string]struct{}, nodesState *nodesStateInfo) { + if op.sandbox == "" && !op.mainCluster { + return + } + + nodeList := nodesState.NodeList + if len(nodeList) > 0 { + for _, node := range nodeList { + if node.Sandbox == op.sandbox && op.sandbox != "" { + sandboxingHosts[node.Address] = node.State + } + if op.mainCluster && node.Sandbox == "" { + mainClusterHosts[node.Address] = struct{}{} + } + } + } +} + // processResult will look at all of the results that come back from the hosts. // We don't return an error if all of the nodes are down. Otherwise, an error is // returned.
@@ -209,6 +253,8 @@ func (op *httpsCheckRunningDBOp) processResult(_ *opEngineExecContext) error { upHosts := make(map[string]bool) downHosts := make(map[string]bool) exceptionHosts := make(map[string]bool) + sandboxedHosts := make(map[string]string) + mainClusterHosts := make(map[string]struct{}) // print msg msg := "" for host, result := range op.clusterHTTPRequest.ResultCollection { @@ -226,6 +272,7 @@ func (op *httpsCheckRunningDBOp) processResult(_ *opEngineExecContext) error { } upHosts[host] = true + // a passing result means that the db isn't down nodesStates := nodesStateInfo{} err := op.parseAndCheckResponse(host, result.content, &nodesStates) @@ -237,6 +284,9 @@ func (op *httpsCheckRunningDBOp) processResult(_ *opEngineExecContext) error { msg = result.content continue } + + op.accumulateSandboxedAndMainHosts(sandboxedHosts, mainClusterHosts, &nodesStates) + status, checkMsg, err := op.isDBRunningOnHost(host, &nodesStates, result) if err != nil { return fmt.Errorf("[%s] error happened during checking DB running on host %s, details: %w", @@ -247,13 +297,16 @@ func (op *httpsCheckRunningDBOp) processResult(_ *opEngineExecContext) error { msg = checkMsg } - return op.handleDBRunning(allErrs, msg, upHosts, downHosts, exceptionHosts) + return op.handleDBRunning(allErrs, msg, upHosts, downHosts, exceptionHosts, sandboxedHosts, mainClusterHosts) } -func (op *httpsCheckRunningDBOp) handleDBRunning(allErrs error, msg string, upHosts, downHosts, exceptionHosts map[string]bool) error { - op.logger.Info("check db running results", "up hosts", upHosts, "down hosts", downHosts, "hosts with status unknown", exceptionHosts) - // no DB is running on hosts, return a passed result - if len(upHosts) == 0 { +func (op *httpsCheckRunningDBOp) handleDBRunning(allErrs error, msg string, upHosts, downHosts, exceptionHosts map[string]bool, + sandboxedHosts map[string]string, mainClusterHosts map[string]struct{}) error { + op.logger.Info("check db running results", "up hosts", upHosts, "down hosts", downHosts, "hosts with status unknown", exceptionHosts, + "sandboxed hosts", sandboxedHosts) + + dbDown := op.checkProcessedResult(sandboxedHosts, mainClusterHosts, upHosts) + if dbDown { return nil } op.logger.Info("Check DB running", "detail", msg) @@ -273,6 +326,46 @@ func (op *httpsCheckRunningDBOp) handleDBRunning(allErrs error, msg string, upHo return errors.Join(allErrs, &DBIsRunningError{Detail: msg}) } +func (op *httpsCheckRunningDBOp) checkProcessedResult(sandboxedHosts map[string]string, + mainClusterHosts map[string]struct{}, upHosts map[string]bool) bool { + // no DB is running on hosts, return a passed result + if len(upHosts) == 0 { + if op.sandbox != "" || op.mainCluster { + op.logger.PrintWarning("All the nodes in the database are down") + } + return true + } + + // Check if any of the sandboxed hosts is UP + // sandboxedHosts would be empty if op.sandbox is "" + isSandboxUp := false + for host := range sandboxedHosts { + if _, ok := upHosts[host]; ok { + isSandboxUp = true + break + } + } + + isMainHostUp := false + for host := range mainClusterHosts { + if _, ok := upHosts[host]; ok { + isMainHostUp = true + break + } + } + + // If all sandboxed hosts are down, DB is down for the given sandbox + if !isSandboxUp && op.sandbox != "" { + op.logger.Info("all hosts in the sandbox: " + op.sandbox + " are down") + return true + } + if !isMainHostUp && op.mainCluster { + op.logger.Info("all hosts in the main cluster are down") + return true + } + return false +} + func (op *httpsCheckRunningDBOp) 
execute(execContext *opEngineExecContext) error { op.logger.Info("Execute() called", "opType", op.opType) switch op.opType { diff --git a/vclusterops/https_check_subcluster_sandbox_op.go b/vclusterops/https_check_subcluster_sandbox_op.go index 40cfcab..46d8e38 100644 --- a/vclusterops/https_check_subcluster_sandbox_op.go +++ b/vclusterops/https_check_subcluster_sandbox_op.go @@ -88,8 +88,8 @@ type scResps struct { func (op *httpsCheckSubclusterSandboxOp) processResult(execContext *opEngineExecContext) error { var allErrs error keysToRemove := make(map[string]struct{}) - existingSandboxedHosts := make(map[string]struct{}) - mainClusterHosts := make(map[string]struct{}) + existingSandboxedHosts := make(map[string]string) + mainClusterHosts := make(map[string]string) for host, result := range op.clusterHTTPRequest.ResultCollection { op.logResponse(host, result) @@ -136,47 +136,48 @@ func (op *httpsCheckSubclusterSandboxOp) processResult(execContext *opEngineExec // Process sandboxing info for _, scInfo := range subclusterResp.SCInfoList { - mainHosts, removeHosts, sandboxedHosts := op.processScInfo(scInfo, execContext) + mainHosts, sandboxedHosts, removeHosts := op.processScInfo(scInfo, execContext) // Accumulate maincluster hosts, hosts to be removed // and hosts that are sandboxed - for _, host := range mainHosts { - mainClusterHosts[host] = struct{}{} + for host, sb := range mainHosts { + mainClusterHosts[host] = sb } for h := range removeHosts { keysToRemove[h] = struct{}{} } - for h := range sandboxedHosts { - existingSandboxedHosts[h] = struct{}{} + for h, sb := range sandboxedHosts { + existingSandboxedHosts[h] = sb } } } // Use updated scInfo - for host := range existingSandboxedHosts { + for host, sb := range existingSandboxedHosts { // Just need one up host from the existing sandbox // This will be used to add new subcluster to an existing sandbox - execContext.sandboxingHosts = append(execContext.sandboxingHosts, host) + execContext.upHostsToSandboxes[host] = sb break } - for host := range mainClusterHosts { + for host, sb := range mainClusterHosts { if _, exists := keysToRemove[host]; !exists { // Just one up host from main cluster - execContext.sandboxingHosts = append(execContext.sandboxingHosts, host) + execContext.upHostsToSandboxes[host] = sb break } } return allErrs } func (op *httpsCheckSubclusterSandboxOp) processScInfo(scInfo subclusterSandboxInfo, - execContext *opEngineExecContext) (mainClusterHosts []string, keysToRemove, existingSandboxedHosts map[string]struct{}) { + execContext *opEngineExecContext) (mainClusterHosts, existingSandboxedHosts map[string]string, keysToRemove map[string]struct{}) { keysToRemove = make(map[string]struct{}) + mainClusterHosts = make(map[string]string) for host, sc := range execContext.upScInfo { if scInfo.Sandbox != "" && scInfo.SCName == sc { keysToRemove, existingSandboxedHosts = op.processSandboxedSCInfo(scInfo, sc, host) } else { if scInfo.SCName == sc { - mainClusterHosts = append(mainClusterHosts, host) + mainClusterHosts[host] = scInfo.Sandbox } // We do not want a host from the sc to be sandboxed to be the initiator if sc == op.ScToSandbox { @@ -188,16 +189,16 @@ func (op *httpsCheckSubclusterSandboxOp) processScInfo(scInfo subclusterSandboxI } func (op *httpsCheckSubclusterSandboxOp) processSandboxedSCInfo(scInfo subclusterSandboxInfo, - sc, host string) (keysToRemove, existingSandboxedHosts map[string]struct{}) { + sc, host string) (keysToRemove map[string]struct{}, existingSandboxedHosts map[string]string) { keysToRemove = 
make(map[string]struct{}) - existingSandboxedHosts = make(map[string]struct{}) + existingSandboxedHosts = make(map[string]string) if scInfo.Sandbox != op.Sandbox { op.logger.Info("subcluster " + sc + " is sandboxed") if scInfo.SCName == sc { keysToRemove[host] = struct{}{} } } else { - existingSandboxedHosts[host] = struct{}{} + existingSandboxedHosts[host] = scInfo.Sandbox } return } diff --git a/vclusterops/https_get_up_nodes_op.go b/vclusterops/https_get_up_nodes_op.go index ed90b96..576d7a8 100644 --- a/vclusterops/https_get_up_nodes_op.go +++ b/vclusterops/https_get_up_nodes_op.go @@ -20,6 +20,7 @@ import ( "fmt" "sort" + mapset "github.com/deckarep/golang-set/v2" "github.com/vertica/vcluster/vclusterops/util" "github.com/vertica/vcluster/vclusterops/vlog" ) @@ -31,6 +32,8 @@ const ( ScrutinizeCmd DBAddSubclusterCmd InstallPackageCmd + + oper = "HTTPSGetUpNodesOp" ) type CommandType int @@ -41,18 +44,22 @@ type httpsGetUpNodesOp struct { DBName string noUpHostsOk bool cmdType CommandType + sandbox string + mainCluster bool } func makeHTTPSGetUpNodesOp(logger vlog.Printer, dbName string, hosts []string, useHTTPPassword bool, userName string, httpsPassword *string, cmdType CommandType, ) (httpsGetUpNodesOp, error) { op := httpsGetUpNodesOp{} - op.name = "HTTPSGetUpNodesOp" + op.name = oper op.logger = logger.WithName(op.name) op.hosts = hosts op.useHTTPPassword = useHTTPPassword op.DBName = dbName op.cmdType = cmdType + op.sandbox = "" + op.mainCluster = false if useHTTPPassword { err := util.ValidateUsernameAndPassword(op.name, useHTTPPassword, userName) @@ -65,6 +72,15 @@ func makeHTTPSGetUpNodesOp(logger vlog.Printer, dbName string, hosts []string, return op, nil } +func makeHTTPSGetUpNodesWithSandboxOp(logger vlog.Printer, dbName string, hosts []string, + useHTTPPassword bool, userName string, httpsPassword *string, cmdType CommandType, + sandbox string, mainCluster bool) (httpsGetUpNodesOp, error) { + op, err := makeHTTPSGetUpNodesOp(logger, dbName, hosts, useHTTPPassword, userName, httpsPassword, cmdType) + op.sandbox = sandbox + op.mainCluster = mainCluster + return op, err +} + func (op *httpsGetUpNodesOp) allowNoUpHosts() { op.noUpHostsOk = true } @@ -125,11 +141,11 @@ func (op *httpsGetUpNodesOp) execute(execContext *opEngineExecContext) error { func (op *httpsGetUpNodesOp) processResult(execContext *opEngineExecContext) error { var allErrs error - upHosts := make(map[string]struct{}) + upHosts := mapset.NewSet[string]() upScInfo := make(map[string]string) exceptionHosts := []string{} downHosts := []string{} - + sandboxInfo := make(map[string]string) for host, result := range op.clusterHTTPRequest.ResultCollection { op.logResponse(host, result) if !result.isPassing() { @@ -158,23 +174,17 @@ func (op *httpsGetUpNodesOp) processResult(execContext *opEngineExecContext) err } // collect all the up hosts - for _, node := range nodesStates.NodeList { - if node.Database != op.DBName { - err = fmt.Errorf(`[%s] database %s is running on host %s, rather than database %s`, op.name, node.Database, host, op.DBName) - allErrs = errors.Join(allErrs, err) - break - } - if node.State == util.NodeUpState { - upHosts[node.Address] = struct{}{} - upScInfo[node.Address] = node.Subcluster - } + err = op.collectUpHosts(nodesStates, host, upHosts, upScInfo, sandboxInfo) + if err != nil { + allErrs = errors.Join(allErrs, err) + break } - if len(upHosts) > 0 && op.cmdType != SandboxCmd { + if upHosts.Cardinality() > 0 && op.cmdType != SandboxCmd && op.cmdType != StopDBCmd { break } } - - ignoreErrors 
:= op.processHostLists(upHosts, upScInfo, exceptionHosts, downHosts, execContext) + execContext.upHostsToSandboxes = sandboxInfo + ignoreErrors := op.processHostLists(upHosts, upScInfo, exceptionHosts, downHosts, sandboxInfo, execContext) if ignoreErrors { return nil } @@ -186,21 +196,40 @@ func (op *httpsGetUpNodesOp) finalize(_ *opEngineExecContext) error { return nil } +func (op *httpsGetUpNodesOp) checkSandboxUp(sandboxingInfo map[string]string, sandbox string) bool { + for _, sb := range sandboxingInfo { + if sb == sandbox { + return true + } + } + return false +} + // processHostLists stashes the up hosts, and if there are no up hosts, prints and logs // down or erratic hosts. Additionally, it determines if the op should fail or not. -func (op *httpsGetUpNodesOp) processHostLists(upHosts map[string]struct{}, upScInfo map[string]string, - exceptionHosts, downHosts []string, +func (op *httpsGetUpNodesOp) processHostLists(upHosts mapset.Set[string], upScInfo map[string]string, + exceptionHosts, downHosts []string, sandboxInfo map[string]string, execContext *opEngineExecContext) (ignoreErrors bool) { execContext.upScInfo = upScInfo - if len(upHosts) > 0 { - for host := range upHosts { - execContext.upHosts = append(execContext.upHosts, host) + + if op.sandbox != "" { + upSandbox := op.checkSandboxUp(sandboxInfo, op.sandbox) + if !upSandbox { + op.logger.PrintError(`[%s] There are no UP nodes in the sandbox %s. The db %s is already down`, op.name, op.sandbox, op.DBName) } + } + if op.mainCluster { + upMainCluster := op.checkSandboxUp(sandboxInfo, "") + if !upMainCluster { + op.logger.PrintError(`[%s] There are no UP nodes in the main cluster. The db %s is already down`, op.name, op.DBName) + } + } + if upHosts.Cardinality() > 0 { + execContext.upHosts = upHosts.ToSlice() // sorting the up hosts will be helpful for picking up the initiator in later instructions sort.Strings(execContext.upHosts) return true } - if len(exceptionHosts) > 0 { op.logger.PrintError(`[%s] fail to call https endpoint of database %s on hosts %s`, op.name, op.DBName, exceptionHosts) } @@ -211,3 +240,26 @@ func (op *httpsGetUpNodesOp) processHostLists(upHosts map[string]struct{}, upScI return op.noUpHostsOk } + +func (op *httpsGetUpNodesOp) collectUpHosts(nodesStates nodesStateInfo, host string, + upHosts mapset.Set[string], upScInfo, sandboxInfo map[string]string) (err error) { + upMainNodeFound := false + for _, node := range nodesStates.NodeList { + if node.Database != op.DBName { + err = fmt.Errorf(`[%s] database %s is running on host %s, rather than database %s`, op.name, node.Database, host, op.DBName) + return + } + if node.State == util.NodeUpState { + upHosts.Add(node.Address) + upScInfo[node.Address] = node.Subcluster + if op.cmdType == StopDBCmd { + if node.Sandbox != "" || !upMainNodeFound { + sandboxInfo[node.Address] = node.Sandbox + // We still need one main cluster UP node when there are sandboxes, + // so only mark it as found when this node belongs to the main cluster + if node.Sandbox == "" { + upMainNodeFound = true + } + } + } + } + } + return +} diff --git a/vclusterops/https_sandbox_subcluster_op.go b/vclusterops/https_sandbox_subcluster_op.go index 743fb54..cdfd297 100644 --- a/vclusterops/https_sandbox_subcluster_op.go +++ b/vclusterops/https_sandbox_subcluster_op.go @@ -20,6 +20,7 @@ import ( "fmt" "github.com/vertica/vcluster/vclusterops/util" + "github.com/vertica/vcluster/vclusterops/vlog" ) type httpsSandboxingOp struct { @@ -31,10 +32,11 @@ type httpsSandboxingOp struct { } // This op is used to sandbox the given subcluster `scName` as `sandboxName` -func makeHTTPSandboxingOp(scName,
sandboxName string, +func makeHTTPSandboxingOp(logger vlog.Printer, scName, sandboxName string, useHTTPPassword bool, userName string, httpsPassword *string) (httpsSandboxingOp, error) { op := httpsSandboxingOp{} op.name = "HTTPSSansboxingOp" + op.logger = logger.WithName(op.name) op.useHTTPPassword = useHTTPPassword op.scName = scName op.sandboxName = sandboxName @@ -76,11 +78,20 @@ func (op *httpsSandboxingOp) setupRequestBody() error { } func (op *httpsSandboxingOp) prepare(execContext *opEngineExecContext) error { - if len(execContext.sandboxingHosts) == 0 { + if len(execContext.upHostsToSandboxes) == 0 { return fmt.Errorf(`[%s] Cannot find any up hosts in OpEngineExecContext`, op.name) } // use shortlisted hosts to execute https post request, this host/hosts will be the initiator - hosts := execContext.sandboxingHosts + var hosts []string + var mainHost string + for h, sb := range execContext.upHostsToSandboxes { + if sb == "" { + mainHost = h + } else { + hosts = append(hosts, h) + } + } + hosts = append(hosts, mainHost) err := op.setupRequestBody() if err != nil { return err diff --git a/vclusterops/https_stop_db_op.go b/vclusterops/https_stop_db_op.go index 86fd578..29c429c 100644 --- a/vclusterops/https_stop_db_op.go +++ b/vclusterops/https_stop_db_op.go @@ -28,15 +28,19 @@ import ( type httpsStopDBOp struct { opBase opHTTPSBase + sandbox string + mainCluster bool RequestParams map[string]string } func makeHTTPSStopDBOp(logger vlog.Printer, useHTTPPassword bool, userName string, - httpsPassword *string, timeout *int) (httpsStopDBOp, error) { + httpsPassword *string, timeout *int, sandbox string, mainCluster bool) (httpsStopDBOp, error) { op := httpsStopDBOp{} op.name = "HTTPSStopDBOp" op.logger = logger.WithName(op.name) op.useHTTPPassword = useHTTPPassword + op.sandbox = sandbox + op.mainCluster = mainCluster // set the query params, "timeout" is optional op.RequestParams = make(map[string]string) @@ -72,11 +76,37 @@ func (op *httpsStopDBOp) setupClusterHTTPRequest(hosts []string) error { } func (op *httpsStopDBOp) prepare(execContext *opEngineExecContext) error { - if len(execContext.upHosts) == 0 { + // Stop db cases: + // case 1: stop db on a sandbox -- send stop db request to one UP host of the sandbox. + // case 2: stop db on the main cluster -- send stop db request to one UP host of the main cluster. + // case 3: stop db on every host -- send stop db request to one UP host of the given sandbox and to one UP host of the main cluster.
+ if len(execContext.upHostsToSandboxes) == 0 { return fmt.Errorf(`[%s] Cannot find any up hosts in OpEngineExecContext`, op.name) } - // use first up host to execute https post request - hosts := []string{execContext.upHosts[0]} + sandboxOnly := false + var mainHost string + var hosts []string + for h, sb := range execContext.upHostsToSandboxes { + if sb == op.sandbox && sb != "" { + // stop db only on sandbox + hosts = []string{h} + sandboxOnly = true + break + } + if sb == "" { + mainHost = h + } else { + hosts = append(hosts, h) + } + } + // Main cluster should run the command after sandboxes + if !sandboxOnly && op.sandbox == "" { + hosts = append(hosts, mainHost) + } + // Stop db on Main cluster only + if op.mainCluster { + hosts = []string{mainHost} + } execContext.dispatcher.setup(hosts) return op.setupClusterHTTPRequest(hosts) diff --git a/vclusterops/nma_show_restore_points_op.go b/vclusterops/nma_show_restore_points_op.go index 2f0799d..4c81657 100644 --- a/vclusterops/nma_show_restore_points_op.go +++ b/vclusterops/nma_show_restore_points_op.go @@ -28,12 +28,33 @@ type nmaShowRestorePointsOp struct { dbName string communalLocation string configurationParameters map[string]string + filterOptions *ShowRestorePointFilterOptions +} + +// Optional arguments to list only restore points that +// meet the specified condition(s) +type ShowRestorePointFilterOptions struct { + // Only list restore points with given archive name + ArchiveName *string + // Only list restore points created no earlier than this timestamp (must be UTC timezone) + StartTimestamp *string + // Only list restore points created no later than this timestamp (must be UTC timezone) + EndTimestamp *string + // Only list restore points with given ID + ArchiveID *string + // Only list restore points with given index + ArchiveIndex *string } type showRestorePointsRequestData struct { DBName string `json:"db_name"` CommunalLocation string `json:"communal_location"` Parameters map[string]string `json:"parameters,omitempty"` + ArchiveName string `json:"archive_name,omitempty"` + StartTimestamp string `json:"start_timestamp,omitempty"` + EndTimestamp string `json:"end_timestamp,omitempty"` + ArchiveID string `json:"archive_id,omitempty"` + ArchiveIndex string `json:"archive_index,omitempty"` } // This op is used to show restore points in a database @@ -48,9 +69,19 @@ func makeNMAShowRestorePointsOp(logger vlog.Printer, dbName: dbName, configurationParameters: configurationParameters, communalLocation: communalLocation, + filterOptions: nil, } } +// This op is used to show restore points in a database +func makeNMAShowRestorePointsOpWithFilterOptions(logger vlog.Printer, + hosts []string, dbName, communalLocation string, configurationParameters map[string]string, + filterOptions *ShowRestorePointFilterOptions) nmaShowRestorePointsOp { + op := makeNMAShowRestorePointsOp(logger, hosts, dbName, communalLocation, configurationParameters) + op.filterOptions = filterOptions + return op +} + // make https json data func (op *nmaShowRestorePointsOp) setupRequestBody() (map[string]string, error) { hostRequestBodyMap := make(map[string]string, len(op.hosts)) @@ -59,6 +90,23 @@ func (op *nmaShowRestorePointsOp) setupRequestBody() (map[string]string, error) requestData.DBName = op.dbName requestData.CommunalLocation = op.communalLocation requestData.Parameters = op.configurationParameters + if op.filterOptions != nil { + if op.filterOptions.ArchiveName != nil { + requestData.ArchiveName = *op.filterOptions.ArchiveName + } + if 
op.filterOptions.StartTimestamp != nil { + requestData.StartTimestamp = *op.filterOptions.StartTimestamp + } + if op.filterOptions.EndTimestamp != nil { + requestData.EndTimestamp = *op.filterOptions.EndTimestamp + } + if op.filterOptions.ArchiveID != nil { + requestData.ArchiveID = *op.filterOptions.ArchiveID + } + if op.filterOptions.ArchiveIndex != nil { + requestData.ArchiveIndex = *op.filterOptions.ArchiveIndex + } + } dataBytes, err := json.Marshal(requestData) if err != nil { @@ -118,6 +166,27 @@ type RestorePoint struct { VerticaVersion string `json:"vertica_version,omitempty"` } +/* +Sample response from the NMA restore-points endpoint: +[ + + { + "archive": "db", + "id": "4ee4119b-802c-4bb4-94b0-061c8748b602", + "index": 1, + "timestamp": "2023-05-02 14:10:31.038289", + "vertica_version": "v24.2.0-e6bb47b39502d8f4c6f68619f4d4a4648707fd42" + }, + { + "archive": "db", + "id": "bdaa4764-d8aa-4979-89e5-e642cc58d972", + "index": 2, + "timestamp": "2023-05-02 14:10:28.717667", + "vertica_version": "v24.2.0-e6bb47b39502d8f4c6f68619f4d4a4648707fd42" + } + +] +*/ func (op *nmaShowRestorePointsOp) processResult(execContext *opEngineExecContext) error { var allErrs error @@ -125,21 +194,6 @@ func (op *nmaShowRestorePointsOp) processResult(execContext *opEngineExecContext op.logResponse(host, result) if result.isPassing() { - /* [ - { - "archive": "db", - "id": "4ee4119b-802c-4bb4-94b0-061c8748b602", - "index": 1, - "timestamp": "2023-05-02 14:10:31.038289" - }, - { - "archive": "db", - "id": "bdaa4764-d8aa-4979-89e5-e642cc58d972", - "index": 2, - "timestamp": "2023-05-02 14:10:28.717667" - } - ] - */ var responseObj []RestorePoint err := op.parseAndCheckResponse(host, result.content, &responseObj) if err != nil { diff --git a/vclusterops/nma_show_restore_points_op_test.go b/vclusterops/nma_show_restore_points_op_test.go index 227413d..204cd24 100644 --- a/vclusterops/nma_show_restore_points_op_test.go +++ b/vclusterops/nma_show_restore_points_op_test.go @@ -26,6 +26,11 @@ func TestShowRestorePointsRequestBody(t *testing.T) { const hostName = "host1" const dbName = "testDB" const communalLocation = "/communal" + archiveName := "test_name" + archiveID := "test_ID" + archiveIndex := "test_index" + startTimestamp := "2006-01-02 15:04:05" + endTimestamp := "2006-01-02 15:04:06" op := makeNMAShowRestorePointsOp(vlog.Printer{}, []string{hostName}, dbName, communalLocation, nil) requestBody, err := op.setupRequestBody() @@ -35,4 +40,42 @@ func TestShowRestorePointsRequestBody(t *testing.T) { hostReq := requestBody[hostName] assert.Contains(t, hostReq, `"communal_location":"`+communalLocation+`"`) assert.Contains(t, hostReq, `"db_name":"`+dbName+`"`) + + op = makeNMAShowRestorePointsOpWithFilterOptions(vlog.Printer{}, []string{hostName}, + dbName, communalLocation, nil, &ShowRestorePointFilterOptions{ + ArchiveName: &archiveName, + ArchiveID: &archiveID, + ArchiveIndex: &archiveIndex, + StartTimestamp: &startTimestamp, + EndTimestamp: &endTimestamp, + }) + + requestBody, err = op.setupRequestBody() + assert.NoError(t, err) + assert.Len(t, requestBody, 1) + assert.Contains(t, requestBody, hostName) + hostReq = requestBody[hostName] + assert.Contains(t, hostReq, `"archive_name":"`+archiveName+`"`) + assert.Contains(t, hostReq, `"archive_id":"`+archiveID+`"`) + assert.Contains(t, hostReq, `"archive_index":"`+archiveIndex+`"`) + assert.Contains(t, hostReq, `"start_timestamp":"`+startTimestamp+`"`) + assert.Contains(t, hostReq, `"end_timestamp":"`+endTimestamp+`"`) + + op = 
makeNMAShowRestorePointsOpWithFilterOptions(vlog.Printer{}, []string{hostName}, + dbName, communalLocation, nil, &ShowRestorePointFilterOptions{ + ArchiveName: &archiveName, + ArchiveID: &archiveID, + ArchiveIndex: &archiveIndex, + }) + + requestBody, err = op.setupRequestBody() + assert.NoError(t, err) + assert.Len(t, requestBody, 1) + assert.Contains(t, requestBody, hostName) + hostReq = requestBody[hostName] + assert.Contains(t, hostReq, `"archive_name":"`+archiveName+`"`) + assert.Contains(t, hostReq, `"archive_id":"`+archiveID+`"`) + assert.Contains(t, hostReq, `"archive_index":"`+archiveIndex+`"`) + assert.NotContains(t, hostReq, `"start_timestamp"`) + assert.NotContains(t, hostReq, `"end_timestamp"`) } diff --git a/vclusterops/restore_points.go b/vclusterops/restore_points.go index 2227eef..01701b9 100644 --- a/vclusterops/restore_points.go +++ b/vclusterops/restore_points.go @@ -16,6 +16,7 @@ package vclusterops import ( + "errors" "fmt" "github.com/vertica/vcluster/vclusterops/util" @@ -24,6 +25,9 @@ import ( type VShowRestorePointsOptions struct { DatabaseOptions + // Optional arguments to list only restore points that + // meet the specified condition(s) + FilterOptions *ShowRestorePointFilterOptions } func VShowRestorePointsFactory() VShowRestorePointsOptions { @@ -31,21 +35,95 @@ func VShowRestorePointsFactory() VShowRestorePointsOptions { // set default values to the params opt.setDefaultValues() + opt.FilterOptions = &ShowRestorePointFilterOptions{ + ArchiveName: new(string), + StartTimestamp: new(string), + EndTimestamp: new(string), + ArchiveID: new(string), + ArchiveIndex: new(string), + } + return opt } +func (p *ShowRestorePointFilterOptions) hasNonEmptyStartTimestamp() bool { + return (p.StartTimestamp != nil && *p.StartTimestamp != "") +} + +func (p *ShowRestorePointFilterOptions) hasNonEmptyEndTimestamp() bool { + return (p.EndTimestamp != nil && *p.EndTimestamp != "") +} + +// Check that all non-empty timestamps specified have valid date time or date only format, +// convert date only format to date time format when applicable, and make sure end timestamp +// is no earlier than start timestamp +func (p *ShowRestorePointFilterOptions) ValidateAndStandardizeTimestampsIfAny() (err error) { + // shortcut of no validation needed + if !p.hasNonEmptyStartTimestamp() && !p.hasNonEmptyEndTimestamp() { + return nil + } + + // check each individual timestamp in terms of format + var dateTimeErr, dateOnlyErr error + + // try date time first + parsedStartDatetime, dateTimeErr := util.IsEmptyOrValidTimeStr(util.DefaultDateTimeFormat, p.StartTimestamp) + if dateTimeErr != nil { + // fallback to date only + parsedStartDatetime, dateOnlyErr = util.IsEmptyOrValidTimeStr(util.DefaultDateOnlyFormat, p.StartTimestamp) + if dateOnlyErr != nil { + // give up + return fmt.Errorf("start timestamp %q is invalid; cannot parse as a datetime: %w; "+ + "cannot parse as a date either: %w", *p.StartTimestamp, dateTimeErr, dateOnlyErr) + } + // default value of time parsed from date only string is already indicating the start of a day + // invoke this function here to only rewrite p.StartTimestamp in date time format + util.FillInDefaultTimeForStartTimestamp(p.StartTimestamp) + } + + // try date time first + parsedEndDatetime, dateTimeErr := util.IsEmptyOrValidTimeStr(util.DefaultDateTimeFormat, p.EndTimestamp) + if dateTimeErr != nil { + // fallback to date only + _, dateOnlyErr = util.IsEmptyOrValidTimeStr(util.DefaultDateOnlyFormat, p.EndTimestamp) + if dateOnlyErr != nil { + // give up +
return fmt.Errorf("end timestamp %q is invalid; cannot parse as a datetime: %w; "+ + "cannot parse as a date either: %w", *p.EndTimestamp, dateTimeErr, dateOnlyErr) + } + // fill in default value for time and update the end timestamp + parsedEndDatetime = util.FillInDefaultTimeForEndTimestamp(p.EndTimestamp) + } + + // check that the end time is not before the start time when both are non-empty + if p.hasNonEmptyStartTimestamp() && p.hasNonEmptyEndTimestamp() { + validRange := util.IsTimeEqualOrAfter(*parsedStartDatetime, *parsedEndDatetime) + if !validRange { + return errors.New("start timestamp must be before end timestamp") + } + return nil + } + + return nil +} + func (opt *VShowRestorePointsOptions) validateParseOptions(logger vlog.Printer) error { err := opt.validateBaseOptions("show_restore_points", logger) if err != nil { return err } if *opt.HonorUserInput { - err := util.ValidateCommunalStorageLocation(*opt.CommunalStorageLocation) + err = util.ValidateCommunalStorageLocation(*opt.CommunalStorageLocation) if err != nil { return err } } + err = opt.FilterOptions.ValidateAndStandardizeTimestampsIfAny() + if err != nil { + return err + } + return nil } @@ -133,8 +211,8 @@ func (vcc *VClusterCommands) produceShowRestorePointsInstructions(options *VShow // require to have the same vertica version nmaVerticaVersionOp := makeNMAVerticaVersionOp(vcc.Log, hosts, true, true /*IsEon*/) - nmaShowRestorePointOp := makeNMAShowRestorePointsOp(vcc.Log, bootstrapHost, *options.DBName, - *options.CommunalStorageLocation, options.ConfigurationParameters) + nmaShowRestorePointOp := makeNMAShowRestorePointsOpWithFilterOptions(vcc.Log, bootstrapHost, *options.DBName, + *options.CommunalStorageLocation, options.ConfigurationParameters, options.FilterOptions) instructions = append(instructions, &nmaHealthOp, diff --git a/vclusterops/restore_points_test.go b/vclusterops/restore_points_test.go new file mode 100644 index 0000000..705e41f --- /dev/null +++ b/vclusterops/restore_points_test.go @@ -0,0 +1,87 @@ +package vclusterops + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestShowRestorePointFilterOptions_ValidateAndStandardizeTimestampsIfAny(t *testing.T) { + // Test case 1: No validation needed + filterOptions := ShowRestorePointFilterOptions{ + StartTimestamp: nil, + EndTimestamp: nil, + } + err := filterOptions.ValidateAndStandardizeTimestampsIfAny() + assert.NoError(t, err) + + // Test case 2: Invalid start timestamp + startTimestamp := "invalid_start_timestamp" + filterOptions = ShowRestorePointFilterOptions{ + StartTimestamp: &startTimestamp, + EndTimestamp: nil, + } + err = filterOptions.ValidateAndStandardizeTimestampsIfAny() + expectedErr := fmt.Errorf("start timestamp %q is invalid;", startTimestamp) + assert.ErrorContains(t, err, expectedErr.Error()) + + // Test case 3: Invalid end timestamp + endTimestamp := "invalid_end_timestamp" + filterOptions = ShowRestorePointFilterOptions{ + StartTimestamp: nil, + EndTimestamp: &endTimestamp, + } + err = filterOptions.ValidateAndStandardizeTimestampsIfAny() + expectedErr = fmt.Errorf("end timestamp %q is invalid;", endTimestamp) + assert.ErrorContains(t, err, expectedErr.Error()) + + const earlierDate = "2022-01-01" + const laterDate = "2022-01-02" + + // Test case 4: Valid start and end timestamps + startTimestamp = earlierDate + " 00:00:00" + endTimestamp = laterDate + " 00:00:00" + filterOptions = ShowRestorePointFilterOptions{ + StartTimestamp: &startTimestamp, + EndTimestamp: &endTimestamp, + } + err =
filterOptions.ValidateAndStandardizeTimestampsIfAny() + assert.NoError(t, err) + + startTimestamp = earlierDate + endTimestamp = laterDate + err = filterOptions.ValidateAndStandardizeTimestampsIfAny() + assert.NoError(t, err) + assert.Equal(t, earlierDate+" 00:00:00.000000000", *filterOptions.StartTimestamp) + assert.Equal(t, laterDate+" 23:59:59.999999999", *filterOptions.EndTimestamp) + + startTimestamp = earlierDate + endTimestamp = earlierDate + err = filterOptions.ValidateAndStandardizeTimestampsIfAny() + assert.NoError(t, err) + + startTimestamp = earlierDate + endTimestamp = laterDate + " 23:59:59" + err = filterOptions.ValidateAndStandardizeTimestampsIfAny() + assert.NoError(t, err) + + startTimestamp = earlierDate + " 01:01:01.010101010" + endTimestamp = laterDate + err = filterOptions.ValidateAndStandardizeTimestampsIfAny() + assert.NoError(t, err) + assert.Equal(t, startTimestamp, *filterOptions.StartTimestamp) + + startTimestamp = earlierDate + " 23:59:59" + endTimestamp = earlierDate + " 23:59:59.123456789" + err = filterOptions.ValidateAndStandardizeTimestampsIfAny() + assert.NoError(t, err) + + // Test case 5: Start timestamp after end timestamp + filterOptions = ShowRestorePointFilterOptions{ + StartTimestamp: &endTimestamp, + EndTimestamp: &startTimestamp, + } + err = filterOptions.ValidateAndStandardizeTimestampsIfAny() + assert.EqualError(t, err, "start timestamp must be before end timestamp") +} diff --git a/vclusterops/revive_db.go b/vclusterops/revive_db.go index 137645e..c75f21f 100644 --- a/vclusterops/revive_db.go +++ b/vclusterops/revive_db.go @@ -18,6 +18,7 @@ package vclusterops import ( "fmt" "sort" + "strconv" "github.com/vertica/vcluster/vclusterops/util" ) @@ -331,8 +332,16 @@ func (vcc *VClusterCommands) producePreReviveDBInstructions(options *VReviveData hosts := options.Hosts initiator := getInitiator(hosts) bootstrapHost := []string{initiator} - nmaShowRestorePointsOp := makeNMAShowRestorePointsOp(vcc.Log, bootstrapHost, *options.DBName, - *options.CommunalStorageLocation, options.ConfigurationParameters) + filterOptions := ShowRestorePointFilterOptions{} + filterOptions.ArchiveName = options.RestorePoint.Archive + if options.hasValidRestorePointID() { + filterOptions.ArchiveID = options.RestorePoint.ID + } else { + indexStr := strconv.Itoa(*options.RestorePoint.Index) + filterOptions.ArchiveIndex = &indexStr + } + nmaShowRestorePointsOp := makeNMAShowRestorePointsOpWithFilterOptions(vcc.Log, bootstrapHost, *options.DBName, + *options.CommunalStorageLocation, options.ConfigurationParameters, &filterOptions) instructions = append(instructions, &nmaShowRestorePointsOp, ) diff --git a/vclusterops/sandbox.go b/vclusterops/sandbox.go index 608d3ed..145ac21 100644 --- a/vclusterops/sandbox.go +++ b/vclusterops/sandbox.go @@ -140,7 +140,7 @@ func (vcc *VClusterCommands) produceSandboxSubclusterInstructions(sandboxSubclus } // Run Sandboxing - httpsSandboxSubclusterOp, err := makeHTTPSandboxingOp(sandboxSubclusterInfo.SCName, sandboxSubclusterInfo.SandboxName, + httpsSandboxSubclusterOp, err := makeHTTPSandboxingOp(vcc.Log, sandboxSubclusterInfo.SCName, sandboxSubclusterInfo.SandboxName, usePassword, username, sandboxSubclusterInfo.Password) if err != nil { return instructions, err diff --git a/vclusterops/stop_db.go b/vclusterops/stop_db.go index 1a62a64..5e05291 100644 --- a/vclusterops/stop_db.go +++ b/vclusterops/stop_db.go @@ -27,10 +27,10 @@ type VStopDatabaseOptions struct { DatabaseOptions /* part 2: eon db info */ - DrainSeconds *int // time in seconds 
to wait for database users' disconnection - + DrainSeconds *int // time in seconds to wait for database users' disconnection + Sandbox *string // Stop db on given sandbox + MainCluster *bool // Stop db on main cluster only /* part 3: hidden info */ - CheckUserConn *bool // whether check user connection ForceKill *bool // whether force kill connections } @@ -42,6 +42,8 @@ type VStopDatabaseInfo struct { Password *string DrainSeconds *int IsEon bool + Sandbox string + MainCluster bool } func VStopDatabaseOptionsFactory() VStopDatabaseOptions { @@ -54,7 +56,8 @@ func (options *VStopDatabaseOptions) setDefaultValues() { options.DatabaseOptions.setDefaultValues() - + options.Sandbox = new(string) + options.MainCluster = new(bool) options.CheckUserConn = new(bool) options.ForceKill = new(bool) } @@ -74,6 +77,9 @@ func (options *VStopDatabaseOptions) validateEonOptions(config *ClusterConfig, l if err != nil { return err } + if *options.Sandbox != "" && *options.MainCluster { + return fmt.Errorf("cannot use both --sandbox and --main-cluster-only options together") + } if !isEon { if options.DrainSeconds != nil { @@ -149,6 +155,8 @@ func (vcc *VClusterCommands) VStopDatabase(options *VStopDatabaseOptions) error stopDBInfo := new(VStopDatabaseInfo) stopDBInfo.UserName = *options.UserName stopDBInfo.Password = options.Password + stopDBInfo.Sandbox = *options.Sandbox + stopDBInfo.MainCluster = *options.MainCluster stopDBInfo.DrainSeconds = options.DrainSeconds stopDBInfo.DBName, stopDBInfo.Hosts, err = options.getNameAndHosts(options.Config) if err != nil { @@ -202,8 +210,8 @@ func (vcc *VClusterCommands) produceStopDBInstructions(stopDBInfo *VStopDatabase } } - httpsGetUpNodesOp, err := makeHTTPSGetUpNodesOp(vcc.Log, stopDBInfo.DBName, stopDBInfo.Hosts, - usePassword, *options.UserName, stopDBInfo.Password, StopDBCmd) + httpsGetUpNodesOp, err := makeHTTPSGetUpNodesWithSandboxOp(vcc.Log, stopDBInfo.DBName, stopDBInfo.Hosts, + usePassword, *options.UserName, stopDBInfo.Password, StopDBCmd, *options.Sandbox, *options.MainCluster) if err != nil { return instructions, err } @@ -219,13 +227,14 @@ func (vcc *VClusterCommands) produceStopDBInstructions(stopDBInfo *VStopDatabase vcc.Log.PrintInfo("Skipping sync catalog for an enterprise database") } - httpsStopDBOp, err := makeHTTPSStopDBOp(vcc.Log, usePassword, *options.UserName, stopDBInfo.Password, stopDBInfo.DrainSeconds) + httpsStopDBOp, err := makeHTTPSStopDBOp(vcc.Log, usePassword, *options.UserName, stopDBInfo.Password, stopDBInfo.DrainSeconds, + *options.Sandbox, *options.MainCluster) if err != nil { return instructions, err } - httpsCheckDBRunningOp, err := makeHTTPSCheckRunningDBOp(vcc.Log, stopDBInfo.Hosts, - usePassword, *options.UserName, stopDBInfo.Password, StopDB) + httpsCheckDBRunningOp, err := makeHTTPSCheckRunningDBWithSandboxOp(vcc.Log, stopDBInfo.Hosts, + usePassword, *options.UserName, *options.Sandbox, *options.MainCluster, stopDBInfo.Password, StopDB) if err != nil { return instructions, err } diff --git a/vclusterops/util/util.go b/vclusterops/util/util.go index bb7aec2..27393d7 100644 --- a/vclusterops/util/util.go +++ b/vclusterops/util/util.go @@ -29,6 +29,7 @@ import ( "reflect" "regexp" "strings" + "time" "golang.org/x/exp/constraints" "golang.org/x/exp/slices" @@ -598,3 +599,61 @@ func Max[T constraints.Ordered](a, b T) T { func GetPathPrefix(path string) string { return filepath.Dir(filepath.Dir(path)) } + +// default date time format: this omits nanoseconds but
is still able to parse those out const DefaultDateTimeFormat = time.DateTime + +// default date time format: this includes nanoseconds +const DefaultDateTimeNanoSecFormat = time.DateTime + ".000000000" + +// default date only format: this omits time within a date +const DefaultDateOnlyFormat = time.DateOnly + +// IsEmptyOrValidTimeStr wraps time.Parse so callers don't need to import the time package. +// A nil or empty input string is treated as valid; otherwise an error is returned if the time is invalid. +// The caller is responsible for passing in the correct layout. +func IsEmptyOrValidTimeStr(layout string, value *string) (*time.Time, error) { + if value == nil || *value == "" { + return nil, nil + } + parsedTime, err := time.Parse(layout, *value) + if err != nil { + return nil, err + } + return &parsedTime, nil +} + +func fillInDefaultTimeForTimestampHelper(parsedDate time.Time, hour, minute, second, + nanosecond int) (string, time.Time) { + year, month, day := parsedDate.Year(), parsedDate.Month(), parsedDate.Day() + location := parsedDate.Location() // Extracting the timezone + datetime := time.Date(year, month, day, hour, minute, second, nanosecond, location) + formattedDatetime := datetime.Format(DefaultDateTimeNanoSecFormat) + return formattedDatetime, datetime +} + +// Read date only string from argument, fill in time, overwrite argument by date time string, and return parsed time, +// the filled in time will indicate the beginning of a day +func FillInDefaultTimeForStartTimestamp(dateonly *string) *time.Time { + parsedDate, _ := time.Parse(DefaultDateOnlyFormat, *dateonly) + formattedDatetime, datetime := fillInDefaultTimeForTimestampHelper(parsedDate, 0, 0, 0, 0) + *dateonly = formattedDatetime + return &datetime +} + +// Read date only string from argument, fill in time, overwrite argument by date time string, and return parsed time, +// the filled in time will indicate the end of a day (right before the beginning of the following day) +func FillInDefaultTimeForEndTimestamp(dateonly *string) *time.Time { + parsedDate, _ := time.Parse(DefaultDateOnlyFormat, *dateonly) + const lastHour = 23 + const lastMin = 59 + const lastSec = 59 + const lastNanoSec = 999999999 + formattedDatetime, datetime := fillInDefaultTimeForTimestampHelper(parsedDate, lastHour, lastMin, lastSec, lastNanoSec) + *dateonly = formattedDatetime + return &datetime +} + +func IsTimeEqualOrAfter(start, end time.Time) bool { + return end.Equal(start) || end.After(start) } diff --git a/vclusterops/util/util_test.go b/vclusterops/util/util_test.go index 0229e8e..39b042d 100644 --- a/vclusterops/util/util_test.go +++ b/vclusterops/util/util_test.go @@ -372,3 +372,22 @@ func TestValidateCommunalStorageLocation(t *testing.T) { err = ValidateCommunalStorageLocation("s3://vertica-fleeting///k8s/revive_eon_5") assert.Error(t, err) } + +func TestIsEmptyOrValidTimeStr(t *testing.T) { + const layout = "2006-01-02 15:04:05.000000" + testTimeString := new(string) + + // positive cases + *testTimeString = "" + _, err := IsEmptyOrValidTimeStr(layout, testTimeString) + assert.NoError(t, err) + + *testTimeString = "2023-05-02 14:10:31.038289" + _, err = IsEmptyOrValidTimeStr(layout, testTimeString) + assert.NoError(t, err) + + // negative case + *testTimeString = "invalid time" + _, err = IsEmptyOrValidTimeStr(layout, testTimeString) + assert.ErrorContains(t, err, "cannot parse") +}
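
Usage sketch (not part of the patch): how the new restore-point filters are meant to be driven through the public API. The archive name and timestamps below are illustrative; per VShowRestorePointsFactory above, every FilterOptions field is pre-allocated, so the dereferences are safe.

package main

import (
	"fmt"

	"github.com/vertica/vcluster/vclusterops"
)

func main() {
	opts := vclusterops.VShowRestorePointsFactory()
	*opts.FilterOptions.ArchiveName = "db"                   // illustrative archive name
	*opts.FilterOptions.StartTimestamp = "2023-05-01"        // date-only input is accepted
	*opts.FilterOptions.EndTimestamp = "2023-05-02 14:30:00" // datetime input is accepted too

	// Date-only inputs are rewritten in place to full datetimes:
	// start of day for the start timestamp, end of day for the end timestamp.
	if err := opts.FilterOptions.ValidateAndStandardizeTimestampsIfAny(); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(*opts.FilterOptions.StartTimestamp) // 2023-05-01 00:00:00.000000000
}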
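
Usage sketch (not part of the patch): stopping only a sandbox, or only the main cluster, via the new VStopDatabaseOptions flags. The wrapper function and package name are hypothetical; vcc is assumed to be an initialized VClusterCommands, opts is assumed to come from VStopDatabaseOptionsFactory (so the Sandbox and MainCluster pointers are allocated), and the usual DatabaseOptions fields (DBName, hosts, credentials) are assumed to be filled in as for a plain stop_db call.

package stopexample

import "github.com/vertica/vcluster/vclusterops"

// StopSandboxOnly stops the database only on the named sandbox.
func StopSandboxOnly(vcc vclusterops.VClusterCommands, opts vclusterops.VStopDatabaseOptions, sandbox string) error {
	*opts.Sandbox = sandbox // stop db only on this sandbox
	// Alternatively, *opts.MainCluster = true stops only the main cluster;
	// validateEonOptions rejects setting both flags at once.
	return vcc.VStopDatabase(&opts)
}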
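
Usage sketch (not part of the patch): the in-place expansion the new util helpers perform on a date-only timestamp, matching the expectations asserted in restore_points_test.go.

package main

import (
	"fmt"

	"github.com/vertica/vcluster/vclusterops/util"
)

func main() {
	// A date-only string parses with the date-only layout...
	end := "2022-01-02"
	if _, err := util.IsEmptyOrValidTimeStr(util.DefaultDateOnlyFormat, &end); err != nil {
		fmt.Println(err)
		return
	}
	// ...and FillInDefaultTimeForEndTimestamp rewrites it in place
	// to the last nanosecond of that day.
	util.FillInDefaultTimeForEndTimestamp(&end)
	fmt.Println(end) // 2022-01-02 23:59:59.999999999
}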