diff --git a/.github/workflows/unittests.yaml b/.github/workflows/unittests.yaml index 1007d70..c21b89d 100644 --- a/.github/workflows/unittests.yaml +++ b/.github/workflows/unittests.yaml @@ -17,7 +17,7 @@ jobs: with: # When changing this, be sure to update the version at: # vertica/make/build-container/*Dockerfile - go-version: "1.20.13" + go-version: "1.22.5" - name: Run unit tests run: make vet lint test diff --git a/Makefile b/Makefile index 11eada7..572910a 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,7 @@ default: help +# Note: This file and siblings are under github.com/vertica/vcluster/ + ##@ General # The help target prints out all targets with their descriptions organized @@ -47,14 +49,13 @@ $(LOCALBIN): mkdir -p $(LOCALBIN) GOLANGCI_LINT = $(LOCALBIN)/golangci-lint -# Make sure that whenever changing this, you also change -# vertica/make/build-container/*Dockerfile -GOLANGCI_LINT_VER ?= 1.54.2 +# see [sandbox]/__golint_version__.txt +GOLANGCI_LINT_VERSION ?= 1.56.0 .PHONY: golangci-lint $(GOLANGCI_LINT) golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint $(GOLANGCI_LINT): $(LOCALBIN) -ifneq (${GOLANGCI_LINT_VER}, $(shell [ -f $(GOLANGCI_LINT) ] && $(GOLANGCI_LINT) version --format short 2>&1)) - @echo "golangci-lint missing or not version '${GOLANGCI_LINT_VER}', downloading..." - curl --retry 10 --retry-max-time 1800 -sSfL "https://raw.githubusercontent.com/golangci/golangci-lint/v${GOLANGCI_LINT_VER}/install.sh" | sh -s -- -b ./bin "v${GOLANGCI_LINT_VER}" +ifneq (${GOLANGCI_LINT_VERSION}, $(shell [ -f $(GOLANGCI_LINT) ] && $(GOLANGCI_LINT) version --format short 2>&1)) + @echo "golangci-lint missing or not version '${GOLANGCI_LINT_VERSION}', downloading..." + curl --retry 10 --retry-max-time 1800 -sSfL "https://raw.githubusercontent.com/golangci/golangci-lint/v${GOLANGCI_LINT_VERSION}/install.sh" | sh -s -- -b ./bin "v${GOLANGCI_LINT_VERSION}" endif diff --git a/README.md b/README.md index 34285f5..86f79a7 100644 --- a/README.md +++ b/README.md @@ -94,4 +94,4 @@ We can use similar way to set up and call other vcluster-ops commands. ## Licensing -vcluster is open source code and is under the Apache 2.0 license. Please see `LICENSE` for details. \ No newline at end of file +vcluster is open source and is under the Apache 2.0 license. Please see `LICENSE` for details. 
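For context on the toolchain bumps above, a hypothetical local sanity check (a sketch only, assuming the standard Go toolchain and the Makefile targets shown in this patch) could confirm the pinned versions before pushing:
    go version                                  # expect a go1.22.x toolchain, matching go-version in unittests.yaml
    ./bin/golangci-lint version --format short  # expect 1.56.0, matching GOLANGCI_LINT_VERSION
    make golangci-lint                          # re-downloads the linter only when the installed version differs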
diff --git a/commands/cluster_command_launcher.go b/commands/cluster_command_launcher.go index 69a72c5..927c3a0 100644 --- a/commands/cluster_command_launcher.go +++ b/commands/cluster_command_launcher.go @@ -59,6 +59,8 @@ const ( dataPathKey = "dataPath" communalStorageLocationFlag = "communal-storage-location" communalStorageLocationKey = "communalStorageLocation" + archiveNameFlag = "archive-name" + archiveNameKey = "archiveName" ipv6Flag = "ipv6" ipv6Key = "ipv6" eonModeFlag = "eon-mode" @@ -118,8 +120,18 @@ const ( targetPasswordFileKey = "targetPasswordFile" targetConnFlag = "target-conn" targetConnKey = "targetConn" + asyncFlag = "async" + asyncKey = "async" sourceTLSConfigFlag = "source-tlsconfig" sourceTLSConfigKey = "sourceTLSConfig" + tableOrSchemaNameFlag = "table-or-schema-name" + tableOrSchemaNameKey = "tableOrSchemaName" + includePatternFlag = "include-pattern" + includePatternKey = "includePattern" + excludePatternFlag = "exclude-pattern" + excludePatternKey = "excludePattern" + targetNamespaceFlag = "target-namespace" + targetNamespaceKey = "targetNamespace" ) // flags to viper key map @@ -146,11 +158,17 @@ var flagKeyMap = map[string]string{ verboseFlag: verboseKey, outputFileFlag: outputFileKey, sandboxFlag: sandboxKey, + archiveNameFlag: archiveNameKey, targetDBNameFlag: targetDBNameKey, targetHostsFlag: targetHostsKey, targetUserNameFlag: targetUserNameKey, targetPasswordFileFlag: targetPasswordFileKey, + asyncFlag: asyncKey, sourceTLSConfigFlag: sourceTLSConfigKey, + tableOrSchemaNameFlag: tableOrSchemaNameKey, + includePatternFlag: includePatternKey, + excludePatternFlag: excludePatternKey, + targetNamespaceFlag: targetNamespaceKey, } // target database flags to viper key map @@ -198,7 +216,10 @@ const ( showRestorePointsSubCmd = "show_restore_points" installPkgSubCmd = "install_packages" // hidden Cmds (for internal testing only) - promoteSandboxSubCmd = "promote_sandbox" + promoteSandboxSubCmd = "promote_sandbox" + createArchiveCmd = "create_archive" + saveRestorePointsSubCmd = "save_restore_point" + getDrainingStatusSubCmd = "get_draining_status" ) // cmdGlobals holds global variables shared by multiple @@ -562,9 +583,10 @@ func constructCmds() []*cobra.Command { makeCmdReplication(), makeCmdCreateConnection(), // hidden cmds (for internal testing only) + makeCmdGetDrainingStatus(), makeCmdPromoteSandbox(), - - makeCmdCheckVClusterServerPid(), + makeCmdCreateArchive(), + makeCmdSaveRestorePoint(), } } diff --git a/commands/cmd_add_node.go b/commands/cmd_add_node.go index 45c0b7e..4f9636d 100644 --- a/commands/cmd_add_node.go +++ b/commands/cmd_add_node.go @@ -104,9 +104,7 @@ func (c *CmdAddNode) setLocalFlags(cmd *cobra.Command) { &c.addNodeOptions.DepotSize, "depot-size", "", - util.GetEonFlagMsg("Size of depot in one of the following formats:\n"+ - "integer{K|M|G|T}, where K is kilobytes, M is megabytes, G is gigabytes, and T is terabytes.\n"+ - "integer%, which expresses the depot size as a percentage of the total disk size."), + util.GetEonFlagMsg(util.DepotFmtMsg+util.DepotSizeKMGTMsg+util.DepotSizeHint), ) cmd.Flags().StringVar( &c.nodeNameListStr, diff --git a/commands/cmd_add_subcluster.go b/commands/cmd_add_subcluster.go index ef921ac..bd5be3a 100644 --- a/commands/cmd_add_subcluster.go +++ b/commands/cmd_add_subcluster.go @@ -132,8 +132,7 @@ func (c *CmdAddSubcluster) setLocalFlags(cmd *cobra.Command) { &c.addSubclusterOptions.DepotSize, "depot-size", "", - util.GetEonFlagMsg("Size of depot in one of the following formats:\n"+ - "integer{K|M|G|T}, where 
K is kilobytes, M is megabytes, G is gigabytes, and T is terabytes.\n"+ + util.GetEonFlagMsg(util.DepotFmtMsg+util.DepotSizeKMGTMsg+ "integer%, which expresses the depot size as a percentage of the total disk size.\n"), ) } diff --git a/commands/cmd_check_vcluster_server_pid.go b/commands/cmd_check_vcluster_server_pid.go deleted file mode 100644 index 88f934e..0000000 --- a/commands/cmd_check_vcluster_server_pid.go +++ /dev/null @@ -1,86 +0,0 @@ -/* - (c) Copyright [2023-2024] Open Text. - Licensed under the Apache License, Version 2.0 (the "License"); - You may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package commands - -import ( - "fmt" - - "github.com/spf13/cobra" - "github.com/vertica/vcluster/vclusterops" - "github.com/vertica/vcluster/vclusterops/vlog" -) - -/* CmdCheckVClusterServerPid - * - * Implements ClusterCommand interface - */ -type CmdCheckVClusterServerPid struct { - checkPidOptions *vclusterops.VCheckVClusterServerPidOptions - - CmdBase -} - -func makeCmdCheckVClusterServerPid() *cobra.Command { - newCmd := &CmdCheckVClusterServerPid{} - opt := vclusterops.VCheckVClusterServerPidOptionsFactory() - newCmd.checkPidOptions = &opt - - cmd := makeBasicCobraCmd( - newCmd, - "check_pid", - "Check VCluster server PID files", - `Check VCluster server PID files in nodes to make sure that -only one host is running the VCluster server`, - []string{hostsFlag}, - ) - - return cmd -} - -func (c *CmdCheckVClusterServerPid) Parse(inputArgv []string, logger vlog.Printer) error { - c.argv = inputArgv - logger.LogArgParse(&c.argv) - - return c.validateParse(logger) -} - -func (c *CmdCheckVClusterServerPid) validateParse(logger vlog.Printer) error { - logger.Info("Called validateParse()") - if !c.usePassword() { - err := c.getCertFilesFromCertPaths(&c.checkPidOptions.DatabaseOptions) - if err != nil { - return err - } - } - return c.ValidateParseBaseOptions(&c.checkPidOptions.DatabaseOptions) -} - -func (c *CmdCheckVClusterServerPid) Run(vcc vclusterops.ClusterCommands) error { - vcc.V(1).Info("Called method Run()") - - hostsWithVclusterServerPid, err := vcc.VCheckVClusterServerPid(c.checkPidOptions) - if err != nil { - vcc.LogError(err, "failed to drop the database") - return err - } - fmt.Printf("Hosts with VCluster server PID files: %+v\n", hostsWithVclusterServerPid) - - return nil -} - -func (c *CmdCheckVClusterServerPid) SetDatabaseOptions(opt *vclusterops.DatabaseOptions) { - c.checkPidOptions.DatabaseOptions = *opt -} diff --git a/commands/cmd_create_archive.go b/commands/cmd_create_archive.go new file mode 100644 index 0000000..2164911 --- /dev/null +++ b/commands/cmd_create_archive.go @@ -0,0 +1,178 @@ +/* + (c) Copyright [2023-2024] Open Text. + Licensed under the Apache License, Version 2.0 (the "License"); + You may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package commands + +import ( + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/vertica/vcluster/vclusterops" + "github.com/vertica/vcluster/vclusterops/vlog" +) + +/* CmdCreateArchive + * + * Parses arguments to create-archive and calls + * the high-level function for create-archive. + * + * Implements ClusterCommand interface + */ + +type CmdCreateArchive struct { + CmdBase + createArchiveOptions *vclusterops.VCreateArchiveOptions +} + +func makeCmdCreateArchive() *cobra.Command { + newCmd := &CmdCreateArchive{} + opt := vclusterops.VCreateArchiveFactory() + newCmd.createArchiveOptions = &opt + + cmd := makeBasicCobraCmd( + newCmd, + createArchiveCmd, + "Create an archive with a given name and an optional maximum number of restore points.", + `Create an archive with a given name and an optional maximum number of restore points. + +Examples: + # Create an archive with a given archive name + vcluster create_archive --db-name DBNAME --archive-name ARCHIVE_ONE + + # Create an archive with a given archive name and a maximum number of restore points (default: unlimited) + vcluster create_archive --db-name DBNAME --archive-name ARCHIVE_ONE \ + --num-restore-points 6 + + # Create an archive in the main cluster with a user-supplied password + vcluster create_archive --db-name DBNAME --archive-name ARCHIVE_ONE \ + --hosts 10.20.30.40,10.20.30.41,10.20.30.42 --password "PASSWORD" + + # Create an archive for a sandbox + vcluster create_archive --db-name DBNAME --archive-name ARCHIVE_ONE \ + --sandbox SANDBOX_ONE --password "PASSWORD" + +`, + []string{dbNameFlag, configFlag, passwordFlag, + hostsFlag, ipv6Flag, eonModeFlag}, + ) + + // local flags + newCmd.setLocalFlags(cmd) + + // require archive-name + markFlagsRequired(cmd, archiveNameFlag) + + // hide this subcommand + cmd.Hidden = true + + return cmd +} + +// setLocalFlags will set the local flags the command has +func (c *CmdCreateArchive) setLocalFlags(cmd *cobra.Command) { + cmd.Flags().StringVar( + &c.createArchiveOptions.ArchiveName, + archiveNameFlag, + "", + "The name of the archive to be created.", + ) + cmd.Flags().IntVar( + &c.createArchiveOptions.NumOfArchives, + "num-restore-points", + vclusterops.CreateArchiveDefaultNumRestore, + "Maximum number of restore points that the archive can contain. "+ + "If you provide 0, the number of restore points will be unlimited. "+ + "By default, the value is 0.", + ) + cmd.Flags().StringVar( + &c.createArchiveOptions.Sandbox, + sandboxFlag, + "", + "The name of target sandbox", + ) +} + +func (c *CmdCreateArchive) Parse(inputArgv []string, logger vlog.Printer) error { + c.argv = inputArgv + logger.LogArgParse(&c.argv) + + // for some options, we do not want to use their default values, + // if they are not provided in cli, + // reset the value of those options to nil + c.ResetUserInputOptions(&c.createArchiveOptions.DatabaseOptions) + + // create_archive only works for an Eon db so we assume the user always runs this subcommand + // on an Eon db. When Eon mode cannot be found in config file, we set its value to true. 
+ if !viper.IsSet(eonModeKey) { + c.createArchiveOptions.IsEon = true + } + + return c.validateParse(logger) +} + +// all validations of the arguments should go in here +// +//nolint:dupl +func (c *CmdCreateArchive) validateParse(logger vlog.Printer) error { + logger.Info("Called validateParse()") + + err := c.ValidateParseBaseOptions(&c.createArchiveOptions.DatabaseOptions) + if err != nil { + return err + } + + if !c.usePassword() { + err = c.getCertFilesFromCertPaths(&c.createArchiveOptions.DatabaseOptions) + if err != nil { + return err + } + } + + err = c.setConfigParam(&c.createArchiveOptions.DatabaseOptions) + if err != nil { + return err + } + + err = c.setDBPassword(&c.createArchiveOptions.DatabaseOptions) + if err != nil { + return err + } + + return nil +} + +func (c *CmdCreateArchive) Analyze(logger vlog.Printer) error { + logger.Info("Called method Analyze()") + return nil +} + +func (c *CmdCreateArchive) Run(vcc vclusterops.ClusterCommands) error { + vcc.LogInfo("Called method Run()") + + options := c.createArchiveOptions + + err := vcc.VCreateArchive(options) + if err != nil { + vcc.LogError(err, "failed to create archive", "archiveName", options.ArchiveName) + return err + } + + vcc.DisplayInfo("Successfully created archive: %s", options.ArchiveName) + return nil +} + +// SetDatabaseOptions will assign a vclusterops.DatabaseOptions instance to the one in CmdCreateArchive +func (c *CmdCreateArchive) SetDatabaseOptions(opt *vclusterops.DatabaseOptions) { + c.createArchiveOptions.DatabaseOptions = *opt +} diff --git a/commands/cmd_create_db.go b/commands/cmd_create_db.go index 005fa11..a869996 100644 --- a/commands/cmd_create_db.go +++ b/commands/cmd_create_db.go @@ -125,9 +125,7 @@ func (c *CmdCreateDB) setLocalFlags(cmd *cobra.Command) { &c.createDBOptions.DepotSize, "depot-size", "", - util.GetEonFlagMsg("Size of depot in one of the following formats:\n"+ - "integer {K|M|G|T}, where K is kilobytes, M is megabytes, G is gigabytes, and T is terabytes.\n"+ - "integer%, which expresses the depot size as a percentage of the total disk size."), + util.GetEonFlagMsg(util.DepotFmtMsg+util.DepotSizeKMGTMsg+util.DepotSizeHint), ) cmd.Flags().BoolVar( &c.createDBOptions.GetAwsCredentialsFromEnv, @@ -277,7 +275,6 @@ func (c *CmdCreateDB) Run(vcc vclusterops.ClusterCommands) error { vcc.V(1).Info("Called method Run()") vdb, createError := vcc.VCreateDatabase(c.createDBOptions) if createError != nil { - vcc.LogError(createError, "Failed to create the database.") return createError } diff --git a/commands/cmd_get_draining_status.go b/commands/cmd_get_draining_status.go new file mode 100644 index 0000000..2db7426 --- /dev/null +++ b/commands/cmd_get_draining_status.go @@ -0,0 +1,159 @@ +/* + (c) Copyright [2023-2024] Open Text. + Licensed under the Apache License, Version 2.0 (the "License"); + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + DISCLAIMER: + The subcommand (get_draining_status) within this file is intended solely for internal testing purposes. + It is not designed, intended, or authorized for use in production environments. 
The behavior of this + subcommand may change without prior notice and is not guaranteed to be maintained in future releases. +*/ + +package commands + +import ( + "encoding/json" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/vertica/vcluster/vclusterops" + "github.com/vertica/vcluster/vclusterops/util" + "github.com/vertica/vcluster/vclusterops/vlog" +) + +/* CmdGetDrainingStatus + * + * Implements ClusterCommand interface + */ +type CmdGetDrainingStatus struct { + CmdBase + getDrainingStatusOpt *vclusterops.VGetDrainingStatusOptions +} + +func makeCmdGetDrainingStatus() *cobra.Command { + // CmdGetDrainingStatus + newCmd := &CmdGetDrainingStatus{} + opt := vclusterops.VGetDrainingStatusFactory() + newCmd.getDrainingStatusOpt = &opt + + cmd := makeBasicCobraCmd( + newCmd, + getDrainingStatusSubCmd, + "Get draining status.", + `Get draining status. + +Examples: + # Show draining status of all subclusters in main cluster with user input + vcluster get_draining_status --db-name test_db \ + --hosts 10.20.30.40,10.20.30.41,10.20.30.42 + + # Show draining status of all subclusters in main cluster with config file + vcluster get_draining_status \ + --config /opt/vertica/config/vertica_cluster.yaml + + # Show draining status of all subclusters in a sandbox with config file + vcluster get_draining_status --sandbox sand1 \ + --config /opt/vertica/config/vertica_cluster.yaml +`, + []string{dbNameFlag, configFlag, passwordFlag, hostsFlag, ipv6Flag, sandboxFlag, + eonModeFlag, outputFileFlag}, + ) + + // local flags + newCmd.setLocalFlags(cmd) + + // hide this subcommand + cmd.Hidden = true + + return cmd +} + +// setLocalFlags will set the local flags the command has +func (c *CmdGetDrainingStatus) setLocalFlags(cmd *cobra.Command) { + cmd.Flags().StringVar( + &c.getDrainingStatusOpt.Sandbox, + sandboxFlag, + "", + "The name of target sandbox", + ) +} + +func (c *CmdGetDrainingStatus) Parse(inputArgv []string, logger vlog.Printer) error { + c.argv = inputArgv + logger.LogMaskedArgParse(c.argv) + + // for some options, we do not want to use their default values, + // if they are not provided in cli, + // reset the value of those options to nil + c.ResetUserInputOptions(&c.getDrainingStatusOpt.DatabaseOptions) + + // get_draining_status only works for an Eon db so we assume the user always runs this subcommand + // on an Eon db. When Eon mode cannot be found in config file, we set its value to true. 
+ if !viper.IsSet(eonModeKey) { + c.getDrainingStatusOpt.IsEon = true + } + + return c.validateParse(logger) +} + +func (c *CmdGetDrainingStatus) validateParse(logger vlog.Printer) error { + logger.Info("Called validateParse()") + + if !c.usePassword() { + err := c.getCertFilesFromCertPaths(&c.getDrainingStatusOpt.DatabaseOptions) + if err != nil { + return err + } + } + + err := c.ValidateParseBaseOptions(&c.getDrainingStatusOpt.DatabaseOptions) + if err != nil { + return err + } + + err = c.setDBPassword(&c.getDrainingStatusOpt.DatabaseOptions) + if err != nil { + return err + } + return nil +} + +func (c *CmdGetDrainingStatus) Analyze(logger vlog.Printer) error { + logger.Info("Called method Analyze()") + return nil +} + +func (c *CmdGetDrainingStatus) Run(vcc vclusterops.ClusterCommands) error { + vcc.V(1).Info("Called method Run()") + + options := c.getDrainingStatusOpt + + drainingStatusList, err := vcc.VGetDrainingStatus(options) + if err != nil { + vcc.LogError(err, "failed to get draining status list", "cluster", util.GetClusterName(options.Sandbox)) + return err + } + bytes, err := json.MarshalIndent(drainingStatusList, "", " ") + if err != nil { + return err + } + bytes = append(bytes, '\n') + c.writeCmdOutputToFile(globals.file, bytes, vcc.GetLog()) + + vcc.DisplayInfo("Successfully displayed draining status list for %s", util.GetClusterName(options.Sandbox)) + return nil +} + +// SetDatabaseOptions will assign a vclusterops.DatabaseOptions instance to the one in CmdGetDrainingStatus +func (c *CmdGetDrainingStatus) SetDatabaseOptions(opt *vclusterops.DatabaseOptions) { + c.getDrainingStatusOpt.DatabaseOptions = *opt +} diff --git a/commands/cmd_sandbox.go b/commands/cmd_sandbox.go index 1ab5cf0..b3c0717 100644 --- a/commands/cmd_sandbox.go +++ b/commands/cmd_sandbox.go @@ -18,6 +18,7 @@ package commands import ( "github.com/spf13/cobra" "github.com/vertica/vcluster/vclusterops" + "github.com/vertica/vcluster/vclusterops/util" "github.com/vertica/vcluster/vclusterops/vlog" ) @@ -155,7 +156,7 @@ func (c *CmdSandboxSubcluster) Analyze(logger vlog.Printer) error { } func (c *CmdSandboxSubcluster) Run(vcc vclusterops.ClusterCommands) error { - vcc.LogInfo("Calling method Run() for command " + sandboxSubCmd) + vcc.LogInfo(util.CallCommand + sandboxSubCmd) options := c.sbOptions @@ -182,9 +183,18 @@ func (c *CmdSandboxSubcluster) Run(vcc vclusterops.ClusterCommands) error { writeErr := dbConfig.write(options.ConfigPath, true /*forceOverwrite*/) if writeErr != nil { - vcc.DisplayWarning("Failed to write the configuration file: " + writeErr.Error()) + vcc.DisplayWarning(util.FailToWriteToConfig + writeErr.Error()) return nil } + + options.DatabaseOptions.Hosts = options.SCHosts + pollOpts := vclusterops.VPollSubclusterStateOptions{DatabaseOptions: options.DatabaseOptions, + SkipOptionsValidation: true, SCName: options.SCName} + err = vcc.VPollSubclusterState(&pollOpts) + if err != nil { + vcc.LogError(err, "Failed to wait for sandboxed subcluster to come up") + return err + } return nil } diff --git a/commands/cmd_save_restore_point.go b/commands/cmd_save_restore_point.go new file mode 100644 index 0000000..6dc3f62 --- /dev/null +++ b/commands/cmd_save_restore_point.go @@ -0,0 +1,163 @@ +/* + (c) Copyright [2023-2024] Open Text. + Licensed under the Apache License, Version 2.0 (the "License"); + You may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package commands + +import ( + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/vertica/vcluster/vclusterops" + "github.com/vertica/vcluster/vclusterops/vlog" +) + +/* CmdSaveRestorePoint + * + * Parses arguments to save-restore-points and calls + * the high-level function for save-restore-points. + * + * Implements ClusterCommand interface + */ + +type CmdSaveRestorePoint struct { + CmdBase + saveRestoreOptions *vclusterops.VSaveRestorePointOptions +} + +func makeCmdSaveRestorePoint() *cobra.Command { + newCmd := &CmdSaveRestorePoint{} + opt := vclusterops.VSaveRestorePointFactory() + newCmd.saveRestoreOptions = &opt + + cmd := makeBasicCobraCmd( + newCmd, + saveRestorePointsSubCmd, + "Save a restore point in a given archive.", + `Save a restore point in a given archive. + +Examples: + # Save restore point in a given archive with user input + vcluster save_restore_point --db-name test_db \ + --archive-name ARCHIVE_ONE + + # Save restore point for a sandbox + vcluster save_restore_point --db-name test_db \ + --archive-name ARCHIVE_ONE --sandbox SANDBOX_ONE + +`, + []string{dbNameFlag, hostsFlag, passwordFlag, + ipv6Flag, configFlag, eonModeFlag}, + ) + + // local flags + newCmd.setLocalFlags(cmd) + + // require db-name and archive-name + markFlagsRequired(cmd, dbNameFlag, archiveNameFlag) + + // hide this subcommand + cmd.Hidden = true + + return cmd +} + +// setLocalFlags will set the local flags the command has +func (c *CmdSaveRestorePoint) setLocalFlags(cmd *cobra.Command) { + cmd.Flags().StringVar( + &c.saveRestoreOptions.ArchiveName, + archiveNameFlag, + "", + "Collection of restore points that belong to a certain archive.", + ) + cmd.Flags().StringVar( + &c.saveRestoreOptions.Sandbox, + sandboxFlag, + "", + "The name of target sandbox", + ) +} + +func (c *CmdSaveRestorePoint) Parse(inputArgv []string, logger vlog.Printer) error { + c.argv = inputArgv + logger.LogArgParse(&c.argv) + + // for some options, we do not want to use their default values, + // if they are not provided in cli, + // reset the value of those options to nil + c.ResetUserInputOptions(&c.saveRestoreOptions.DatabaseOptions) + + // save_restore_point only works for an Eon db so we assume the user always runs this subcommand + // on an Eon db. When Eon mode cannot be found in config file, we set its value to true. 
+ if !viper.IsSet(eonModeKey) { + c.saveRestoreOptions.IsEon = true + } + + return c.validateParse(logger) +} + +// all validations of the arguments should go in here +// +//nolint:dupl +func (c *CmdSaveRestorePoint) validateParse(logger vlog.Printer) error { + logger.Info("Called validateParse()") + + err := c.ValidateParseBaseOptions(&c.saveRestoreOptions.DatabaseOptions) + if err != nil { + return err + } + + if !c.usePassword() { + err = c.getCertFilesFromCertPaths(&c.saveRestoreOptions.DatabaseOptions) + if err != nil { + return err + } + } + + err = c.setConfigParam(&c.saveRestoreOptions.DatabaseOptions) + if err != nil { + return err + } + + err = c.setDBPassword(&c.saveRestoreOptions.DatabaseOptions) + if err != nil { + return err + } + + return nil +} + +func (c *CmdSaveRestorePoint) Analyze(logger vlog.Printer) error { + logger.Info("Called method Analyze()") + return nil +} + +func (c *CmdSaveRestorePoint) Run(vcc vclusterops.ClusterCommands) error { + vcc.LogInfo("Called method Run()") + + options := c.saveRestoreOptions + + err := vcc.VSaveRestorePoint(options) + if err != nil { + vcc.LogError(err, "failed to save restore points", "DBName", options.DBName) + return err + } + + vcc.DisplayInfo("Successfully saved restore points in database %s", options.DBName) + return nil +} + +// SetDatabaseOptions will assign a vclusterops.DatabaseOptions instance to the one in CmdSaveRestorePoint +func (c *CmdSaveRestorePoint) SetDatabaseOptions(opt *vclusterops.DatabaseOptions) { + c.saveRestoreOptions.DatabaseOptions = *opt +} diff --git a/commands/cmd_scrutinize.go b/commands/cmd_scrutinize.go index 86b2faa..3b54527 100644 --- a/commands/cmd_scrutinize.go +++ b/commands/cmd_scrutinize.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "os" + "path" "regexp" "strings" @@ -27,6 +28,7 @@ import ( "github.com/spf13/cobra" "github.com/vertica/vcluster/vclusterops" + "github.com/vertica/vcluster/vclusterops/util" "github.com/vertica/vcluster/vclusterops/vlog" "github.com/vertica/vertica-kubernetes/pkg/secrets" ) @@ -49,6 +51,7 @@ const ( const ( databaseName = "DATABASE_NAME" catalogPathPref = "CATALOG_PATH" + withFmt = "with the format: " ) // secretRetriever is an interface for retrieving secrets. 
@@ -127,21 +130,21 @@ func (c *CmdScrutinize) setLocalFlags(cmd *cobra.Command) { "log-age-oldest-time", "", "Timestamp of the maximum age of archived Vertica log files to collect \n"+ - "with the format: "+vclusterops.ScrutinizeHelpTimeFormatDesc, + withFmt+vclusterops.ScrutinizeHelpTimeFormatDesc, ) cmd.Flags().StringVar( &c.sOptions.LogAgeNewestTime, "log-age-newest-time", "", "Timestamp of the minimum age of archived Vertica log files to collect "+ - "with the format: "+vclusterops.ScrutinizeHelpTimeFormatDesc, + withFmt+vclusterops.ScrutinizeHelpTimeFormatDesc, ) cmd.Flags().IntVar( &c.sOptions.LogAgeHours, "log-age-hours", vclusterops.ScrutinizeLogMaxAgeHoursDefault, "The maximum age, in hours, of archived Vertica log files to collect."+ - "Default: "+fmt.Sprint(vclusterops.ScrutinizeLogMaxAgeHoursDefault), + util.Default+fmt.Sprint(vclusterops.ScrutinizeLogMaxAgeHoursDefault), ) cmd.MarkFlagsMutuallyExclusive("log-age-hours", "log-age-oldest-time") cmd.MarkFlagsMutuallyExclusive("log-age-hours", "log-age-newest-time") @@ -412,6 +415,16 @@ func (c *CmdScrutinize) nmaCertLookupFromEnv(logger vlog.Printer) (bool, error) var err error + if rootCAPath != "" { + rootCAPath = path.Join(util.RootDir, rootCAPath) + } + if certPath != "" { + certPath = path.Join(util.RootDir, certPath) + } + if keyPath != "" { + keyPath = path.Join(util.RootDir, keyPath) + } + c.sOptions.CaCert, err = readNonEmptyFile(rootCAPath) if err != nil { return false, fmt.Errorf("failed to read root CA from %s: %w", rootCAPath, err) diff --git a/commands/cmd_show_restore_points.go b/commands/cmd_show_restore_points.go index efa3105..c43d665 100644 --- a/commands/cmd_show_restore_points.go +++ b/commands/cmd_show_restore_points.go @@ -32,6 +32,8 @@ type CmdShowRestorePoints struct { showRestorePointsOptions *vclusterops.VShowRestorePointsOptions } +const dateTimeOnly = "in either date-time or date-only format." 
+ func makeCmdShowRestorePoints() *cobra.Command { // CmdShowRestorePoints newCmd := &CmdShowRestorePoints{} @@ -113,15 +115,13 @@ func (c *CmdShowRestorePoints) setLocalFlags(cmd *cobra.Command) { &c.showRestorePointsOptions.FilterOptions.StartTimestamp, "start-timestamp", "", - "Shows restore points after and including the specified UTC timestamp \n"+ - "in either date-time or date-only format.", + "Shows restore points after and including the specified UTC timestamp \n"+dateTimeOnly, ) cmd.Flags().StringVar( &c.showRestorePointsOptions.FilterOptions.EndTimestamp, "end-timestamp", "", - "Shows restore points up to and including the specified UTC timestamp \n"+ - "in either date-time or date-only format.", + "Shows restore points up to and including the specified UTC timestamp \n"+dateTimeOnly, ) } diff --git a/commands/cmd_start_db.go b/commands/cmd_start_db.go index 3fdc57c..04f1482 100644 --- a/commands/cmd_start_db.go +++ b/commands/cmd_start_db.go @@ -190,7 +190,7 @@ func (c *CmdStartDB) validateParse(logger vlog.Printer) error { } return nil } -func filterInputHosts(options *vclusterops.VStartDatabaseOptions, dbConfig *DatabaseConfig) []string { +func FilterInputHostsForStartDB(options *vclusterops.VStartDatabaseOptions, dbConfig *DatabaseConfig) []string { filteredHosts := []string{} for _, n := range dbConfig.Nodes { // Collect sandbox hosts @@ -202,6 +202,10 @@ func filterInputHosts(options *vclusterops.VStartDatabaseOptions, dbConfig *Data filteredHosts = append(filteredHosts, n.Address) } } + // TODO: Hosts is no longer populated by this point, and RawHosts may contain either user-specified + // hosts in IP or name form, or all hosts in the config file in IP form. We need to resolve RawHosts + // before the comparison can be made with the IPs from config. As is, the condition will never be hit, + // and all hosts in the specified sandbox (main or otherwise) will be used. if len(options.Hosts) > 0 { return util.SliceCommon(filteredHosts, options.Hosts) } @@ -218,7 +222,7 @@ func (c *CmdStartDB) Run(vcc vclusterops.ClusterCommands) error { if readConfigErr == nil { options.ReadFromConfig = true if options.Sandbox != util.MainClusterSandbox || options.MainCluster { - options.RawHosts = filterInputHosts(options, dbConfig) + options.RawHosts = FilterInputHostsForStartDB(options, dbConfig) } options.FirstStartAfterRevive = dbConfig.FirstStartAfterRevive } else { diff --git a/commands/cmd_start_replication.go b/commands/cmd_start_replication.go index 0f9a49c..ba84f90 100644 --- a/commands/cmd_start_replication.go +++ b/commands/cmd_start_replication.go @@ -87,8 +87,13 @@ Examples: cmd.MarkFlagsOneRequired(targetConnFlag, targetDBNameFlag) cmd.MarkFlagsOneRequired(targetConnFlag, targetHostsFlag) + // tableOrSchema or pattern cannot be accepted together + cmd.MarkFlagsMutuallyExclusive(tableOrSchemaNameFlag, includePatternFlag) + cmd.MarkFlagsMutuallyExclusive(tableOrSchemaNameFlag, excludePatternFlag) + // hide eon mode flag since we expect it to come from config file, not from user input - hideLocalFlags(cmd, []string{eonModeFlag}) + hideLocalFlags(cmd, []string{eonModeFlag, asyncFlag, tableOrSchemaNameFlag, + includePatternFlag, excludePatternFlag, targetNamespaceFlag}) return cmd } @@ -131,7 +136,51 @@ func (c *CmdStartReplication) setLocalFlags(cmd *cobra.Command) { "[Required] The absolute path to the connection file created with the create_connection command, "+ "containing the database name, hosts, and password (if any) for the target database. 
"+ "Alternatively, you can provide this information manually with --target-db-name, "+ - "--target-hosts, and --target-password-file", + "--target-hosts, and --target-password-file") + cmd.Flags().BoolVar( + &c.startRepOptions.Async, + asyncFlag, + false, + "If set to true, the replicate operation runs asynchronously. "+ + "Default value is false.", + ) + cmd.Flags().StringVar( + &c.startRepOptions.ObjectName, + tableOrSchemaNameFlag, + "", + "(only async replication) The name of the object to copy from the source side. The available"+ + " types are: namespace, schema, table. If this is omitted, the operator"+ + " will replicate all namespaces in the source database.", + ) + cmd.Flags().StringVar( + &c.startRepOptions.IncludePattern, + includePatternFlag, + "", + "(only async replication) A string containing a wildcard pattern of the schemas and/or tables to "+ + "include in the replication. Namespace names must be front-qualified "+ + "with a period.", + ) + cmd.Flags().StringVar( + &c.startRepOptions.ExcludePattern, + excludePatternFlag, + "", + "(only async replication) A string containing a wildcard pattern of the schemas and/or tables"+ + " to exclude from the set of tables matched by the include pattern. "+ + "Namespace names must be front-qualified with a period.", + ) + cmd.Flags().StringVar( + &c.startRepOptions.TargetNamespace, + targetNamespaceFlag, + "", + "(only async replication) Namespace in the target database to which objects are replicated."+ + " The target namespace must have the same shard count as the source "+ + "namespace in the source cluster. "+ + "If you do not specify a target namespace, objects are replicated to"+ + " a namespace with the same name as the source namespace. If no such"+ + " namespace exists in the target cluster, it is created with the same"+ + " name and shard count as the source namespace. 
You can only replicate"+ + " tables in the public schema to the default_namespace in the target"+ + " cluster.", ) markFlagsFileName(cmd, map[string][]string{targetConnFlag: {"yaml"}}) // password flags diff --git a/commands/cmd_stop_db.go b/commands/cmd_stop_db.go index fc726b6..9a821bb 100644 --- a/commands/cmd_stop_db.go +++ b/commands/cmd_stop_db.go @@ -74,11 +74,8 @@ func (c *CmdStopDB) setLocalFlags(cmd *cobra.Command) { c.stopDBOptions.DrainSeconds, "drain-seconds", util.DefaultDrainSeconds, - util.GetEonFlagMsg("The time to wait, in seconds, for user connections to close on their own.\n"+ - "When the time expires, user connections are automatically closed and the database is hut down.\n"+ - "If set to 0, VCluster closes all user connections immediately.\n"+ - "If the value is negative, VCluster waits indefinitely until all user connections close."+ - "Default: "+strconv.Itoa(util.DefaultDrainSeconds)), + util.GetEonFlagMsg(util.TimeToWaitToClose+util.TimeExpire+util.CloseAllConns+util.InfiniteWaitTime+ + util.Default+strconv.Itoa(util.DefaultDrainSeconds)), ) cmd.Flags().StringVar( &c.stopDBOptions.SandboxName, diff --git a/commands/cmd_stop_subcluster.go b/commands/cmd_stop_subcluster.go index eb7b580..5d1f605 100644 --- a/commands/cmd_stop_subcluster.go +++ b/commands/cmd_stop_subcluster.go @@ -87,11 +87,8 @@ func (c *CmdStopSubcluster) setLocalFlags(cmd *cobra.Command) { &c.stopSCOptions.DrainSeconds, "drain-seconds", util.DefaultDrainSeconds, - util.GetEonFlagMsg("The time to wait, in seconds, for user connections to close on their own.\n"+ - "When the time expires, user connections are automatically closed and the database is hut down.\n"+ - "If set to 0, VCluster closes all user connections immediately.\n"+ - "If the value is negative, VCluster waits indefinitely until all user connections close."+ - "Default: "+strconv.Itoa(util.DefaultDrainSeconds)), + util.GetEonFlagMsg(util.TimeToWaitToClose+util.TimeExpire+util.CloseAllConns+util.InfiniteWaitTime+ + util.Default+strconv.Itoa(util.DefaultDrainSeconds)), ) cmd.Flags().StringVar( &c.stopSCOptions.SCName, diff --git a/commands/cmd_unsandbox.go b/commands/cmd_unsandbox.go index 9e1b67f..56225c5 100644 --- a/commands/cmd_unsandbox.go +++ b/commands/cmd_unsandbox.go @@ -20,6 +20,7 @@ import ( "github.com/spf13/cobra" "github.com/vertica/vcluster/vclusterops" + "github.com/vertica/vcluster/vclusterops/util" "github.com/vertica/vcluster/vclusterops/vlog" ) @@ -50,15 +51,15 @@ func makeCmdUnsandboxSubcluster() *cobra.Command { newCmd, unsandboxSubCmd, "Unsandboxes a subcluster", - `Removes a subcluster from the sandbox, unsandboxing it. When you unsandbox a subcluster, -its hosts immediately shut down and restart. When the hosts come back up, + `Removes a subcluster from the sandbox, unsandboxing it. When you unsandbox a subcluster, +its hosts immediately shut down and restart. When the hosts come back up, the subcluster is unsandboxed. -When a subcluster is unsandboxed, you should manually delete that subcluster's -metadata in communal storage before attempting to add a subcluster to that sandbox +When a subcluster is unsandboxed, you should manually delete that subcluster's +metadata in communal storage before attempting to add a subcluster to that sandbox again. -For example, if you unsandbox subcluster sc1, you should delete the +For example, if you unsandbox subcluster sc1, you should delete the directory path_to_catalog_of_sc1/metadata/sandbox_name. 
Examples: @@ -124,7 +125,7 @@ func (c *CmdUnsandboxSubcluster) Analyze(logger vlog.Printer) error { } func (c *CmdUnsandboxSubcluster) Run(vcc vclusterops.ClusterCommands) error { - vcc.LogInfo("Calling method Run() for command " + unsandboxSubCmd) + vcc.LogInfo(util.CallCommand + unsandboxSubCmd) options := c.usOptions @@ -144,9 +145,18 @@ func (c *CmdUnsandboxSubcluster) Run(vcc vclusterops.ClusterCommands) error { writeErr := dbConfig.write(options.ConfigPath, true /*forceOverwrite*/) if writeErr != nil { - vcc.DisplayWarning("Failed to write the configuration file: " + writeErr.Error()) + vcc.DisplayWarning(util.FailToWriteToConfig + writeErr.Error()) return nil } + + options.DatabaseOptions.Hosts = options.SCHosts + pollOpts := vclusterops.VPollSubclusterStateOptions{DatabaseOptions: options.DatabaseOptions, + SkipOptionsValidation: true, SCName: options.SCName} + err = vcc.VPollSubclusterState(&pollOpts) + if err != nil { + vcc.LogError(err, "Failed to wait for unsandboxed subcluster to come up") + return err + } return nil } diff --git a/commands/user_input_test.go b/commands/user_input_test.go index dcd35a0..5fbbc40 100644 --- a/commands/user_input_test.go +++ b/commands/user_input_test.go @@ -29,6 +29,8 @@ import ( var tempConfigFilePath = os.TempDir() + "/test_vertica_cluster.yaml" +const configRecover = "vcluster manage_config recover --db-name test_db " + func simulateVClusterCli(vclusterCmd string) error { // if no log file is given, the log will go to stdout dbOptions.LogPath = "" @@ -54,16 +56,14 @@ func TestConfigRecover(t *testing.T) { err = simulateVClusterCli("vcluster manage_config recover --db-name test_db") assert.ErrorContains(t, err, `required flag(s) "catalog-path", "hosts" not set`) - err = simulateVClusterCli("vcluster manage_config recover --db-name test_db " + - "--hosts 192.168.1.101") + err = simulateVClusterCli(configRecover + "--hosts 192.168.1.101") assert.ErrorContains(t, err, `required flag(s) "catalog-path" not set`) tempConfig, _ := os.Create(tempConfigFilePath) tempConfig.Close() defer os.Remove(tempConfigFilePath) - err = simulateVClusterCli("vcluster manage_config recover --db-name test_db " + - "--hosts 192.168.1.101 --catalog-path /data " + + err = simulateVClusterCli(configRecover + "--hosts 192.168.1.101 --catalog-path /data " + "--config " + tempConfigFilePath) assert.ErrorContains(t, err, "config file exists at "+tempConfigFilePath) } diff --git a/go.mod b/go.mod index 50cc059..ad5722b 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/vertica/vcluster -go 1.20 +go 1.22 require ( github.com/deckarep/golang-set/v2 v2.3.1 @@ -80,7 +80,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect google.golang.org/grpc v1.59.0 // indirect - google.golang.org/protobuf v1.31.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index d5ab701..0c9465a 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= +cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic= cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= cloud.google.com/go/compute 
v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= @@ -13,6 +14,7 @@ github.com/aws/aws-sdk-go v1.49.5 h1:y2yfBlwjPDi3/sBVKeznYEdDy6wIhjA2L5NCBMLUIYA github.com/aws/aws-sdk-go v1.49.5/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -33,9 +35,11 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -96,6 +100,7 @@ github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qK github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -111,6 +116,7 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -140,7 +146,9 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/niemeyer/pretty 
v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/onsi/ginkgo/v2 v2.7.0 h1:/XxtEV3I3Eif/HobnVx9YmJgk8ENdRsuUmM+fLCFNow= +github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= github.com/onsi/gomega v1.24.2 h1:J/tulyYK6JwBldPViHJReihxxZ+22FHs0piGjQAvoUE= +github.com/onsi/gomega v1.24.2/go.mod h1:gs3J10IS7Z7r7eXRoNJIrNqU4ToQukCJhFtKrWgHWnk= github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -153,6 +161,7 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= @@ -199,6 +208,7 @@ go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= +go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= @@ -312,8 +322,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -336,6 +346,7 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh k8s.io/api v0.26.2 h1:dM3cinp3PGB6asOySalOZxEG4CZ0IAdJsrYZXE/ovGQ= k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU= k8s.io/apiextensions-apiserver v0.26.2 
h1:/yTG2B9jGY2Q70iGskMf41qTLhL9XeNN2KhI0uDgwko= +k8s.io/apiextensions-apiserver v0.26.2/go.mod h1:Y7UPgch8nph8mGCuVk0SK83LnS8Esf3n6fUBgew8SH8= k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ= k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= k8s.io/client-go v0.26.2 h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI= @@ -347,6 +358,7 @@ k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhkl k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y= k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/controller-runtime v0.14.5 h1:6xaWFqzT5KuAQ9ufgUaj1G/+C4Y1GRkhrxl+BJ9i+5s= +sigs.k8s.io/controller-runtime v0.14.5/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= diff --git a/rfc7807/errors.go b/rfc7807/errors.go index 3b7b257..a95ff6e 100644 --- a/rfc7807/errors.go +++ b/rfc7807/errors.go @@ -202,6 +202,11 @@ var ( "Target path does not exist", http.StatusBadRequest, ) + InvalidCatalogPathError = newProblemID( + path.Join(errorEndpointsPrefix, "invalid-catalog-path"), + "Invalid catalog path", + http.StatusBadRequest, + ) CECatalogContentDirEmptyError = newProblemID( path.Join(errorEndpointsPrefix, "catalog-content-dir-empty-error"), "Target directory is empty", @@ -222,11 +227,21 @@ var ( "Failed to open file", http.StatusInternalServerError, ) + GenericReadFileError = newProblemID( + path.Join(errorEndpointsPrefix, "read-file-failure-error"), + "Failed to read file", + http.StatusInternalServerError, + ) GenericWriteFileError = newProblemID( path.Join(errorEndpointsPrefix, "write-file-failure-error"), "Failed to write file", http.StatusInternalServerError, ) + GenericCreateFileError = newProblemID( + path.Join(errorEndpointsPrefix, "create-file-failure-error"), + "Failed to create file", + http.StatusInternalServerError, + ) MessageQueueFull = newProblemID( path.Join(errorEndpointsPrefix, "message-queue-full"), "Message queue is full", diff --git a/vclusterops/cluster_op.go b/vclusterops/cluster_op.go index a725d67..2ecf82a 100644 --- a/vclusterops/cluster_op.go +++ b/vclusterops/cluster_op.go @@ -72,10 +72,11 @@ const ( ) const ( - SuccessCode = 200 - MultipleChoiceCode = 300 - UnauthorizedCode = 401 - InternalErrorCode = 500 + SuccessCode = 200 + MultipleChoiceCode = 300 + UnauthorizedCode = 401 + PreconditionFailedCode = 412 + InternalErrorCode = 500 ) // hostHTTPResult is used to save result of an Adapter's sendRequest(...) function @@ -97,13 +98,17 @@ const respSuccStatusCode = 0 // The HTTP response with a 401 status code can have several scenarios: // 1. Wrong password // 2. Wrong certificate -// 3. The local node has not yet joined the cluster; the HTTP server will accept connections once the node joins the cluster. 
-// HTTPCheckDBRunningOp in create_db need to check all scenarios to see any HTTP running -// For HTTPSPollNodeStateOp in start_db, it requires only handling the first and second scenarios +// HTTPCheckDBRunningOp in create_db and HTTPSPollNodeStateOp in start_db need to handle these scenarios func (hostResult *hostHTTPResult) isUnauthorizedRequest() bool { return hostResult.statusCode == UnauthorizedCode } +// The HTTP response with a 412 may happen if +// the local node has not yet joined the cluster; the HTTP server will accept connections once the node joins the cluster. +func (hostResult *hostHTTPResult) hasPreconditionFailed() bool { + return hostResult.statusCode == PreconditionFailedCode +} + // isSuccess returns true if status code is 200 func (hostResult *hostHTTPResult) isSuccess() bool { return hostResult.statusCode == SuccessCode @@ -129,7 +134,8 @@ func (hostResult *hostHTTPResult) isInternalError() bool { } func (hostResult *hostHTTPResult) isHTTPRunning() bool { - if hostResult.isPassing() || hostResult.isUnauthorizedRequest() || hostResult.isInternalError() { + if hostResult.isPassing() || hostResult.isUnauthorizedRequest() || + hostResult.isInternalError() || hostResult.hasPreconditionFailed() { return true } return false @@ -540,33 +546,36 @@ type ClusterCommands interface { DisplayError(msg string, v ...any) VAddNode(options *VAddNodeOptions) (VCoordinationDatabase, error) - VStopNode(options *VStopNodeOptions) error VAddSubcluster(options *VAddSubclusterOptions) error + VAlterSubclusterType(options *VAlterSubclusterTypeOptions) error + VCheckVClusterServerPid(options *VCheckVClusterServerPidOptions) ([]string, error) VCreateDatabase(options *VCreateDatabaseOptions) (VCoordinationDatabase, error) + VCreateArchive(options *VCreateArchiveOptions) error VDropDatabase(options *VDropDatabaseOptions) error + VFetchCoordinationDatabase(options *VFetchCoordinationDatabaseOptions) (VCoordinationDatabase, error) + VFetchNodesDetails(options *VFetchNodesDetailsOptions) (NodesDetails, error) VFetchNodeState(options *VFetchNodeStateOptions) ([]NodeInfo, error) + VGetDrainingStatus(options *VGetDrainingStatusOptions) (DrainingStatusList, error) VInstallPackages(options *VInstallPackagesOptions) (*InstallPackageStatus, error) + VPollSubclusterState(options *VPollSubclusterStateOptions) error + VPromoteSandboxToMain(options *VPromoteSandboxToMainOptions) error VReIP(options *VReIPOptions) error VRemoveNode(options *VRemoveNodeOptions) (VCoordinationDatabase, error) VRemoveSubcluster(removeScOpt *VRemoveScOptions) (VCoordinationDatabase, error) + VRenameSubcluster(options *VRenameSubclusterOptions) error + VReplicateDatabase(options *VReplicationDatabaseOptions) error VReviveDatabase(options *VReviveDatabaseOptions) (dbInfo string, vdbPtr *VCoordinationDatabase, err error) VSandbox(options *VSandboxOptions) error VScrutinize(options *VScrutinizeOptions) error VShowRestorePoints(options *VShowRestorePointsOptions) (restorePoints []RestorePoint, err error) + VSaveRestorePoint(options *VSaveRestorePointOptions) (err error) VStartDatabase(options *VStartDatabaseOptions) (vdbPtr *VCoordinationDatabase, err error) VStartNodes(options *VStartNodesOptions) error VStartSubcluster(startScOpt *VStartScOptions) error VStopDatabase(options *VStopDatabaseOptions) error - VReplicateDatabase(options *VReplicationDatabaseOptions) error - VFetchCoordinationDatabase(options *VFetchCoordinationDatabaseOptions) (VCoordinationDatabase, error) - VUnsandbox(options *VUnsandboxOptions) error + VStopNode(options 
*VStopNodeOptions) error VStopSubcluster(options *VStopSubclusterOptions) error - VAlterSubclusterType(options *VAlterSubclusterTypeOptions) error - VPromoteSandboxToMain(options *VPromoteSandboxToMainOptions) error - VRenameSubcluster(options *VRenameSubclusterOptions) error - VFetchNodesDetails(options *VFetchNodesDetailsOptions) (NodesDetails, error) - - VCheckVClusterServerPid(options *VCheckVClusterServerPidOptions) ([]string, error) + VUnsandbox(options *VUnsandboxOptions) error } type VClusterCommandsLogger struct { diff --git a/vclusterops/cluster_op_engine.go b/vclusterops/cluster_op_engine.go index d38f9dc..cd76e8c 100644 --- a/vclusterops/cluster_op_engine.go +++ b/vclusterops/cluster_op_engine.go @@ -100,7 +100,7 @@ func (opEngine *VClusterOpEngine) runInstruction( op.logFinalize() err = op.finalize(execContext) if err != nil { - return fmt.Errorf("finalize failed %w", err) + return fmt.Errorf("finalize %s failed, details: %w", op.getName(), err) } logger.PrintInfo("[%s] is successfully completed", op.getName()) diff --git a/vclusterops/cmd_type.go b/vclusterops/cmd_type.go index 96fedf3..25189ec 100644 --- a/vclusterops/cmd_type.go +++ b/vclusterops/cmd_type.go @@ -20,8 +20,10 @@ const ( SandboxSCCmd UnsandboxSCCmd ShowRestorePointsCmd + SaveRestorePointsCmd InstallPackagesCmd ConfigRecoverCmd + GetDrainingStatusCmd ManageConnectionDrainingCmd SetConfigurationParameterCmd GetConfigurationParameterCmd @@ -39,6 +41,8 @@ const ( AddNodeSyncCat StartNodeSyncCat RemoveNodeSyncCat + CreateArchiveCmd + PollSubclusterStateCmd ) var cmdStringMap = map[CmdType]string{ @@ -58,8 +62,10 @@ var cmdStringMap = map[CmdType]string{ SandboxSCCmd: "sandbox_subcluster", UnsandboxSCCmd: "unsandbox_subcluster", ShowRestorePointsCmd: "show_restore_points", + SaveRestorePointsCmd: "save_restore_point", InstallPackagesCmd: "install_packages", ConfigRecoverCmd: "manage_config_recover", + GetDrainingStatusCmd: "get_draining_status", ManageConnectionDrainingCmd: "manage_connection_draining", SetConfigurationParameterCmd: "set_configuration_parameter", ReplicationStartCmd: "replication_start", @@ -76,6 +82,8 @@ var cmdStringMap = map[CmdType]string{ AddNodeSyncCat: "add_node_sync_cat", StartNodeSyncCat: "start_node_sync_cat", RemoveNodeSyncCat: "remove_node_sync_cat", + CreateArchiveCmd: "create_archive", + PollSubclusterStateCmd: "poll_subcluster_state", } func (cmd CmdType) CmdString() string { diff --git a/vclusterops/coordinator_database.go b/vclusterops/coordinator_database.go index b5644d9..839458c 100644 --- a/vclusterops/coordinator_database.go +++ b/vclusterops/coordinator_database.go @@ -317,6 +317,30 @@ func (vdb *VCoordinationDatabase) filterPrimaryNodes() { vdb.HostList = maps.Keys(vdb.HostNodeMap) } +// Update and limit the hostlist based on status and sandbox info +// If sandbox provided, pick up sandbox up hosts and return. Else return up hosts. 
+func (vdb *VCoordinationDatabase) filterUpHostlist(inputHosts []string, sandbox string) []string { + var clusterHosts []string + var upSandboxHosts []string + + for _, h := range inputHosts { + vnode, ok := vdb.HostNodeMap[h] + if !ok { + // host address not found in vdb, skip it + continue + } + if vnode.Sandbox == "" && vnode.State == util.NodeUpState { + clusterHosts = append(clusterHosts, vnode.Address) + } else if vnode.Sandbox == sandbox && vnode.State == util.NodeUpState { + upSandboxHosts = append(upSandboxHosts, vnode.Address) + } + } + if sandbox == "" { + return clusterHosts + } + return upSandboxHosts +} + // hostIsUp returns true if the host is up func (vdb *VCoordinationDatabase) hostIsUp(hostName string) bool { return vdb.HostNodeMap[hostName].State == util.NodeUpState diff --git a/vclusterops/create_archive.go b/vclusterops/create_archive.go new file mode 100644 index 0000000..037328b --- /dev/null +++ b/vclusterops/create_archive.go @@ -0,0 +1,195 @@ +/* + (c) Copyright [2023-2024] Open Text. + Licensed under the Apache License, Version 2.0 (the "License"); + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package vclusterops + +import ( + "fmt" + + "github.com/vertica/vcluster/vclusterops/util" + "github.com/vertica/vcluster/vclusterops/vlog" +) + +const CreateArchiveDefaultNumRestore = 0 + +type VCreateArchiveOptions struct { + DatabaseOptions + + // Required arguments + ArchiveName string + // Optional arguments + NumOfArchives int + Sandbox string +} + +func VCreateArchiveFactory() VCreateArchiveOptions { + options := VCreateArchiveOptions{} + // set default values to the params + options.setDefaultValues() + + return options +} + +func (options *VCreateArchiveOptions) setDefaultValues() { + options.DatabaseOptions.setDefaultValues() +} + +func (options *VCreateArchiveOptions) validateRequiredOptions(logger vlog.Printer) error { + err := options.validateEonOptions(logger) + if err != nil { + return err + } + err = options.validateBaseOptions(CreateArchiveCmd, logger) + if err != nil { + return err + } + if options.ArchiveName == "" { + return fmt.Errorf("must specify an archive name") + } + + err = util.ValidateArchiveName(options.ArchiveName) + if err != nil { + return err + } + return nil +} + +func (options *VCreateArchiveOptions) validateExtraOptions() error { + if options.NumOfArchives < 0 { + return fmt.Errorf("number of restore points must be greater than or equal to 0") + } + if options.Sandbox != "" { + return util.ValidateSandboxName(options.Sandbox) + } + return nil +} + +func (options *VCreateArchiveOptions) validateEonOptions(_ vlog.Printer) error { + if !options.IsEon { + return fmt.Errorf("create archive is only supported in Eon mode") + } + return nil +} + +func (options *VCreateArchiveOptions) validateParseOptions(log vlog.Printer) error { + // validate required parameters + err := options.validateRequiredOptions(log) + if err != nil { + return err + } + + err = options.validateEonOptions(log) + if err != nil { + return err + } + + err = options.validateAuthOptions(CreateArchiveCmd.CmdString(), log) + if err != nil { + return err + } + + //
validate all other params + err = options.validateExtraOptions() + if err != nil { + return err + } + return nil +} + +// analyzeOptions will modify some options based on what is chosen +func (options *VCreateArchiveOptions) analyzeOptions() (err error) { + // we analyze host names when it is set in user input, otherwise we use hosts in yaml config + if len(options.RawHosts) > 0 { + // resolve RawHosts to be IP addresses + hostAddresses, err := util.ResolveRawHostsToAddresses(options.RawHosts, options.IPv6) + if err != nil { + return err + } + options.Hosts = hostAddresses + } + return nil +} + +func (options *VCreateArchiveOptions) validateAnalyzeOptions(log vlog.Printer) error { + if err := options.validateParseOptions(log); err != nil { + return err + } + if err := options.analyzeOptions(); err != nil { + return err + } + if err := options.setUsePassword(log); err != nil { + return err + } + return options.validateUserName(log) +} + +func (vcc VClusterCommands) VCreateArchive(options *VCreateArchiveOptions) error { + /* + * - Produce Instructions + * - Create a VClusterOpEngine + * - Give the instructions to the VClusterOpEngine to run + */ + + // validate and analyze options + err := options.validateAnalyzeOptions(vcc.Log) + if err != nil { + return err + } + + // produce create acchive instructions + instructions, err := vcc.produceCreateArchiveInstructions(options) + if err != nil { + return fmt.Errorf("fail to produce instructions, %w", err) + } + + // create a VClusterOpEngine, and add certs to the engine + clusterOpEngine := makeClusterOpEngine(instructions, options) + + // give the instructions to the VClusterOpEngine to run + runError := clusterOpEngine.run(vcc.Log) + if runError != nil { + return fmt.Errorf("fail to create archive: %w", runError) + } + return nil +} + +// The generated instructions will later perform the following operations necessary +// for a successful create_archive: +// - Retrieve VDB from HTTP endpoints +// - Run create archive query +func (vcc *VClusterCommands) produceCreateArchiveInstructions(options *VCreateArchiveOptions) ([]clusterOp, error) { + var instructions []clusterOp + vdb := makeVCoordinationDatabase() + + err := vcc.getVDBFromRunningDBIncludeSandbox(&vdb, &options.DatabaseOptions, util.MainClusterSandbox) + if err != nil { + return instructions, err + } + // get up hosts + hosts := options.Hosts + // Trim host list + hosts = vdb.filterUpHostlist(hosts, options.Sandbox) + bootstrapHost := []string{getInitiator(hosts)} + + httpsCreateArchiveOp, err := makeHTTPSCreateArchiveOp(bootstrapHost, options.usePassword, + options.UserName, options.Password, options.ArchiveName, options.NumOfArchives) + if err != nil { + return instructions, err + } + + instructions = append(instructions, + &httpsCreateArchiveOp) + return instructions, nil +} diff --git a/vclusterops/create_db.go b/vclusterops/create_db.go index 03d4992..57ae8ee 100644 --- a/vclusterops/create_db.go +++ b/vclusterops/create_db.go @@ -295,6 +295,7 @@ func (vcc VClusterCommands) VCreateDatabase(options *VCreateDatabaseOptions) (VC vdb := makeVCoordinationDatabase() err := vdb.setFromCreateDBOptions(options, vcc.Log) if err != nil { + vcc.Log.Error(err, "fail to create database") return vdb, err } // produce instructions diff --git a/vclusterops/fetch_node_state.go b/vclusterops/fetch_node_state.go index 45d9646..e2cbfd7 100644 --- a/vclusterops/fetch_node_state.go +++ b/vclusterops/fetch_node_state.go @@ -72,7 +72,7 @@ func (vcc VClusterCommands) VFetchNodeState(options 
*VFetchNodeStateOptions) ([] // this vdb is used to fetch node version var vdb VCoordinationDatabase - err = vcc.getVDBFromRunningDBIncludeSandbox(&vdb, &options.DatabaseOptions, util.MainClusterSandbox) + err = vcc.getVDBFromMainRunningDBContainsSandbox(&vdb, &options.DatabaseOptions) if err != nil { vcc.Log.PrintInfo("Error from vdb build: %s", err.Error()) @@ -91,7 +91,13 @@ func (vcc VClusterCommands) VFetchNodeState(options *VFetchNodeStateOptions) ([] return vcc.fetchNodeStateFromDownDB(options) } - // produce list_all_nodes instructions + nodeStates := buildNodeStateList(&vdb, false /*forDownDatabase*/) + // return the result if no need to get version info + if !options.GetVersion { + return nodeStates, nil + } + + // produce instructions to fill node information instructions, err := vcc.produceListAllNodesInstructions(options, &vdb) if err != nil { return nil, fmt.Errorf("fail to produce instructions, %w", err) @@ -102,7 +108,6 @@ func (vcc VClusterCommands) VFetchNodeState(options *VFetchNodeStateOptions) ([] // give the instructions to the VClusterOpEngine to run runError := clusterOpEngine.run(vcc.Log) - nodeStates := clusterOpEngine.execContext.nodesInfo if runError == nil { // fill node version for i, nodeInfo := range nodeStates { @@ -116,34 +121,9 @@ func (vcc VClusterCommands) VFetchNodeState(options *VFetchNodeStateOptions) ([] nodeInfo.Address) } } - - return nodeStates, nil - } - - // error out in case of wrong certificate or password - if len(clusterOpEngine.execContext.hostsWithWrongAuth) > 0 { - return nodeStates, - fmt.Errorf("wrong certificate or password on hosts %v", clusterOpEngine.execContext.hostsWithWrongAuth) - } - - // if failed to get node info from a running database, - // we will try to get it by reading catalog editor - upNodeCount := 0 - for _, n := range nodeStates { - if n.State == util.NodeUpState { - upNodeCount++ - } - } - - if upNodeCount == 0 { - if options.SkipDownDatabase { - return []NodeInfo{}, rfc7807.New(rfc7807.FetchDownDatabase) - } - - return vcc.fetchNodeStateFromDownDB(options) } - return nodeStates, runError + return nodeStates, nil } func (vcc VClusterCommands) fetchNodeStateFromDownDB(options *VFetchNodeStateOptions) ([]NodeInfo, error) { @@ -163,18 +143,7 @@ func (vcc VClusterCommands) fetchNodeStateFromDownDB(options *VFetchNodeStateOpt return nodeStates, err } - for _, h := range vdb.HostList { - var nodeInfo NodeInfo - n := vdb.HostNodeMap[h] - nodeInfo.Address = n.Address - nodeInfo.Name = n.Name - nodeInfo.CatalogPath = n.CatalogPath - nodeInfo.Subcluster = n.Subcluster - nodeInfo.IsPrimary = n.IsPrimary - nodeInfo.Version = n.Version - nodeInfo.State = util.NodeDownState - nodeStates = append(nodeStates, nodeInfo) - } + nodeStates = buildNodeStateList(&vdb, true /*forDownDatabase*/) return nodeStates, nil } @@ -186,73 +155,64 @@ func (vcc VClusterCommands) produceListAllNodesInstructions( vdb *VCoordinationDatabase) ([]clusterOp, error) { var instructions []clusterOp - // get hosts - hosts := options.Hosts - - // validate user name - usePassword := false - if options.Password != nil { - usePassword = true - err := options.validateUserName(vcc.Log) - if err != nil { - return instructions, err - } - } - nmaHealthOp := makeNMAHealthOpSkipUnreachable(options.Hosts) nmaReadVerticaVersionOp := makeNMAReadVerticaVersionOp(vdb) - // Trim host list - hosts = options.updateHostlist(vcc, vdb, hosts) - - httpsCheckNodeStateOp, err := makeHTTPSCheckNodeStateOp(hosts, - usePassword, options.UserName, options.Password) - if err != nil { - 
return instructions, err - } - if options.GetVersion { instructions = append(instructions, &nmaHealthOp, &nmaReadVerticaVersionOp) } - instructions = append(instructions, - &httpsCheckNodeStateOp, - ) - return instructions, nil } -// Update and limit the hostlist based on status and sandbox info -// Note: if we have any UP main cluster host in the input list, the trimmed hostlist would always contain -// -// only main cluster UP hosts. -func (options *VFetchNodeStateOptions) updateHostlist(vcc VClusterCommands, vdb *VCoordinationDatabase, inputHosts []string) []string { - var mainClusterHosts []string - var upSandboxHosts []string - - for _, h := range inputHosts { - vnode, ok := vdb.HostNodeMap[h] - if !ok { - // host address not found in vdb, skip it - continue +func buildNodeStateList(vdb *VCoordinationDatabase, forDownDatabase bool) []NodeInfo { + var nodeStates []NodeInfo + + // a map from a subcluster name to whether it is primary + // Context: if a node is primary, the subcluster it belongs to is a primary subcluster. + // If any of the nodes are down in such a primary subcluster, HTTPSUpdateNodeStateOp cannot correctly + // update its IsPrimary value, because this op sends request to each host. + // We use the following scMap to check whether any node is primary in each subcluster, + // then update other nodes' IsPrimary value in this subcluster. + scMap := make(map[string]bool) + + for _, h := range vdb.HostList { + var nodeInfo NodeInfo + n := vdb.HostNodeMap[h] + nodeInfo.Address = n.Address + nodeInfo.CatalogPath = n.CatalogPath + nodeInfo.IsPrimary = n.IsPrimary + nodeInfo.Name = n.Name + nodeInfo.Sandbox = n.Sandbox + if forDownDatabase { + nodeInfo.State = util.NodeDownState + } else { + nodeInfo.State = n.State } - if vnode.Sandbox == "" && (vnode.State == util.NodeUpState || vnode.State == util.NodeUnknownState) { - mainClusterHosts = append(mainClusterHosts, vnode.Address) - } else if vnode.State == util.NodeUpState { - upSandboxHosts = append(upSandboxHosts, vnode.Address) + nodeInfo.Subcluster = n.Subcluster + nodeInfo.Version = n.Version + + nodeStates = append(nodeStates, nodeInfo) + + if !forDownDatabase { + if isPrimary, exists := scMap[n.Subcluster]; exists { + scMap[n.Subcluster] = isPrimary || n.IsPrimary + } else { + scMap[n.Subcluster] = n.IsPrimary + } } } - if len(mainClusterHosts) > 0 { - vcc.Log.PrintWarning("Main cluster UP node found in host list. The status would be fetched from a main cluster host!") - return mainClusterHosts - } - if len(upSandboxHosts) > 0 { - vcc.Log.PrintWarning("Only sandboxed UP nodes found in host list. 
The status would be fetched from a sandbox host!") - return upSandboxHosts + + // update IsPrimary of the nodes for running database + if !forDownDatabase { + for i := 0; i < len(nodeStates); i++ { + nodeInfo := nodeStates[i] + scName := nodeInfo.Subcluster + nodeStates[i].IsPrimary = scMap[scName] + } } - // We do not have an up host, so better try with complete input hostlist - return inputHosts + return nodeStates } diff --git a/vclusterops/get_config_parameter_test.go b/vclusterops/get_config_parameter_test.go index f41e757..c5b7ac8 100644 --- a/vclusterops/get_config_parameter_test.go +++ b/vclusterops/get_config_parameter_test.go @@ -26,10 +26,10 @@ func TestVGetConfigurationParameterOptions_validateParseOptions(t *testing.T) { logger := vlog.Printer{} opt := VGetConfigurationParameterOptionsFactory() - testPassword := "get-config-test-password" //nolint:gosec + testPd := "get-config-test-pd" testSandbox := "get-config-test-sandbox" testDBName := "get_config_test_dbname" - testUsername := "get-config-test-username" //nolint:gosec + testUsername := "get-config-test-un" testConfigParameter := "get-config-test-parameter" testLevel := "get-config-test-level" @@ -37,7 +37,7 @@ func TestVGetConfigurationParameterOptions_validateParseOptions(t *testing.T) { opt.RawHosts = append(opt.RawHosts, "get-config-test-raw-host") opt.DBName = testDBName opt.UserName = testUsername - opt.Password = &testPassword + opt.Password = &testPd opt.ConfigParameter = testConfigParameter opt.Level = testLevel diff --git a/vclusterops/get_draining_status.go b/vclusterops/get_draining_status.go new file mode 100644 index 0000000..59bef52 --- /dev/null +++ b/vclusterops/get_draining_status.go @@ -0,0 +1,167 @@ +/* + (c) Copyright [2023-2024] Open Text. + Licensed under the Apache License, Version 2.0 (the "License"); + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
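Editor's note: the scMap pass described in the comment above reduces to an OR across each subcluster followed by a write-back. A standalone sketch of that idea follows, using a stripped-down stand-in for NodeInfo (the field set here is an assumption, not the real type).

package main

import "fmt"

// nodeInfo is a hypothetical reduction of the real NodeInfo type, keeping only
// the fields needed to show how subcluster-level primariness is propagated.
type nodeInfo struct {
	Name       string
	Subcluster string
	IsPrimary  bool
}

// propagatePrimary marks every node of a subcluster as primary if at least one
// node reported that subcluster as primary, mirroring the scMap pass above.
func propagatePrimary(nodes []nodeInfo) {
	scMap := make(map[string]bool)
	for _, n := range nodes {
		// OR with the map's zero value is equivalent to the exists-check form
		// used in buildNodeStateList.
		scMap[n.Subcluster] = scMap[n.Subcluster] || n.IsPrimary
	}
	for i := range nodes {
		nodes[i].IsPrimary = scMap[nodes[i].Subcluster]
	}
}

func main() {
	nodes := []nodeInfo{
		{Name: "n1", Subcluster: "sc1", IsPrimary: true},
		{Name: "n2", Subcluster: "sc1", IsPrimary: false}, // down node, flag not reported
		{Name: "n3", Subcluster: "sc2", IsPrimary: false},
	}
	propagatePrimary(nodes)
	fmt.Println(nodes) // n2 becomes primary because sc1 has a primary node
}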
+*/ + +package vclusterops + +import ( + "fmt" + + "github.com/vertica/vcluster/vclusterops/util" + "github.com/vertica/vcluster/vclusterops/vlog" +) + +type DrainingStatus struct { + SubclusterName string `json:"subcluster_name"` + Status string `json:"drain_status"` + RedirectTo string `json:"redirect_to"` +} + +type DrainingStatusList struct { + StatusList []DrainingStatus `json:"draining_status_list"` +} + +type VGetDrainingStatusOptions struct { + // basic db info + DatabaseOptions + + // the name of the sandbox to target, if left empty the main cluster is assumed + Sandbox string +} + +func VGetDrainingStatusFactory() VGetDrainingStatusOptions { + opt := VGetDrainingStatusOptions{} + // set default values to the params + opt.setDefaultValues() + + return opt +} + +func (opt *VGetDrainingStatusOptions) validateEonOptions(_ vlog.Printer) error { + if !opt.IsEon { + return fmt.Errorf("getting draining status is only supported in Eon mode") + } + return nil +} + +func (opt *VGetDrainingStatusOptions) validateParseOptions(logger vlog.Printer) error { + err := opt.validateEonOptions(logger) + if err != nil { + return err + } + + err = opt.validateBaseOptions(GetDrainingStatusCmd, logger) + if err != nil { + return err + } + + err = opt.validateAuthOptions(GetDrainingStatusCmd.CmdString(), logger) + if err != nil { + return err + } + + return opt.validateExtraOptions() +} + +func (opt *VGetDrainingStatusOptions) validateExtraOptions() error { + if opt.Sandbox != "" { + return util.ValidateSandboxName(opt.Sandbox) + } + return nil +} + +func (opt *VGetDrainingStatusOptions) analyzeOptions() (err error) { + // we analyze host names when it is set in user input, otherwise we use hosts in yaml config + if len(opt.RawHosts) > 0 { + // resolve RawHosts to be IP addresses + opt.Hosts, err = util.ResolveRawHostsToAddresses(opt.RawHosts, opt.IPv6) + if err != nil { + return err + } + opt.normalizePaths() + } + return nil +} + +func (opt *VGetDrainingStatusOptions) validateAnalyzeOptions(log vlog.Printer) error { + if err := opt.validateParseOptions(log); err != nil { + return err + } + if err := opt.analyzeOptions(); err != nil { + return err + } + if err := opt.setUsePassword(log); err != nil { + return err + } + return opt.validateUserName(log) +} + +// VGetDrainingStatus retrieves draining status of subclusters in the main cluster or a sandbox +func (vcc VClusterCommands) VGetDrainingStatus(options *VGetDrainingStatusOptions) (dsList DrainingStatusList, err error) { + // validate and analyze all options + err = options.validateAnalyzeOptions(vcc.Log) + if err != nil { + return dsList, err + } + + // produce get-draining-status instructions + instructions, err := vcc.produceGetDrainingStatusInstructions(options, &dsList) + if err != nil { + return dsList, fmt.Errorf("fail to produce instructions, %w", err) + } + + // Create a VClusterOpEngine, and add certs to the engine + clusterOpEngine := makeClusterOpEngine(instructions, options) + + // Give the instructions to the VClusterOpEngine to run + runError := clusterOpEngine.run(vcc.Log) + if runError != nil { + return dsList, fmt.Errorf("fail to get draining status: %w", runError) + } + + return dsList, nil +} + +// The generated instructions will later perform the following operations necessary +// for getting draining status of a cluster. 
+// - Get up hosts of target sandbox +// - Send get-draining-status request on the up hosts +func (vcc VClusterCommands) produceGetDrainingStatusInstructions( + options *VGetDrainingStatusOptions, drainingStatusList *DrainingStatusList) ([]clusterOp, error) { + var instructions []clusterOp + + assertMainClusterUpNodes := options.Sandbox == "" + + // get up hosts in all sandboxes/clusters + // exit early if specified sandbox has no up hosts + httpsGetUpNodesOp, err := makeHTTPSGetUpNodesWithSandboxOp(options.DBName, options.Hosts, + options.usePassword, options.UserName, options.Password, + GetDrainingStatusCmd, options.Sandbox, assertMainClusterUpNodes) + if err != nil { + return instructions, err + } + + httpsGetDrainingStatusOp, err := makeHTTPSGetDrainingStatusOp(options.usePassword, + options.Sandbox, options.UserName, options.Password, drainingStatusList) + if err != nil { + return instructions, err + } + + instructions = append(instructions, + &httpsGetUpNodesOp, + &httpsGetDrainingStatusOp, + ) + + return instructions, nil +} diff --git a/vclusterops/https_add_subcluster_op.go b/vclusterops/https_add_subcluster_op.go index 3734df1..2f228cb 100644 --- a/vclusterops/https_add_subcluster_op.go +++ b/vclusterops/https_add_subcluster_op.go @@ -81,7 +81,7 @@ func (op *httpsAddSubclusterOp) setupClusterHTTPRequest(hosts []string) error { for _, host := range hosts { httpRequest := hostHTTPRequest{} httpRequest.Method = PostMethod - httpRequest.buildHTTPSEndpoint("subclusters/" + op.scName) + httpRequest.buildHTTPSEndpoint(util.SubclustersEndpoint + op.scName) if op.useHTTPPassword { httpRequest.Password = op.httpsPassword httpRequest.Username = op.userName diff --git a/vclusterops/https_check_node_state_op.go b/vclusterops/https_check_node_state_op.go deleted file mode 100644 index 46adb6a..0000000 --- a/vclusterops/https_check_node_state_op.go +++ /dev/null @@ -1,138 +0,0 @@ -/* - (c) Copyright [2023-2024] Open Text. - Licensed under the Apache License, Version 2.0 (the "License"); - You may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package vclusterops - -import ( - "errors" - "fmt" - - "github.com/vertica/vcluster/vclusterops/util" -) - -type httpsCheckNodeStateOp struct { - opBase - opHTTPSBase -} - -func makeHTTPSCheckNodeStateOp(hosts []string, - useHTTPPassword bool, - userName string, - httpsPassword *string, -) (httpsCheckNodeStateOp, error) { - op := httpsCheckNodeStateOp{} - op.name = "HTTPCheckNodeStateOp" - op.description = "Check node state from running database" - // The hosts are the ones we are going to talk to. 
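Editor's note: the payload that VGetDrainingStatus ultimately returns is the draining_status_list JSON handled by the HTTPS op later in this patch. The standalone sketch below decodes a sample body of that shape into locally re-declared copies of the structs defined above; the sample values are illustrative only.

package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the response structs from get_draining_status.go, so this
// sketch compiles on its own.
type DrainingStatus struct {
	SubclusterName string `json:"subcluster_name"`
	Status         string `json:"drain_status"`
	RedirectTo     string `json:"redirect_to"`
}

type DrainingStatusList struct {
	StatusList []DrainingStatus `json:"draining_status_list"`
}

func main() {
	// A body of the documented shape, as shown in the op's processResult comment.
	body := `{"draining_status_list":[{"subcluster_name":"default_subcluster","drain_status":"pausing","redirect_to":""}]}`

	var dsList DrainingStatusList
	if err := json.Unmarshal([]byte(body), &dsList); err != nil {
		panic(err)
	}
	for _, ds := range dsList.StatusList {
		fmt.Printf("%s: %s (redirect to %q)\n", ds.SubclusterName, ds.Status, ds.RedirectTo)
	}
}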
- // They can be a subset of the actual host information that we return, - // as if any of the hosts is responsive, spread can give us the info of all nodes - op.hosts = hosts - op.useHTTPPassword = useHTTPPassword - - err := util.ValidateUsernameAndPassword(op.name, useHTTPPassword, userName) - if err != nil { - return op, err - } - - op.userName = userName - op.httpsPassword = httpsPassword - return op, nil -} - -func (op *httpsCheckNodeStateOp) setupClusterHTTPRequest(hosts []string) error { - for _, host := range hosts { - httpRequest := hostHTTPRequest{} - httpRequest.Method = GetMethod - httpRequest.buildHTTPSEndpoint("nodes") - if op.useHTTPPassword { - httpRequest.Password = op.httpsPassword - httpRequest.Username = op.userName - } - op.clusterHTTPRequest.RequestCollection[host] = httpRequest - } - - return nil -} - -func (op *httpsCheckNodeStateOp) prepare(execContext *opEngineExecContext) error { - execContext.dispatcher.setup(op.hosts) - - return op.setupClusterHTTPRequest(op.hosts) -} - -func (op *httpsCheckNodeStateOp) execute(execContext *opEngineExecContext) error { - if err := op.runExecute(execContext); err != nil { - return err - } - - return op.processResult(execContext) -} - -func (op *httpsCheckNodeStateOp) processResult(execContext *opEngineExecContext) error { - var allErrs error - respondingNodeCount := 0 - - for host, result := range op.clusterHTTPRequest.ResultCollection { - op.logResponse(host, result) - - if result.isUnauthorizedRequest() { - op.logger.PrintError("[%s] unauthorized request: %s", op.name, result.content) - execContext.hostsWithWrongAuth = append(execContext.hostsWithWrongAuth, host) - // return here because we assume that - // we will get the same error across other nodes - allErrs = errors.Join(allErrs, result.err) - return allErrs - } - - if !result.isPassing() { - // for any error, we continue to the next node - if result.isInternalError() { - op.logger.PrintError("[%s] internal error of the /nodes endpoint: %s", op.name, result.content) - // At internal error originated from the server, so its a - // response, just not a successful one. 
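Editor's note: a pattern worth calling out, both in the op being deleted here and in the new processResult methods elsewhere in this change, is that per-host failures are accumulated with errors.Join instead of returning on the first error, so every failed host is reported. A minimal standalone sketch of that accumulation, with hypothetical host results:

package main

import (
	"errors"
	"fmt"
)

func main() {
	// Hypothetical per-host results: nil means the host succeeded.
	results := map[string]error{
		"10.0.0.1": nil,
		"10.0.0.2": errors.New("connection refused"),
		"10.0.0.3": errors.New("401 unauthorized"),
	}

	// Join failures instead of returning on the first one, mirroring the
	// processResult loops in these ops.
	var allErrs error
	for host, err := range results {
		if err == nil {
			continue // healthy host, nothing to report
		}
		allErrs = errors.Join(allErrs, fmt.Errorf("host %s: %w", host, err))
	}
	if allErrs != nil {
		fmt.Println(allErrs)
	}
}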
- respondingNodeCount++ - } - allErrs = errors.Join(allErrs, result.err) - continue - } - - // parse the /nodes endpoint response - respondingNodeCount++ - nodesStates := nodesStateInfo{} - err := op.parseAndCheckResponse(host, result.content, &nodesStates) - if err != nil { - err = fmt.Errorf("[%s] fail to parse result on host %s: %w", - op.name, host, err) - allErrs = errors.Join(allErrs, err) - continue - } - - nodesInfo := nodesInfo{} - for _, node := range nodesStates.NodeList { - n := node.asNodeInfoWithoutVer() - nodesInfo.NodeList = append(nodesInfo.NodeList, n) - } - // successful case, write the result into exec context - execContext.nodesInfo = nodesInfo.NodeList - op.logger.PrintInfo("reporting results as obtained from the host [%s] ", host) - return nil - } - - return allErrs -} - -func (op *httpsCheckNodeStateOp) finalize(_ *opEngineExecContext) error { - return nil -} diff --git a/vclusterops/https_check_subcluster_op.go b/vclusterops/https_check_subcluster_op.go index 09e4e6b..cb24ead 100644 --- a/vclusterops/https_check_subcluster_op.go +++ b/vclusterops/https_check_subcluster_op.go @@ -54,7 +54,7 @@ func (op *httpsCheckSubclusterOp) setupClusterHTTPRequest(hosts []string) error for _, host := range hosts { httpRequest := hostHTTPRequest{} httpRequest.Method = GetMethod - httpRequest.buildHTTPSEndpoint("subclusters/" + op.scName) + httpRequest.buildHTTPSEndpoint(util.SubclustersEndpoint + op.scName) if op.useHTTPPassword { httpRequest.Password = op.httpsPassword httpRequest.Username = op.userName diff --git a/vclusterops/https_create_archive_op.go b/vclusterops/https_create_archive_op.go new file mode 100644 index 0000000..343e281 --- /dev/null +++ b/vclusterops/https_create_archive_op.go @@ -0,0 +1,152 @@ +/* + (c) Copyright [2023-2024] Open Text. + Licensed under the Apache License, Version 2.0 (the "License"); + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
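Editor's note: several hunks in this change swap string literals such as "subclusters/" and "nodes/" for util constants (util.SubclustersEndpoint, util.NodesEndpoint, util.DropEndpoint, util.ShutDownEndpoint). The actual constant values live in the util package and are not shown in this diff, so the values below are illustrative guesses; the sketch only shows the intent of composing endpoints from shared fragments.

package main

import "fmt"

// Assumed endpoint fragments; the real definitions are in vclusterops/util and
// are not part of this patch.
const (
	subclustersEndpoint = "subclusters/"
	nodesEndpoint       = "nodes/"
	dropEndpoint        = "/drop"
	shutDownEndpoint    = "/shutdown"
)

func main() {
	scName := "sc1"
	nodeName := "v_db_node0001"

	// Each op builds its HTTPS path from the shared fragments instead of
	// repeating string literals, which is what the hunks above switch to.
	fmt.Println(subclustersEndpoint + scName + dropEndpoint)   // subclusters/sc1/drop
	fmt.Println(nodesEndpoint + nodeName + shutDownEndpoint)   // nodes/v_db_node0001/shutdown
}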
+*/ + +package vclusterops + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/vertica/vcluster/vclusterops/util" +) + +type httpsCreateArchiveOp struct { + opBase + opHTTPSBase + ArchiveName string + NumRestorePoints int + hostRequestBodyMap map[string]string +} + +type createArchiveRequestData struct { + NumRestorePoints int `json:"num_restore_points,omitempty"` +} + +func (op *httpsCreateArchiveOp) setupRequestBody(hosts []string) error { + op.hostRequestBodyMap = make(map[string]string) + + for _, host := range hosts { + createArchiveData := createArchiveRequestData{} + if op.NumRestorePoints != CreateArchiveDefaultNumRestore { + createArchiveData.NumRestorePoints = op.NumRestorePoints + } + dataBytes, err := json.Marshal(createArchiveData) + if err != nil { + return fmt.Errorf("[%s] fail to marshal request data to JSON string, detail %w", op.name, err) + } + + op.hostRequestBodyMap[host] = string(dataBytes) + } + + return nil +} + +// makeHTTPSCreateArchiveOp will make an op that call vertica-http service to create archive for database +func makeHTTPSCreateArchiveOp(hosts []string, useHTTPPassword bool, userName string, + httpsPassword *string, archiveName string, numRestorePoints int, +) (httpsCreateArchiveOp, error) { + op := httpsCreateArchiveOp{} + op.name = "HTTPSCreateArchiveOp" + op.description = "Create archive for database" + op.hosts = hosts + op.useHTTPPassword = useHTTPPassword + if useHTTPPassword { + err := util.ValidateUsernameAndPassword(op.name, useHTTPPassword, userName) + if err != nil { + return op, err + } + op.userName = userName + op.httpsPassword = httpsPassword + } + op.ArchiveName = archiveName + op.NumRestorePoints = numRestorePoints + return op, nil +} + +func (op *httpsCreateArchiveOp) setupClusterHTTPRequest(hosts []string) error { + for _, host := range hosts { + httpRequest := hostHTTPRequest{} + httpRequest.Method = PostMethod + httpRequest.buildHTTPSEndpoint(util.ArchiveEndpoint + "/" + op.ArchiveName) + if op.useHTTPPassword { + httpRequest.Password = op.httpsPassword + httpRequest.Username = op.userName + } + httpRequest.RequestData = op.hostRequestBodyMap[host] + op.clusterHTTPRequest.RequestCollection[host] = httpRequest + } + + return nil +} + +func (op *httpsCreateArchiveOp) prepare(execContext *opEngineExecContext) error { + err := op.setupRequestBody(op.hosts) + if err != nil { + return err + } + execContext.dispatcher.setup(op.hosts) + + return op.setupClusterHTTPRequest(op.hosts) +} + +func (op *httpsCreateArchiveOp) execute(execContext *opEngineExecContext) error { + if err := op.runExecute(execContext); err != nil { + return err + } + + return op.processResult(execContext) +} + +func (op *httpsCreateArchiveOp) processResult(_ *opEngineExecContext) error { + var allErrs error + + // every host needs to have a successful result, otherwise we fail this op + // because we want depot created successfully on all hosts + for host, result := range op.clusterHTTPRequest.ResultCollection { + op.logResponse(host, result) + + if result.isUnauthorizedRequest() { + return fmt.Errorf("[%s] wrong password/certificate for https service on host %s", + op.name, host) + } + + if !result.isPassing() { + allErrs = errors.Join(allErrs, result.err) + // not break here because we want to log all the failed nodes + continue + } + + /* decode the json-format response + The successful response object will be a dictionary like below: + { + "detail": "" + } + + */ + _, err := op.parseAndCheckMapResponse(host, result.content) + if err != nil { + err = 
fmt.Errorf(`[%s] fail to parse result on host %s, details: %w`, op.name, host, err) + allErrs = errors.Join(allErrs, err) + // not break here because we want to log all the failed nodes + continue + } + } + return allErrs +} + +func (op *httpsCreateArchiveOp) finalize(_ *opEngineExecContext) error { + return nil +} diff --git a/vclusterops/https_create_nodes_depot_op.go b/vclusterops/https_create_nodes_depot_op.go index dfd6347..ed1c7af 100644 --- a/vclusterops/https_create_nodes_depot_op.go +++ b/vclusterops/https_create_nodes_depot_op.go @@ -56,7 +56,7 @@ func (op *httpsCreateNodesDepotOp) setupClusterHTTPRequest(hosts []string) error httpRequest := hostHTTPRequest{} httpRequest.Method = PostMethod node := op.HostNodeMap[host] - httpRequest.buildHTTPSEndpoint("nodes/" + node.Name + "/depot") + httpRequest.buildHTTPSEndpoint(util.NodesEndpoint + node.Name + "/depot") if op.useHTTPPassword { httpRequest.Password = op.httpsPassword httpRequest.Username = op.userName diff --git a/vclusterops/https_demote_subcluster_op.go b/vclusterops/https_demote_subcluster_op.go index 64f939d..327548a 100644 --- a/vclusterops/https_demote_subcluster_op.go +++ b/vclusterops/https_demote_subcluster_op.go @@ -59,7 +59,7 @@ func (op *httpsDemoteSubclusterOp) setupClusterHTTPRequest(hosts []string) error for _, host := range hosts { httpRequest := hostHTTPRequest{} httpRequest.Method = PostMethod - httpRequest.buildHTTPSEndpoint("subclusters/" + op.scName + "/demote") + httpRequest.buildHTTPSEndpoint(util.SubclustersEndpoint + op.scName + "/demote") if op.useHTTPPassword { httpRequest.Username = op.userName httpRequest.Password = op.httpsPassword diff --git a/vclusterops/https_drop_node_op.go b/vclusterops/https_drop_node_op.go index 2df1a18..4990f02 100644 --- a/vclusterops/https_drop_node_op.go +++ b/vclusterops/https_drop_node_op.go @@ -58,7 +58,7 @@ func (op *httpsDropNodeOp) setupClusterHTTPRequest(hosts []string) error { for _, host := range hosts { httpRequest := hostHTTPRequest{} httpRequest.Method = PostMethod - httpRequest.buildHTTPSEndpoint("nodes/" + op.targetHost + "/drop") + httpRequest.buildHTTPSEndpoint(util.NodesEndpoint + op.targetHost + util.DropEndpoint) if op.useHTTPPassword { httpRequest.Password = op.httpsPassword httpRequest.Username = op.userName diff --git a/vclusterops/https_drop_subcluster_op.go b/vclusterops/https_drop_subcluster_op.go index d1eccd2..0573791 100644 --- a/vclusterops/https_drop_subcluster_op.go +++ b/vclusterops/https_drop_subcluster_op.go @@ -54,7 +54,7 @@ func (op *httpsDropSubclusterOp) setupClusterHTTPRequest(hosts []string) error { for _, host := range hosts { httpRequest := hostHTTPRequest{} httpRequest.Method = PostMethod - httpRequest.buildHTTPSEndpoint("subclusters/" + op.scName + "/drop") + httpRequest.buildHTTPSEndpoint(util.SubclustersEndpoint + op.scName + util.DropEndpoint) if op.useHTTPPassword { httpRequest.Password = op.httpsPassword httpRequest.Username = op.userName diff --git a/vclusterops/https_get_draining_status_op.go b/vclusterops/https_get_draining_status_op.go new file mode 100644 index 0000000..4e806ca --- /dev/null +++ b/vclusterops/https_get_draining_status_op.go @@ -0,0 +1,134 @@ +/* + (c) Copyright [2023-2024] Open Text. + Licensed under the Apache License, Version 2.0 (the "License"); + You may not use this file except in compliance with the License. 
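Editor's note: because the create-archive request body above tags num_restore_points with omitempty, the field is only serialized when a non-default limit is requested, presumably leaving the server to apply its own default otherwise. A quick standalone check of that behavior, with the struct re-declared locally:

package main

import (
	"encoding/json"
	"fmt"
)

// Local copy of the request struct used by httpsCreateArchiveOp above.
type createArchiveRequestData struct {
	NumRestorePoints int `json:"num_restore_points,omitempty"`
}

func main() {
	// Default (0): the field is omitted entirely thanks to omitempty.
	d1, _ := json.Marshal(createArchiveRequestData{})
	fmt.Println(string(d1)) // {}

	// A user-supplied limit is included in the body.
	d2, _ := json.Marshal(createArchiveRequestData{NumRestorePoints: 3})
	fmt.Println(string(d2)) // {"num_restore_points":3}
}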
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package vclusterops + +import ( + "errors" + "fmt" + + "github.com/vertica/vcluster/vclusterops/util" +) + +type httpsGetDrainingStatusOp struct { + opBase + opHTTPSBase + sandbox string + dsList *DrainingStatusList +} + +func makeHTTPSGetDrainingStatusOp(useHTTPPassword bool, sandbox, userName string, + httpsPassword *string, drainingStatusList *DrainingStatusList) (httpsGetDrainingStatusOp, error) { + op := httpsGetDrainingStatusOp{} + op.name = "HTTPSGetDrainingStatusOp" + op.description = "Get draining status" + op.useHTTPPassword = useHTTPPassword + if useHTTPPassword { + err := util.ValidateUsernameAndPassword(op.name, useHTTPPassword, userName) + if err != nil { + return op, err + } + op.userName = userName + op.httpsPassword = httpsPassword + } + op.sandbox = sandbox + op.dsList = drainingStatusList + return op, nil +} + +func (op *httpsGetDrainingStatusOp) setupClusterHTTPRequest(hosts []string) error { + for _, host := range hosts { + httpRequest := hostHTTPRequest{} + httpRequest.Method = GetMethod + httpRequest.buildHTTPSEndpoint("subcluster/draining-status") + if op.useHTTPPassword { + httpRequest.Password = op.httpsPassword + httpRequest.Username = op.userName + } + op.clusterHTTPRequest.RequestCollection[host] = httpRequest + } + + return nil +} + +func (op *httpsGetDrainingStatusOp) prepare(execContext *opEngineExecContext) error { + if len(execContext.upHostsToSandboxes) == 0 { + return fmt.Errorf(`[%s] Cannot find any up hosts in OpEngineExecContext`, op.name) + } + // pick up hosts in the sandbox to send the https request + for host, sb := range execContext.upHostsToSandboxes { + if sb == op.sandbox { + op.hosts = append(op.hosts, host) + } + } + if len(op.hosts) == 0 { + return fmt.Errorf(`[%s] Cannot find any up hosts in %s`, op.name, util.GetClusterName(op.sandbox)) + } + + execContext.dispatcher.setup(op.hosts) + return op.setupClusterHTTPRequest(op.hosts) +} + +func (op *httpsGetDrainingStatusOp) execute(execContext *opEngineExecContext) error { + if err := op.runExecute(execContext); err != nil { + return err + } + + return op.processResult(execContext) +} + +func (op *httpsGetDrainingStatusOp) processResult(_ *opEngineExecContext) error { + var allErrs error + for host, result := range op.clusterHTTPRequest.ResultCollection { + op.logResponse(host, result) + + if result.isUnauthorizedRequest() { + return fmt.Errorf("[%s] wrong password/certificate for https service on host %s", + op.name, host) + } + + if result.isPassing() { + // decode the json-format response + // The successful response will contain all subclusters' draining status: + /* + { + "draining_status_list": [ + { + "subcluster_name": "default_subcluster", + "drain_status": "pausing", + "redirect_to": "" + } + ] + } + */ + resp := DrainingStatusList{} + err := op.parseAndCheckResponse(host, result.content, &resp) + if err != nil { + allErrs = errors.Join(allErrs, err) + continue + } + + // collect draining status + *op.dsList = resp + return nil + } + allErrs = errors.Join(allErrs, result.err) + } + return appendHTTPSFailureError(allErrs) +} + +func (op *httpsGetDrainingStatusOp) finalize(_ 
*opEngineExecContext) error { + return nil +} diff --git a/vclusterops/https_get_nodes_info_op.go b/vclusterops/https_get_nodes_info_op.go index 66e462b..3eb9065 100644 --- a/vclusterops/https_get_nodes_info_op.go +++ b/vclusterops/https_get_nodes_info_op.go @@ -113,6 +113,13 @@ func (op *httpsGetNodesInfoOp) processResult(_ *opEngineExecContext) error { for host, result := range op.clusterHTTPRequest.ResultCollection { op.logResponse(host, result) + // A host may have precondition failed, such as + // "Local node has not joined cluster yet, HTTP server will accept connections when the node has joined the cluster" + // In this case, we skip use the information from that host + if result.hasPreconditionFailed() { + continue + } + if result.isUnauthorizedRequest() { detail := fmt.Sprintf("[%s] wrong password/certificate for https service on host %s", op.name, host) diff --git a/vclusterops/https_get_up_nodes_op.go b/vclusterops/https_get_up_nodes_op.go index 905ed26..18e14e1 100644 --- a/vclusterops/https_get_up_nodes_op.go +++ b/vclusterops/https_get_up_nodes_op.go @@ -209,7 +209,8 @@ func isCompleteScanRequired(cmdType CmdType) bool { cmdType == UnsandboxSCCmd || cmdType == StopSubclusterCmd || cmdType == ManageConnectionDrainingCmd || cmdType == SetConfigurationParameterCmd || - cmdType == GetConfigurationParameterCmd + cmdType == GetConfigurationParameterCmd || + cmdType == GetDrainingStatusCmd } func (op *httpsGetUpNodesOp) finalize(_ *opEngineExecContext) error { @@ -393,7 +394,8 @@ func (op *httpsGetUpNodesOp) requiresSandboxInfo() bool { return op.cmdType == ManageConnectionDrainingCmd || op.cmdType == SetConfigurationParameterCmd || op.cmdType == GetConfigurationParameterCmd || - op.cmdType == StopDBCmd + op.cmdType == StopDBCmd || + op.cmdType == GetDrainingStatusCmd } func (op *httpsGetUpNodesOp) collectUnsandboxingHosts(nodesStates nodesStateInfo, sandboxInfo map[string]string) { diff --git a/vclusterops/https_mark_nodes_ephemeral_op.go b/vclusterops/https_mark_nodes_ephemeral_op.go index e794e9e..aece2eb 100644 --- a/vclusterops/https_mark_nodes_ephemeral_op.go +++ b/vclusterops/https_mark_nodes_ephemeral_op.go @@ -51,7 +51,7 @@ func (op *httpsMarkEphemeralNodeOp) setupClusterHTTPRequest(hosts []string) erro for _, host := range hosts { httpRequest := hostHTTPRequest{} httpRequest.Method = PostMethod - httpRequest.buildHTTPSEndpoint("nodes/" + op.targetNodeName + "/ephemeral") + httpRequest.buildHTTPSEndpoint(util.NodesEndpoint + op.targetNodeName + "/ephemeral") if op.useHTTPPassword { httpRequest.Password = op.httpsPassword httpRequest.Username = op.userName diff --git a/vclusterops/https_poll_node_state_op.go b/vclusterops/https_poll_node_state_op.go index 707a74b..3a34589 100644 --- a/vclusterops/https_poll_node_state_op.go +++ b/vclusterops/https_poll_node_state_op.go @@ -97,7 +97,7 @@ func (op *httpsPollNodeStateOp) setupClusterHTTPRequest(hosts []string) error { httpRequest := hostHTTPRequest{} httpRequest.Method = GetMethod httpRequest.Timeout = op.httpRequestTimeout - httpRequest.buildHTTPSEndpoint("nodes/" + host) + httpRequest.buildHTTPSEndpoint(util.NodesEndpoint + host) if op.useHTTPPassword { httpRequest.Password = op.httpsPassword httpRequest.Username = op.userName @@ -189,9 +189,7 @@ func (op *httpsPollNodeStateOp) shouldStopPolling() (bool, error) { } } else { // if HTTPS endpoint cannot function well on any of the hosts, we do not want to retry polling - return true, fmt.Errorf("[%s] expect one node's information, but got %d nodes' information"+ - " from HTTPS 
/v1/nodes/ endpoint on host %s", - op.name, len(nodesInformation.NodeList), host) + return true, fmt.Errorf(util.NodeInfoCountMismatch, op.name, len(nodesInformation.NodeList), host) } } } diff --git a/vclusterops/https_poll_subcluster_node_state_op.go b/vclusterops/https_poll_subcluster_node_state_op.go index a4e2aca..488e391 100644 --- a/vclusterops/https_poll_subcluster_node_state_op.go +++ b/vclusterops/https_poll_subcluster_node_state_op.go @@ -59,13 +59,16 @@ func makeHTTPSPollSubclusterNodeStateOp(scName string, return op, nil } -func makeHTTPSPollSubclusterNodeStateUpOp(hosts []string, scName string, +func makeHTTPSPollSubclusterNodeStateUpOp(hosts []string, scName string, timeout int, useHTTPPassword bool, userName string, httpsPassword *string) (httpsPollSubclusterNodeStateOp, error) { op, err := makeHTTPSPollSubclusterNodeStateOp(scName, useHTTPPassword, userName, httpsPassword) op.checkDown = false op.description += " to come up" op.hosts = hosts + if timeout != 0 { + op.timeout = timeout + } return op, err } @@ -89,7 +92,7 @@ func (op *httpsPollSubclusterNodeStateOp) setupClusterHTTPRequest(hosts []string httpRequest := hostHTTPRequest{} httpRequest.Method = GetMethod httpRequest.Timeout = defaultHTTPSRequestTimeoutSeconds - httpRequest.buildHTTPSEndpoint("nodes/" + host) + httpRequest.buildHTTPSEndpoint(util.NodesEndpoint + host) if op.useHTTPPassword { httpRequest.Password = op.httpsPassword httpRequest.Username = op.userName diff --git a/vclusterops/https_promote_subcluster_op.go b/vclusterops/https_promote_subcluster_op.go index 9ca0279..943f592 100644 --- a/vclusterops/https_promote_subcluster_op.go +++ b/vclusterops/https_promote_subcluster_op.go @@ -59,7 +59,7 @@ func (op *httpsPromoteSubclusterOp) setupClusterHTTPRequest(hosts []string) erro for _, host := range hosts { httpRequest := hostHTTPRequest{} httpRequest.Method = PostMethod - httpRequest.buildHTTPSEndpoint("subclusters/" + op.scName + "/promote") + httpRequest.buildHTTPSEndpoint(util.SubclustersEndpoint + op.scName + "/promote") if op.useHTTPPassword { httpRequest.Password = op.httpsPassword httpRequest.Username = op.userName diff --git a/vclusterops/https_re_ip_op.go b/vclusterops/https_re_ip_op.go index e257f2b..24255c4 100644 --- a/vclusterops/https_re_ip_op.go +++ b/vclusterops/https_re_ip_op.go @@ -85,7 +85,7 @@ func (op *httpsReIPOp) setupClusterHTTPRequest(hostsToReIP []string) error { if !ok { return fmt.Errorf("[%s] cannot find node information for address %s", op.name, host) } - httpRequest.buildHTTPSEndpoint("nodes/" + nodesInfo.NodeName + "/ip") + httpRequest.buildHTTPSEndpoint(util.NodesEndpoint + nodesInfo.NodeName + "/ip") httpRequest.QueryParams = make(map[string]string) httpRequest.QueryParams["host"] = nodesInfo.TargetAddress httpRequest.QueryParams["control-host"] = nodesInfo.TargetControlAddress diff --git a/vclusterops/https_rebalance_subcluster_shards_op.go b/vclusterops/https_rebalance_subcluster_shards_op.go index cd28c47..391b5ff 100644 --- a/vclusterops/https_rebalance_subcluster_shards_op.go +++ b/vclusterops/https_rebalance_subcluster_shards_op.go @@ -55,7 +55,7 @@ func (op *httpsRebalanceSubclusterShardsOp) setupClusterHTTPRequest(hosts []stri for _, host := range hosts { httpRequest := hostHTTPRequest{} httpRequest.Method = PostMethod - httpRequest.buildHTTPSEndpoint("subclusters/" + op.scName + "/rebalance") + httpRequest.buildHTTPSEndpoint(util.SubclustersEndpoint + op.scName + "/rebalance") if op.useHTTPPassword { httpRequest.Password = op.httpsPassword httpRequest.Username = 
op.userName diff --git a/vclusterops/https_rename_subcluster_op.go b/vclusterops/https_rename_subcluster_op.go index e70f08b..ba00d53 100644 --- a/vclusterops/https_rename_subcluster_op.go +++ b/vclusterops/https_rename_subcluster_op.go @@ -61,7 +61,7 @@ func (op *httpsRenameSubclusterOp) setupClusterHTTPRequest(hosts []string) error for _, host := range hosts { httpRequest := hostHTTPRequest{} httpRequest.Method = PutMethod - httpRequest.buildHTTPSEndpoint("subclusters/" + op.scName + "/rename") + httpRequest.buildHTTPSEndpoint(util.SubclustersEndpoint + op.scName + "/rename") httpRequest.QueryParams = make(map[string]string) httpRequest.QueryParams["name"] = op.newSCName diff --git a/vclusterops/https_sandbox_subcluster_op.go b/vclusterops/https_sandbox_subcluster_op.go index 04cae81..3be2008 100644 --- a/vclusterops/https_sandbox_subcluster_op.go +++ b/vclusterops/https_sandbox_subcluster_op.go @@ -33,11 +33,12 @@ type httpsSandboxingOp struct { Imeta bool Sls bool ForUpgrade bool + sbHosts *[]string } // This op is used to sandbox the given subcluster `scName` as `sandboxName` func makeHTTPSandboxingOp(logger vlog.Printer, scName, sandboxName string, useHTTPPassword bool, - userName string, httpsPassword *string, saveRp, imeta, sls, forUpgrade bool) (httpsSandboxingOp, error) { + userName string, httpsPassword *string, saveRp, imeta, sls, forUpgrade bool, hosts *[]string) (httpsSandboxingOp, error) { op := httpsSandboxingOp{} op.name = "HTTPSSansboxingOp" op.description = "Convert subcluster into sandbox in catalog system" @@ -49,6 +50,7 @@ func makeHTTPSandboxingOp(logger vlog.Printer, scName, sandboxName string, useHT op.Imeta = imeta op.Sls = sls op.ForUpgrade = forUpgrade + op.sbHosts = hosts if useHTTPPassword { err := util.ValidateUsernameAndPassword(op.name, useHTTPPassword, userName) @@ -67,7 +69,7 @@ func (op *httpsSandboxingOp) setupClusterHTTPRequest(hosts []string) error { for _, host := range hosts { httpRequest := hostHTTPRequest{} httpRequest.Method = PostMethod - httpRequest.buildHTTPSEndpoint("subclusters/" + op.scName + "/sandbox") + httpRequest.buildHTTPSEndpoint(util.SubclustersEndpoint + op.scName + "/sandbox") if op.useHTTPPassword { httpRequest.Password = op.httpsPassword httpRequest.Username = op.userName @@ -155,6 +157,9 @@ func (op *httpsSandboxingOp) processResult(_ *opEngineExecContext) error { return allErrs } -func (op *httpsSandboxingOp) finalize(_ *opEngineExecContext) error { +func (op *httpsSandboxingOp) finalize(execContext *opEngineExecContext) error { + for _, vnode := range execContext.scNodesInfo { + *op.sbHosts = append(*op.sbHosts, vnode.Address) + } return nil } diff --git a/vclusterops/https_start_replication_op.go b/vclusterops/https_start_replication_op.go index e22bcb8..2f590cc 100644 --- a/vclusterops/https_start_replication_op.go +++ b/vclusterops/https_start_replication_op.go @@ -26,29 +26,27 @@ import ( type httpsStartReplicationOp struct { opBase opHTTPSBase + TargetDatabaseOptions hostRequestBodyMap map[string]string sourceDB string - targetHosts string - targetDB string + targetHost string sandbox string - targetUserName string - targetPassword *string tlsConfig string vdb *VCoordinationDatabase } func makeHTTPSStartReplicationOp(dbName string, sourceHosts []string, sourceUseHTTPPassword bool, sourceUserName string, - sourceHTTPPassword *string, targetUseHTTPPassword bool, targetDB, targetUserName, targetHosts string, - targetHTTPSPassword *string, tlsConfig, sandbox string, vdb *VCoordinationDatabase) (httpsStartReplicationOp, 
error) { + sourceHTTPPassword *string, targetUseHTTPPassword bool, targetDBOpt *TargetDatabaseOptions, + targetHost string, tlsConfig, sandbox string, vdb *VCoordinationDatabase) (httpsStartReplicationOp, error) { op := httpsStartReplicationOp{} op.name = "HTTPSStartReplicationOp" op.description = "Start database replication" op.sourceDB = dbName op.hosts = sourceHosts op.useHTTPPassword = sourceUseHTTPPassword - op.targetDB = targetDB - op.targetHosts = targetHosts + op.TargetDB = targetDBOpt.TargetDB + op.targetHost = targetHost op.tlsConfig = tlsConfig op.sandbox = sandbox op.vdb = vdb @@ -62,12 +60,12 @@ func makeHTTPSStartReplicationOp(dbName string, sourceHosts []string, op.httpsPassword = sourceHTTPPassword } if targetUseHTTPPassword { - err := util.ValidateUsernameAndPassword(op.name, targetUseHTTPPassword, targetUserName) + err := util.ValidateUsernameAndPassword(op.name, targetUseHTTPPassword, targetDBOpt.TargetUserName) if err != nil { return op, err } - op.targetUserName = targetUserName - op.targetPassword = targetHTTPSPassword + op.TargetUserName = targetDBOpt.TargetUserName + op.TargetPassword = targetDBOpt.TargetPassword } return op, nil @@ -86,10 +84,10 @@ func (op *httpsStartReplicationOp) setupRequestBody(hosts []string) error { for _, host := range hosts { replicateData := replicateRequestData{} - replicateData.TargetHost = op.targetHosts - replicateData.TargetDB = op.targetDB - replicateData.TargetUserName = op.targetUserName - replicateData.TargetPassword = op.targetPassword + replicateData.TargetHost = op.targetHost + replicateData.TargetDB = op.TargetDB + replicateData.TargetUserName = op.TargetUserName + replicateData.TargetPassword = op.TargetPassword replicateData.TLSConfig = op.tlsConfig dataBytes, err := json.Marshal(replicateData) diff --git a/vclusterops/https_stop_db_op.go b/vclusterops/https_stop_db_op.go index 37e102b..3821e7a 100644 --- a/vclusterops/https_stop_db_op.go +++ b/vclusterops/https_stop_db_op.go @@ -21,6 +21,7 @@ import ( "regexp" "strconv" + mapset "github.com/deckarep/golang-set/v2" "github.com/vertica/vcluster/vclusterops/util" ) @@ -87,6 +88,7 @@ func (op *httpsStopDBOp) prepare(execContext *opEngineExecContext) error { sandboxOnly := false var mainHost string var hosts []string + sandboxes := mapset.NewSet[string]() for h, sb := range execContext.upHostsToSandboxes { if sb == op.sandbox && sb != "" { // stop db only on sandbox @@ -96,7 +98,8 @@ func (op *httpsStopDBOp) prepare(execContext *opEngineExecContext) error { } if sb == "" { mainHost = h - } else { + } else if !sandboxes.Contains(sb) { + sandboxes.Add(sb) hosts = append(hosts, h) } } @@ -124,6 +127,7 @@ func (op *httpsStopDBOp) execute(execContext *opEngineExecContext) error { func (op *httpsStopDBOp) processResult(_ *opEngineExecContext) error { var allErrs error re := regexp.MustCompile(`Set subcluster \(.*\) to draining state.*`) + regHang := regexp.MustCompile(`context\s+deadline\s+exceeded\s+\(Client\.Timeout\s+exceeded\s+while\s+awaiting\s+headers\)`) for host, result := range op.clusterHTTPRequest.ResultCollection { op.logResponse(host, result) @@ -135,6 +139,11 @@ func (op *httpsStopDBOp) processResult(_ *opEngineExecContext) error { } if !result.isPassing() { allErrs = errors.Join(allErrs, result.err) + if regHang.MatchString(result.err.Error()) { + err := fmt.Errorf("hint: use NMA endpoint /v1/vertica-process/signal?signal_type=kill to terminate a hanging Vertica " + + "process on the failed host") + allErrs = errors.Join(allErrs, err) + } continue } diff --git 
a/vclusterops/https_stop_node_op.go b/vclusterops/https_stop_node_op.go index 335b4fa..e0fc34d 100644 --- a/vclusterops/https_stop_node_op.go +++ b/vclusterops/https_stop_node_op.go @@ -72,7 +72,7 @@ func (op *httpsStopNodeOp) setupClusterHTTPRequest(hosts, nodenames []string) er for i, nodename := range nodenames { httpRequest := hostHTTPRequest{} httpRequest.Method = PostMethod - httpRequest.buildHTTPSEndpoint("nodes/" + nodename + "/shutdown") + httpRequest.buildHTTPSEndpoint(util.NodesEndpoint + nodename + util.ShutDownEndpoint) if op.useHTTPPassword { httpRequest.Password = op.httpsPassword httpRequest.Username = op.userName diff --git a/vclusterops/https_stop_subcluster_op.go b/vclusterops/https_stop_subcluster_op.go index 31959b4..509cae0 100644 --- a/vclusterops/https_stop_subcluster_op.go +++ b/vclusterops/https_stop_subcluster_op.go @@ -64,7 +64,7 @@ func (op *httpsStopSCOp) setupClusterHTTPRequest(hosts []string) error { for _, host := range hosts { httpRequest := hostHTTPRequest{} httpRequest.Method = PostMethod - httpRequest.buildHTTPSEndpoint("subclusters/" + op.scName + "/shutdown") + httpRequest.buildHTTPSEndpoint(util.SubclustersEndpoint + op.scName + util.ShutDownEndpoint) if op.useHTTPPassword { httpRequest.Password = op.httpsPassword httpRequest.Username = op.userName @@ -102,6 +102,11 @@ func (op *httpsStopSCOp) processResult(_ *opEngineExecContext) error { for host, result := range op.clusterHTTPRequest.ResultCollection { op.logResponse(host, result) + // EOF is expected in node shutdown: we expect the node's HTTPS service to go down quickly + // and the Server HTTPS service does not guarantee that the response being sent back to the client before it closes + if result.isEOF() { + continue + } if !result.isPassing() { allErrs = errors.Join(allErrs, result.err) continue diff --git a/vclusterops/https_unsandbox_subcluster_op.go b/vclusterops/https_unsandbox_subcluster_op.go index 25afce6..7c7c969 100644 --- a/vclusterops/https_unsandbox_subcluster_op.go +++ b/vclusterops/https_unsandbox_subcluster_op.go @@ -27,16 +27,18 @@ type httpsUnsandboxingOp struct { opHTTPSBase hostRequestBodyMap map[string]string scName string + scHosts *[]string } // This op is used to unsandbox the given subcluster `scName` func makeHTTPSUnsandboxingOp(scName string, - useHTTPPassword bool, userName string, httpsPassword *string) (httpsUnsandboxingOp, error) { + useHTTPPassword bool, userName string, httpsPassword *string, hosts *[]string) (httpsUnsandboxingOp, error) { op := httpsUnsandboxingOp{} op.name = "HTTPSUnsansboxingOp" op.description = "Convert sandboxed subcluster into regular subcluster in catalog" op.useHTTPPassword = useHTTPPassword op.scName = scName + op.scHosts = hosts if useHTTPPassword { err := util.ValidateUsernameAndPassword(op.name, useHTTPPassword, userName) @@ -55,7 +57,7 @@ func (op *httpsUnsandboxingOp) setupClusterHTTPRequest(hosts []string) error { for _, host := range hosts { httpRequest := hostHTTPRequest{} httpRequest.Method = PostMethod - httpRequest.buildHTTPSEndpoint("subclusters/" + op.scName + "/unsandbox") + httpRequest.buildHTTPSEndpoint(util.SubclustersEndpoint + op.scName + "/unsandbox") if op.useHTTPPassword { httpRequest.Password = op.httpsPassword httpRequest.Username = op.userName @@ -139,6 +141,10 @@ func (op *httpsUnsandboxingOp) processResult(_ *opEngineExecContext) error { return allErrs } -func (op *httpsUnsandboxingOp) finalize(_ *opEngineExecContext) error { +func (op *httpsUnsandboxingOp) finalize(execContext *opEngineExecContext) error { + 
*op.scHosts = []string{} + for _, vnode := range execContext.scNodesInfo { + *op.scHosts = append(*op.scHosts, vnode.Address) + } return nil } diff --git a/vclusterops/https_update_node_state_op.go b/vclusterops/https_update_node_state_op.go index 9892924..a8a8703 100644 --- a/vclusterops/https_update_node_state_op.go +++ b/vclusterops/https_update_node_state_op.go @@ -56,7 +56,7 @@ func (op *httpsUpdateNodeStateOp) setupClusterHTTPRequest(hosts []string) error httpRequest := hostHTTPRequest{} httpRequest.Method = GetMethod httpRequest.Timeout = op.httpRequestTimeout - httpRequest.buildHTTPSEndpoint("nodes/" + host) + httpRequest.buildHTTPSEndpoint(util.NodesEndpoint + host) if op.useHTTPPassword { httpRequest.Password = op.httpsPassword httpRequest.Username = op.userName @@ -87,6 +87,19 @@ func (op *httpsUpdateNodeStateOp) processResult(execContext *opEngineExecContext for host, result := range op.clusterHTTPRequest.ResultCollection { op.logResponse(host, result) + // A host may have precondition failed, such as + // "Local node has not joined cluster yet, HTTP server will accept connections when the node has joined the cluster" + // In this case, we mark the node status as UNKNOWN + if result.hasPreconditionFailed() { + vnode, ok := op.vdb.HostNodeMap[host] + if !ok { + return fmt.Errorf("cannot find host %s in vdb", host) + } + vnode.State = util.NodeUnknownState + + continue + } + if result.isUnauthorizedRequest() { op.logger.PrintError("[%s] unauthorized request: %s", op.name, result.content) execContext.hostsWithWrongAuth = append(execContext.hostsWithWrongAuth, host) @@ -124,11 +137,10 @@ func (op *httpsUpdateNodeStateOp) processResult(execContext *opEngineExecContext return fmt.Errorf("cannot find host %s in vdb", host) } vnode.State = nodeInfo.State + vnode.IsPrimary = nodeInfo.IsPrimary } else { // if the result format is wrong on any of the hosts, we should throw an error - return fmt.Errorf("[%s] expect one node's information, but got %d nodes' information"+ - " from HTTPS /v1/nodes/ endpoint on host %s", - op.name, len(nodesInformation.NodeList), host) + return fmt.Errorf(util.NodeInfoCountMismatch, op.name, len(nodesInformation.NodeList), host) } } diff --git a/vclusterops/manage_connection_draining.go b/vclusterops/manage_connection_draining.go index 9657b74..94da4f4 100644 --- a/vclusterops/manage_connection_draining.go +++ b/vclusterops/manage_connection_draining.go @@ -23,9 +23,10 @@ import ( ) const ( - ActionPause ConnectionDrainingAction = "pause" - ActionRedirect ConnectionDrainingAction = "redirect" - ActionResume ConnectionDrainingAction = "resume" + ActionPause ConnectionDrainingAction = "pause" + ActionRedirect ConnectionDrainingAction = "redirect" + ActionResume ConnectionDrainingAction = "resume" + hostRedirectMsg = "hostname to redirect to must not be empty when manage connection draining action is %q" ) type ConnectionDrainingAction string @@ -93,10 +94,8 @@ func (opt *VManageConnectionDrainingOptions) validateExtraOptions(logger vlog.Pr } if opt.Action == ActionRedirect { if opt.RedirectHostname == "" { - logger.PrintError("hostname to redirect to must not be empty"+ - " when manage connection draining action is %q", ActionRedirect) - return fmt.Errorf("hostname to redirect to must not be empty"+ - " when manage connection draining action is %q", ActionRedirect) + logger.PrintError(hostRedirectMsg, ActionRedirect) + return fmt.Errorf(hostRedirectMsg, ActionRedirect) } } return nil diff --git a/vclusterops/nma_read_catalog_editor_op.go 
b/vclusterops/nma_read_catalog_editor_op.go index 9ed2f18..b7dd35c 100644 --- a/vclusterops/nma_read_catalog_editor_op.go +++ b/vclusterops/nma_read_catalog_editor_op.go @@ -31,6 +31,13 @@ type nmaReadCatalogEditorOp struct { catalogPathMap map[string]string firstStartAfterRevive bool // used for start_db only + + // for passing state between execute() and finalize() and + // should not be initialized by the factory function + allErrs error + hostsWithLatestCatalog []string + latestNmaVDB nmaVDatabase + bestHost string } // makeNMAReadCatalogEditorOpWithInitiator creates an op to read catalog editor info. @@ -89,11 +96,13 @@ func (op *nmaReadCatalogEditorOp) prepare(execContext *opEngineExecContext) erro // if the initiator host(s) are given, only build map for these hosts op.catalogPathMap = make(map[string]string) if len(op.initiator) == 0 { + op.logger.Info("Using all hosts in host to node map", "op name", op.name) op.hosts = maps.Keys(op.vdb.HostNodeMap) for host, vnode := range op.vdb.HostNodeMap { op.catalogPathMap[host] = vnode.CatalogPath } } else { + op.logger.Info("Using initiator hosts only", "op name", op.name) for _, host := range op.initiator { op.hosts = append(op.hosts, host) vnode, ok := op.vdb.HostNodeMap[host] @@ -105,6 +114,12 @@ func (op *nmaReadCatalogEditorOp) prepare(execContext *opEngineExecContext) erro } } + if len(op.hosts) == 0 { + op.skipExecute = true + op.logger.Info("No hosts found, skipping execution", "op name", op.name) + return nil + } + execContext.dispatcher.setup(op.hosts) return op.setupClusterHTTPRequest(op.hosts) @@ -118,10 +133,6 @@ func (op *nmaReadCatalogEditorOp) execute(execContext *opEngineExecContext) erro return op.processResult(execContext) } -func (op *nmaReadCatalogEditorOp) finalize(_ *opEngineExecContext) error { - return nil -} - type nmaVersions struct { Global json.Number `json:"global"` Local json.Number `json:"local"` @@ -179,12 +190,8 @@ type nmaVDatabase struct { PrimaryNodeCount uint `json:",omitempty"` } -func (op *nmaReadCatalogEditorOp) processResult(execContext *opEngineExecContext) error { - var allErrs error - var hostsWithLatestCatalog []string +func (op *nmaReadCatalogEditorOp) processResult(_ *opEngineExecContext) error { var maxGlobalVersion int64 - var latestNmaVDB nmaVDatabase - var bestHost string for host, result := range op.clusterHTTPRequest.ResultCollection { op.logResponse(host, result) @@ -194,7 +201,7 @@ func (op *nmaReadCatalogEditorOp) processResult(execContext *opEngineExecContext if err != nil { err = fmt.Errorf("[%s] fail to parse result on host %s, details: %w", op.name, host, err) - allErrs = errors.Join(allErrs, err) + op.allErrs = errors.Join(op.allErrs, err) continue } @@ -216,17 +223,17 @@ func (op *nmaReadCatalogEditorOp) processResult(execContext *opEngineExecContext if err != nil { err = fmt.Errorf("[%s] fail to convert spread Version to integer %s, details: %w", op.name, host, err) - allErrs = errors.Join(allErrs, err) + op.allErrs = errors.Join(op.allErrs, err) continue } if globalVersion > maxGlobalVersion { - hostsWithLatestCatalog = []string{host} + op.hostsWithLatestCatalog = []string{host} maxGlobalVersion = globalVersion // save the latest NMAVDatabase to execContext - latestNmaVDB = nmaVDB - bestHost = host + op.latestNmaVDB = nmaVDB + op.bestHost = host } else if globalVersion == maxGlobalVersion { - hostsWithLatestCatalog = append(hostsWithLatestCatalog, host) + op.hostsWithLatestCatalog = append(op.hostsWithLatestCatalog, host) } } else { // if this is not the first time of 
start_db after revive_db, @@ -242,20 +249,28 @@ func (op *nmaReadCatalogEditorOp) processResult(execContext *opEngineExecContext } } - allErrs = errors.Join(allErrs, result.err) + op.allErrs = errors.Join(op.allErrs, result.err) } } + // let finalize() handle error conditions, in case this function is skipped + return nil +} + +// finalize contains the final logic that would otherwise be in execute, but since execute +// shouldn't be called with no hosts in the host list, doing the work here allows handling +// the errors whether or not execute was called. +func (op *nmaReadCatalogEditorOp) finalize(execContext *opEngineExecContext) error { // save hostsWithLatestCatalog to execContext - if len(hostsWithLatestCatalog) == 0 { + if len(op.hostsWithLatestCatalog) == 0 { err := fmt.Errorf("[%s] cannot find any host with the latest catalog", op.name) - allErrs = errors.Join(allErrs, err) - return allErrs + op.allErrs = errors.Join(op.allErrs, err) + return op.allErrs } - execContext.hostsWithLatestCatalog = hostsWithLatestCatalog + execContext.hostsWithLatestCatalog = op.hostsWithLatestCatalog // save the latest nmaVDB to execContext - execContext.nmaVDatabase = latestNmaVDB - op.logger.PrintInfo("reporting results as obtained from the host [%s] ", bestHost) - return allErrs + execContext.nmaVDatabase = op.latestNmaVDB + op.logger.PrintInfo("reporting results as obtained from the host [%s] ", op.bestHost) + return op.allErrs } diff --git a/vclusterops/nma_save_restore_points_op.go b/vclusterops/nma_save_restore_points_op.go new file mode 100644 index 0000000..a549226 --- /dev/null +++ b/vclusterops/nma_save_restore_points_op.go @@ -0,0 +1,132 @@ +/* + (c) Copyright [2023-2024] Open Text. + Licensed under the Apache License, Version 2.0 (the "License"); + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package vclusterops + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/vertica/vcluster/vclusterops/vlog" +) + +type nmaSaveRestorePointsOp struct { + opBase + dbName string + username string + archiveName string + sandbox string +} + +type saveRestorePointsRequestData struct { + DBName string `json:"dbname"` + ArchiveName string `json:"archive_name"` + UserName string `json:"username"` +} + +// This op is used to save restore points in a database +func makeNMASaveRestorePointsOp(logger vlog.Printer, hosts []string, dbName, username string, + archiveName string, sandbox string) nmaSaveRestorePointsOp { + return nmaSaveRestorePointsOp{ + opBase: opBase{ + name: "NMASaveRestorePointsOp", + description: "Run save restore point query", + logger: logger.WithName("NMASaveRestorePointsOp"), + hosts: hosts, + }, + dbName: dbName, + username: username, + archiveName: archiveName, + sandbox: sandbox, + } +} + +// make https json data +func (op *nmaSaveRestorePointsOp) setupRequestBody() (map[string]string, error) { + hostRequestBodyMap := make(map[string]string, len(op.hosts)) + for _, host := range op.hosts { + requestData := saveRestorePointsRequestData{} + requestData.DBName = op.dbName + requestData.ArchiveName = op.archiveName + requestData.UserName = op.username + + dataBytes, err := json.Marshal(requestData) + if err != nil { + return nil, fmt.Errorf("[%s] fail to marshal request data to JSON string, detail %w", op.name, err) + } + hostRequestBodyMap[host] = string(dataBytes) + } + return hostRequestBodyMap, nil +} + +func (op *nmaSaveRestorePointsOp) setupClusterHTTPRequest(hostRequestBodyMap map[string]string) error { + for host, requestBody := range hostRequestBodyMap { + httpRequest := hostHTTPRequest{} + httpRequest.Method = PostMethod + httpRequest.buildNMAEndpoint("restore-points/save") + httpRequest.RequestData = requestBody + op.clusterHTTPRequest.RequestCollection[host] = httpRequest + } + return nil +} + +func (op *nmaSaveRestorePointsOp) prepare(execContext *opEngineExecContext) error { + hostRequestBody, err := op.setupRequestBody() + if err != nil { + return err + } + execContext.dispatcher.setup(op.hosts) + return op.setupClusterHTTPRequest(hostRequestBody) +} + +func (op *nmaSaveRestorePointsOp) execute(execContext *opEngineExecContext) error { + if err := op.runExecute(execContext); err != nil { + return err + } + + return op.processResult(execContext) +} + +func (op *nmaSaveRestorePointsOp) finalize(_ *opEngineExecContext) error { + return nil +} + +/* +Sample response from the NMA restore-points endpoint: +RespStr: "" (status code:200) +*/ +func (op *nmaSaveRestorePointsOp) processResult(_ *opEngineExecContext) error { + var allErrs error + for host, result := range op.clusterHTTPRequest.ResultCollection { + op.logResponse(host, result) + if result.isUnauthorizedRequest() { + return fmt.Errorf("[%s] wrong certificate for NMA service on host %s", + op.name, host) + } + if result.isPassing() { + var responseObj RestorePoint + err := op.parseAndCheckResponse(host, result.content, &responseObj) + if err != nil { + allErrs = errors.Join(allErrs, err) + continue + } + op.logger.PrintInfo("OP Name: [%s], response: %v", op.name, result.content) + return nil + } + allErrs = errors.Join(allErrs, result.err) + } + return allErrs +} diff --git a/vclusterops/nma_show_restore_points_op.go b/vclusterops/nma_show_restore_points_op.go index 318682c..74c8e27 100644 --- a/vclusterops/nma_show_restore_points_op.go +++ b/vclusterops/nma_show_restore_points_op.go @@ -188,7 
+188,6 @@ func (op *nmaShowRestorePointsOp) processResult(execContext *opEngineExecContext allErrs = errors.Join(allErrs, err) continue } - op.logger.PrintInfo("[%s] response: %v", op.name, result.content) execContext.restorePoints = responseObj return nil diff --git a/vclusterops/nma_show_restore_points_op_test.go b/vclusterops/nma_show_restore_points_op_test.go index e1280dc..deaa665 100644 --- a/vclusterops/nma_show_restore_points_op_test.go +++ b/vclusterops/nma_show_restore_points_op_test.go @@ -22,6 +22,10 @@ import ( "github.com/vertica/vcluster/vclusterops/vlog" ) +const archName = `"archive_name":"` +const archID = `"archive_id":"` +const archIndex = `"archive_index":"` + func TestShowRestorePointsRequestBody(t *testing.T) { const hostName = "host1" const dbName = "testDB" @@ -55,9 +59,9 @@ func TestShowRestorePointsRequestBody(t *testing.T) { assert.Len(t, requestBody, 1) assert.Contains(t, requestBody, hostName) hostReq = requestBody[hostName] - assert.Contains(t, hostReq, `"archive_name":"`+archiveName+`"`) - assert.Contains(t, hostReq, `"archive_id":"`+archiveID+`"`) - assert.Contains(t, hostReq, `"archive_index":"`+archiveIndex+`"`) + assert.Contains(t, hostReq, archName+archiveName+`"`) + assert.Contains(t, hostReq, archID+archiveID+`"`) + assert.Contains(t, hostReq, archIndex+archiveIndex+`"`) assert.Contains(t, hostReq, `"start_timestamp":"`+startTimestamp+`"`) assert.Contains(t, hostReq, `"end_timestamp":"`+endTimestamp+`"`) @@ -73,9 +77,9 @@ func TestShowRestorePointsRequestBody(t *testing.T) { assert.Len(t, requestBody, 1) assert.Contains(t, requestBody, hostName) hostReq = requestBody[hostName] - assert.Contains(t, hostReq, `"archive_name":"`+archiveName+`"`) - assert.Contains(t, hostReq, `"archive_id":"`+archiveID+`"`) - assert.Contains(t, hostReq, `"archive_index":"`+archiveIndex+`"`) + assert.Contains(t, hostReq, archName+archiveName+`"`) + assert.Contains(t, hostReq, archID+archiveID+`"`) + assert.Contains(t, hostReq, archIndex+archiveIndex+`"`) assert.NotContains(t, hostReq, `"start_timestamp"`) assert.NotContains(t, hostReq, `"end_timestamp"`) } diff --git a/vclusterops/nma_vertica_version_op_test.go b/vclusterops/nma_vertica_version_op_test.go index ed08605..fbb4bfa 100644 --- a/vclusterops/nma_vertica_version_op_test.go +++ b/vclusterops/nma_vertica_version_op_test.go @@ -22,6 +22,8 @@ import ( "github.com/stretchr/testify/assert" ) +const foundMismatchVers = "Found mismatched versions: " + func TestLogCheckVersionMatch(t *testing.T) { op := makeNMACheckVerticaVersionOp(nil, true, true) op.HasIncomingSCNames = true @@ -44,9 +46,9 @@ func TestLogCheckVersionMatch(t *testing.T) { } err = op.logCheckVersionMatch() assert.Error(t, err) - expectedErr1 := "Found mismatched versions: " + + expectedErr1 := foundMismatchVers + "[Vertica Analytic Database v24.1.0] and [Vertica Analytic Database v23.4.0] in subcluster [default_subcluster]" - expectedErr2 := "Found mismatched versions: " + + expectedErr2 := foundMismatchVers + "[Vertica Analytic Database v23.4.0] and [Vertica Analytic Database v24.1.0] in subcluster [default_subcluster]" isExpected := strings.Contains(err.Error(), expectedErr1) || strings.Contains(err.Error(), expectedErr2) assert.Equal(t, true, isExpected) @@ -90,9 +92,9 @@ func TestLogCheckVersionMatch(t *testing.T) { } err = op.logCheckVersionMatch() assert.Error(t, err) - expectedErr1 = "Found mismatched versions: " + + expectedErr1 = foundMismatchVers + "[Vertica Analytic Database v23.4.0] and [Vertica Analytic Database v23.3.0] in subcluster [sc2]" - 
expectedErr2 = "Found mismatched versions: " + + expectedErr2 = foundMismatchVers + "[Vertica Analytic Database v23.3.0] and [Vertica Analytic Database v23.4.0] in subcluster [sc2]" isExpected = strings.Contains(err.Error(), expectedErr1) || strings.Contains(err.Error(), expectedErr2) assert.Equal(t, true, isExpected) diff --git a/vclusterops/poll_sc_state.go b/vclusterops/poll_sc_state.go new file mode 100644 index 0000000..8d0d0d6 --- /dev/null +++ b/vclusterops/poll_sc_state.go @@ -0,0 +1,128 @@ +/* + (c) Copyright [2023-2024] Open Text. + Licensed under the Apache License, Version 2.0 (the "License"); + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package vclusterops + +import ( + "fmt" + + "github.com/vertica/vcluster/vclusterops/util" + "github.com/vertica/vcluster/vclusterops/vlog" +) + +type VPollSubclusterStateOptions struct { + DatabaseOptions + + SkipOptionsValidation bool + SCName string + Timeout int // timeout for polling, 0 means default +} + +func VPollSubclusterStateOptionsFactory() VPollSubclusterStateOptions { + options := VPollSubclusterStateOptions{} + // set default values to the params + options.setDefaultValues() + + return options +} + +func (options *VPollSubclusterStateOptions) setDefaultValues() { + options.DatabaseOptions.setDefaultValues() +} + +func (options *VPollSubclusterStateOptions) validateParseOptions(logger vlog.Printer) error { + err := options.validateBaseOptions(PollSubclusterStateCmd, logger) + if err != nil { + return err + } + + return nil +} + +func (options *VPollSubclusterStateOptions) analyzeOptions() (err error) { + // resolve RawHosts to be IP addresses + if len(options.RawHosts) > 0 { + options.Hosts, err = util.ResolveRawHostsToAddresses(options.RawHosts, options.IPv6) + if err != nil { + return err + } + } + + return nil +} + +func (options *VPollSubclusterStateOptions) validateAnalyzeOptions(logger vlog.Printer) error { + if options.SkipOptionsValidation { + return nil + } + if err := options.validateParseOptions(logger); err != nil { + return err + } + return options.analyzeOptions() +} + +// VPollSubclusterState waits for the given nodes to be up or down +func (vcc VClusterCommands) VPollSubclusterState(options *VPollSubclusterStateOptions) error { + /* + * - Validate Options + * - Produce Instructions + * - Create a VClusterOpEngine + * - Give the instructions to the VClusterOpEngine to run + */ + + err := options.validateAnalyzeOptions(vcc.Log) + if err != nil { + return err + } + + instructions, err := vcc.producePollSubclusterStateInstructions(options) + if err != nil { + return fmt.Errorf("fail to produce instructions: %w", err) + } + + clusterOpEngine := makeClusterOpEngine(instructions, options) + + err = clusterOpEngine.run(vcc.Log) + if err != nil { + return fmt.Errorf("failed to poll for host status %v: %w", options.Hosts, err) + } + + return nil +} + +// producePollSubclusterStateInstructions will build a list of instructions to execute +// +// The generated instructions will later perform the following operations: +// - Poll for the subcluster hosts to be all UP or DOWN +func (vcc 
*VClusterCommands) producePollSubclusterStateInstructions(options *VPollSubclusterStateOptions, +) (instructions []clusterOp, err error) { + // when password is specified, we will use username/password to call https endpoints + usePassword := false + if options.Password != nil { + usePassword = true + if err = options.validateUserName(vcc.Log); err != nil { + return + } + } + + httpsPollSubclusterNodeOp, err := makeHTTPSPollSubclusterNodeStateUpOp(options.Hosts, options.SCName, options.Timeout, + usePassword, options.UserName, options.Password) + if err != nil { + return + } + + instructions = append(instructions, &httpsPollSubclusterNodeOp) + return +} diff --git a/vclusterops/re_ip.go b/vclusterops/re_ip.go index 5bf194c..832088c 100644 --- a/vclusterops/re_ip.go +++ b/vclusterops/re_ip.go @@ -161,19 +161,19 @@ func (vcc VClusterCommands) VReIP(options *VReIPOptions) error { // retrieve database information from cluster_config.json for Eon databases if options.IsEon { const warningMsg = " for an Eon database, re_ip after revive_db could fail " + - "because we cannot retrieve the correct database information" + util.DBInfo if options.CommunalStorageLocation != "" { vdb, e := options.getVDBWhenDBIsDown(vcc) if e != nil { // show a warning message if we cannot get VDB from a down database - vcc.Log.PrintWarning("failed to retrieve the communal storage location" + warningMsg) + vcc.Log.PrintWarning(util.CommStorageFail + warningMsg) } pVDB = &vdb } else { // When communal storage location is missing, we only log a debug message // because re-ip only fails in between revive_db and first start_db. // We should not ran re-ip in that case because revive_db has already done the re-ip work. - vcc.Log.V(1).Info("communal storage location is not specified" + warningMsg) + vcc.Log.V(1).Info(util.CommStorageLoc + warningMsg) } } diff --git a/vclusterops/replication.go b/vclusterops/replication.go index 03ac737..f0dbced 100644 --- a/vclusterops/replication.go +++ b/vclusterops/replication.go @@ -23,17 +23,26 @@ import ( "github.com/vertica/vcluster/vclusterops/vlog" ) +type TargetDatabaseOptions struct { + TargetHosts []string + TargetDB string + TargetUserName string + TargetPassword *string +} + type VReplicationDatabaseOptions struct { /* part 1: basic db info */ DatabaseOptions /* part 2: replication info */ - TargetHosts []string - TargetDB string - TargetUserName string - TargetPassword *string + TargetDatabaseOptions SourceTLSConfig string SandboxName string + Async bool + ObjectName string + IncludePattern string + ExcludePattern string + TargetNamespace string } func VReplicationDatabaseFactory() VReplicationDatabaseOptions { @@ -80,12 +89,43 @@ func (options *VReplicationDatabaseOptions) validateExtraOptions() error { } if options.SandboxName != "" { - err = util.ValidateSandboxName(options.SandboxName) + err := util.ValidateSandboxName(options.SandboxName) + if err != nil { + return err + } + } + + return nil +} + +func (options *VReplicationDatabaseOptions) validateFineGrainedReplicationOptions() error { + if options.ObjectName != "" { + err := util.ValidateQualifiedObjectNamePattern(options.ObjectName, false) + if err != nil { + return err + } + } + + if options.IncludePattern != "" { + err := util.ValidateQualifiedObjectNamePattern(options.IncludePattern, true) if err != nil { return err } } + if options.ExcludePattern != "" { + err := util.ValidateQualifiedObjectNamePattern(options.ExcludePattern, true) + if err != nil { + return err + } + } + + if options.TargetNamespace != "" { + err 
:= util.ValidateName(options.TargetNamespace, "target-namespace", true) + if err != nil { + return err + } + } return nil } @@ -113,6 +153,12 @@ func (options *VReplicationDatabaseOptions) validateParseOptions(logger vlog.Pri if err != nil { return err } + + // batch 5: validate fine-grained database replication options + err = options.validateFineGrainedReplicationOptions() + if err != nil { + return err + } return nil } @@ -230,8 +276,8 @@ func (vcc VClusterCommands) produceDBReplicationInstructions(options *VReplicati initiatorTargetHost := getInitiator(options.TargetHosts) httpsStartReplicationOp, err := makeHTTPSStartReplicationOp(options.DBName, options.Hosts, options.usePassword, - options.UserName, options.Password, targetUsePassword, options.TargetDB, options.TargetUserName, initiatorTargetHost, - options.TargetPassword, options.SourceTLSConfig, options.SandboxName, vdb) + options.UserName, options.Password, targetUsePassword, &options.TargetDatabaseOptions, initiatorTargetHost, + options.SourceTLSConfig, options.SandboxName, vdb) if err != nil { return instructions, err } diff --git a/vclusterops/restore_points_test.go b/vclusterops/restore_points_test.go index e35b336..fa684ee 100644 --- a/vclusterops/restore_points_test.go +++ b/vclusterops/restore_points_test.go @@ -7,6 +7,9 @@ import ( "github.com/stretchr/testify/assert" ) +const defaultStartTime = " 00:00:00" +const defaultEndTime = " 23:59:59" + func TestShowRestorePointFilterOptions_ValidateAndStandardizeTimestampsIfAny(t *testing.T) { // Test case 1: No validation needed filterOptions := ShowRestorePointFilterOptions{ @@ -40,8 +43,8 @@ func TestShowRestorePointFilterOptions_ValidateAndStandardizeTimestampsIfAny(t * const laterDate = "2022-01-02" // Test case 4: Valid start and end timestamps - startTimestamp = earlierDate + " 00:00:00" - endTimestamp = laterDate + " 00:00:00" + startTimestamp = earlierDate + defaultStartTime + endTimestamp = laterDate + defaultStartTime filterOptions = ShowRestorePointFilterOptions{ StartTimestamp: startTimestamp, EndTimestamp: endTimestamp, @@ -62,7 +65,7 @@ func TestShowRestorePointFilterOptions_ValidateAndStandardizeTimestampsIfAny(t * assert.NoError(t, err) filterOptions.StartTimestamp = earlierDate - filterOptions.EndTimestamp = laterDate + " 23:59:59" + filterOptions.EndTimestamp = laterDate + defaultEndTime err = filterOptions.ValidateAndStandardizeTimestampsIfAny() assert.NoError(t, err) @@ -71,7 +74,7 @@ func TestShowRestorePointFilterOptions_ValidateAndStandardizeTimestampsIfAny(t * err = filterOptions.ValidateAndStandardizeTimestampsIfAny() assert.NoError(t, err) - filterOptions.StartTimestamp = earlierDate + " 23:59:59" + filterOptions.StartTimestamp = earlierDate + defaultEndTime filterOptions.EndTimestamp = earlierDate + " 23:59:59.123456789" err = filterOptions.ValidateAndStandardizeTimestampsIfAny() assert.NoError(t, err) diff --git a/vclusterops/revive_db_test.go b/vclusterops/revive_db_test.go index 9a7068a..67dbd11 100644 --- a/vclusterops/revive_db_test.go +++ b/vclusterops/revive_db_test.go @@ -45,7 +45,8 @@ func TestFindSpecifiedRestorePoint(t *testing.T) { options.RestorePoint.ID = expectedID _, err = options.findSpecifiedRestorePoint(allRestorePoints) expectedErr := fmt.Errorf("found 2 restore points instead of 1: " + - "[{Archive:archive1 ID:id3 Index:2 Timestamp: VerticaVersion:} {Archive:archive1 ID:id3 Index:3 Timestamp: VerticaVersion:}]") + "[{Archive:archive1 ID:id3 Index:2 Timestamp: VerticaVersion:} " + + "{Archive:archive1 ID:id3 Index:3 Timestamp: 
VerticaVersion:}]") assert.EqualError(t, err, expectedErr.Error()) // Test case: No matching restore points found diff --git a/vclusterops/sandbox.go b/vclusterops/sandbox.go index dc05288..abf552c 100644 --- a/vclusterops/sandbox.go +++ b/vclusterops/sandbox.go @@ -27,7 +27,6 @@ type VSandboxOptions struct { SandboxName string SCName string SCHosts []string - SCRawHosts []string // indicate whether a restore point is created when create the sandbox SaveRp bool // indicate whether the metadata of sandbox should be isolated @@ -102,14 +101,6 @@ func (options *VSandboxOptions) analyzeOptions() (err error) { } } - // resolve SCRawHosts to be IP addresses - if len(options.SCRawHosts) > 0 { - options.SCHosts, err = util.ResolveRawHostsToAddresses(options.SCRawHosts, options.IPv6) - if err != nil { - return err - } - } - return nil } @@ -164,18 +155,8 @@ func (vcc *VClusterCommands) produceSandboxSubclusterInstructions(options *VSand // Run Sandboxing httpsSandboxSubclusterOp, err := makeHTTPSandboxingOp(vcc.Log, options.SCName, options.SandboxName, - usePassword, username, options.Password, options.SaveRp, options.Imeta, options.Sls, options.ForUpgrade) - if err != nil { - return instructions, err - } - - // Poll for sandboxed nodes to be up - scHosts := []string{} - for _, host := range options.NodeNameAddressMap { - scHosts = append(scHosts, host) - } - httpsPollSubclusterNodeOp, err := makeHTTPSPollSubclusterNodeStateUpOp(scHosts, options.SCName, - usePassword, username, options.Password) + usePassword, username, options.Password, options.SaveRp, options.Imeta, options.Sls, options.ForUpgrade, + &options.SCHosts) if err != nil { return instructions, err } @@ -184,7 +165,6 @@ func (vcc *VClusterCommands) produceSandboxSubclusterInstructions(options *VSand &httpsGetUpNodesOp, &httpsCheckSubclusterSandboxOp, &httpsSandboxSubclusterOp, - &httpsPollSubclusterNodeOp, ) return instructions, nil @@ -228,6 +208,15 @@ func (options *VSandboxOptions) runCommand(vcc VClusterCommands) error { if runError != nil { return fmt.Errorf("fail to sandbox subcluster %s, %w", options.SCName, runError) } + + // assume the caller knows the status of the cluster better than us, override whatever the sandbox op set + if len(options.NodeNameAddressMap) > 0 { + options.SCHosts = []string{} + for _, ip := range options.NodeNameAddressMap { + options.SCHosts = append(options.SCHosts, ip) + } + } + return nil } diff --git a/vclusterops/save_restore_points.go b/vclusterops/save_restore_points.go new file mode 100644 index 0000000..4cd7f6e --- /dev/null +++ b/vclusterops/save_restore_points.go @@ -0,0 +1,172 @@ +/* + (c) Copyright [2023-2024] Open Text. + Licensed under the Apache License, Version 2.0 (the "License"); + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package vclusterops + +import ( + "fmt" + + "github.com/vertica/vcluster/vclusterops/util" + "github.com/vertica/vcluster/vclusterops/vlog" +) + +type VSaveRestorePointOptions struct { + DatabaseOptions + ArchiveName string + + // the name of the sandbox to target, if left empty the main cluster is assumed + Sandbox string +} + +func VSaveRestorePointFactory() VSaveRestorePointOptions { + options := VSaveRestorePointOptions{} + // set default values to the params + options.setDefaultValues() + return options +} + +func (options *VSaveRestorePointOptions) validateEonOptions(_ vlog.Printer) error { + if !options.IsEon { + return fmt.Errorf("save restore point is only supported in Eon mode") + } + return nil +} + +// Save restore impl +func (options *VSaveRestorePointOptions) validateRequiredOptions(logger vlog.Printer) error { + err := options.validateEonOptions(logger) + if err != nil { + return err + } + err = options.validateBaseOptions(SaveRestorePointsCmd, logger) + if err != nil { + return err + } + if options.ArchiveName == "" { + return fmt.Errorf("must specify an archive name") + } + err = util.ValidateArchiveName(options.ArchiveName) + if err != nil { + return err + } + return nil +} + +func (options *VSaveRestorePointOptions) validateExtraOptions() error { + if options.Sandbox != "" { + return util.ValidateSandboxName(options.Sandbox) + } + return nil +} + +func (options *VSaveRestorePointOptions) validateParseOptions(logger vlog.Printer) error { + // batch 1: validate required parameters + err := options.validateRequiredOptions(logger) + if err != nil { + return err + } + + // batch 2: validate all other params + err = options.validateExtraOptions() + if err != nil { + return err + } + return nil +} + +// analyzeOptions will modify some options based on what is chosen +func (options *VSaveRestorePointOptions) analyzeOptions() (err error) { + // we analyze host names when it is set in user input, otherwise we use hosts in yaml config + if len(options.RawHosts) > 0 { + // resolve RawHosts to be IP addresses + hostAddresses, err := util.ResolveRawHostsToAddresses(options.RawHosts, options.IPv6) + if err != nil { + return err + } + options.Hosts = hostAddresses + } + return nil +} + +func (options *VSaveRestorePointOptions) validateAnalyzeOptions(logger vlog.Printer) error { + if err := options.validateParseOptions(logger); err != nil { + return err + } + if err := options.validateUserName(logger); err != nil { + return err + } + return options.analyzeOptions() +} + +// VSaveRestorePoint can save restore point to a given archive +func (vcc VClusterCommands) VSaveRestorePoint(options *VSaveRestorePointOptions) (err error) { + /* + * - Produce Instructions + * - Create a VClusterOpEngine + * - Give the instructions to the VClusterOpEngine to run + */ + + // validate and analyze options + err = options.validateAnalyzeOptions(vcc.Log) + if err != nil { + return err + } + + // produce save restore points instructions + instructions, err := vcc.produceSaveRestorePointsInstructions(options) + if err != nil { + return fmt.Errorf("fail to produce instructions, %w", err) + } + + // create a VClusterOpEngine, and add certs to the engine + clusterOpEngine := makeClusterOpEngine(instructions, options) + + // give the instructions to the VClusterOpEngine to run + runError := clusterOpEngine.run(vcc.Log) + if runError != nil { + return fmt.Errorf("fail to save restore point: %w", runError) + } + return nil +} + +// The generated instructions will later perform the following operations 
necessary +// for a successful save_restore_point: +// - Retrieve VDB from HTTP endpoints +// - Check NMA connectivity +// - Run save restore points on the target node +func (vcc VClusterCommands) produceSaveRestorePointsInstructions(options *VSaveRestorePointOptions) ([]clusterOp, error) { + var instructions []clusterOp + vdb := makeVCoordinationDatabase() + + err := vcc.getVDBFromRunningDBIncludeSandbox(&vdb, &options.DatabaseOptions, util.MainClusterSandbox) + if err != nil { + return instructions, err + } + + // get up hosts + hosts := options.Hosts + nmaHealthOp := makeNMAHealthOp(options.Hosts) + // Trim host list + hosts = vdb.filterUpHostlist(hosts, options.Sandbox) + bootstrapHost := []string{getInitiator(hosts)} + + nmaSaveRestorePointOp := makeNMASaveRestorePointsOp(vcc.Log, bootstrapHost, + options.DBName, options.UserName, options.ArchiveName, options.Sandbox) + + instructions = append(instructions, + &nmaHealthOp, + &nmaSaveRestorePointOp) + return instructions, nil +} diff --git a/vclusterops/set_config_parameter_test.go b/vclusterops/set_config_parameter_test.go index 817c41b..0c78b2a 100644 --- a/vclusterops/set_config_parameter_test.go +++ b/vclusterops/set_config_parameter_test.go @@ -26,10 +26,10 @@ func TestVSetConfigurationParameterOptions_validateParseOptions(t *testing.T) { logger := vlog.Printer{} opt := VSetConfigurationParameterOptionsFactory() - testPassword := "set-config-test-password" //nolint:gosec + testPd := "set-config-test-pd" testSandbox := "set-config-test-sandbox" testDBName := "set_config_test_dbname" - testUsername := "set-config-test-username" //nolint:gosec + testUsername := "set-config-test-un" testConfigParameter := "set-config-test-parameter" testValue := "set-config-test-value" testLevel := "set-config-test-level" @@ -38,7 +38,7 @@ func TestVSetConfigurationParameterOptions_validateParseOptions(t *testing.T) { opt.RawHosts = append(opt.RawHosts, "set-config-test-raw-host") opt.DBName = testDBName opt.UserName = testUsername - opt.Password = &testPassword + opt.Password = &testPd opt.ConfigParameter = testConfigParameter opt.Value = testValue opt.Level = testLevel diff --git a/vclusterops/restore_points.go b/vclusterops/show_restore_points.go similarity index 97% rename from vclusterops/restore_points.go rename to vclusterops/show_restore_points.go index 6c5101b..89ff1ff 100644 --- a/vclusterops/restore_points.go +++ b/vclusterops/show_restore_points.go @@ -23,6 +23,8 @@ import ( "github.com/vertica/vcluster/vclusterops/vlog" ) +const cantParse = "cannot parse as a date as well: %w" + type VShowRestorePointsOptions struct { DatabaseOptions // Optional arguments to list only restore points that @@ -68,7 +70,7 @@ func (options *ShowRestorePointFilterOptions) ValidateAndStandardizeTimestampsIf if dateOnlyErr != nil { // give up return fmt.Errorf("start timestamp %q is invalid; cannot parse as a datetime: %w; "+ - "cannot parse as a date as well: %w", options.StartTimestamp, dateTimeErr, dateOnlyErr) + cantParse, options.StartTimestamp, dateTimeErr, dateOnlyErr) } // default value of time parsed from date only string is already indicating the start of a day // invoke this function here to only rewrite options.StartTimestamp in date time format @@ -83,7 +85,7 @@ func (options *ShowRestorePointFilterOptions) ValidateAndStandardizeTimestampsIf if dateOnlyErr != nil { // give up return fmt.Errorf("end timestamp %q is invalid; cannot parse as a datetime: %w; "+ - "cannot parse as a date as well: %w", options.EndTimestamp, dateTimeErr, dateOnlyErr) + 
cantParse, options.EndTimestamp, dateTimeErr, dateOnlyErr) } // fill in default value for time and update the end timestamp parsedEndDatetime = util.FillInDefaultTimeForEndTimestamp(&options.EndTimestamp) diff --git a/vclusterops/start_db.go b/vclusterops/start_db.go index c8a2acd..2bcb911 100644 --- a/vclusterops/start_db.go +++ b/vclusterops/start_db.go @@ -138,12 +138,12 @@ func (vcc VClusterCommands) VStartDatabase(options *VStartDatabaseOptions) (vdbP // contain accurate info of nodes in a sandbox if !options.HostsInSandbox && options.IsEon { const warningMsg = " for an Eon database, start_db after revive_db could fail " + - "because we cannot retrieve the correct database information" + util.DBInfo if options.CommunalStorageLocation != "" { vdbNew, e := options.getVDBWhenDBIsDown(vcc) if e != nil { // show a warning message if we cannot get VDB from a down database - vcc.Log.PrintWarning("failed to retrieve the communal storage location" + warningMsg) + vcc.Log.PrintWarning(util.CommStorageFail + warningMsg) } else { // we want to read catalog info only from primary nodes later vdbNew.filterPrimaryNodes() @@ -152,7 +152,7 @@ func (vcc VClusterCommands) VStartDatabase(options *VStartDatabaseOptions) (vdbP } else { // When communal storage location is missing, we only log a warning message // because fail to read cluster_config.json will not affect start_db in most of the cases. - vcc.Log.PrintWarning("communal storage location is not specified" + warningMsg) + vcc.Log.PrintWarning(util.CommStorageLoc + warningMsg) } } numTotalNodes := len(options.Hosts) diff --git a/vclusterops/unsandbox.go b/vclusterops/unsandbox.go index fc0f098..906bb0b 100644 --- a/vclusterops/unsandbox.go +++ b/vclusterops/unsandbox.go @@ -248,7 +248,7 @@ func (vcc *VClusterCommands) produceUnsandboxSCInstructions(options *VUnsandboxO // Run Unsandboxing httpsUnsandboxSubclusterOp, err := makeHTTPSUnsandboxingOp(options.SCName, - usePassword, username, options.Password) + usePassword, username, options.Password, &options.SCHosts) if err != nil { return instructions, err } @@ -277,18 +277,10 @@ func (vcc *VClusterCommands) produceUnsandboxSCInstructions(options *VUnsandboxO // Start the nodes nmaStartNodesOp := makeNMAStartNodeOpAfterUnsandbox("") - // Poll for nodes UP - httpsPollScUp, err := makeHTTPSPollSubclusterNodeStateUpOp(scHosts, options.SCName, - usePassword, username, options.Password) - if err != nil { - return instructions, err - } - instructions = append(instructions, &nmaVersionCheck, &httpsStartUpCommandOp, &nmaStartNodesOp, - &httpsPollScUp, ) } @@ -331,5 +323,13 @@ func (options *VUnsandboxOptions) runCommand(vcc VClusterCommands) error { if runError != nil { return fmt.Errorf("fail to unsandbox subcluster %s, %w", options.SCName, runError) } + + // assume the caller knows the status of the cluster better than us, override whatever the unsandbox op set + if len(options.NodeNameAddressMap) > 0 { + options.SCHosts = []string{} + for _, ip := range options.NodeNameAddressMap { + options.SCHosts = append(options.SCHosts, ip) + } + } return nil } diff --git a/vclusterops/util/util.go b/vclusterops/util/util.go index 70f4cf9..bedbb7c 100644 --- a/vclusterops/util/util.go +++ b/vclusterops/util/util.go @@ -45,6 +45,29 @@ type FetchAllEnvVars interface { TypeName() string } +const ( + RootDir = "/" + NodeInfoCountMismatch = "[%s] expect one node's information, but got %d nodes' information from HTTPS /v1/nodes/ endpoint on host %s" + DepotSizeHint = "integer%, which expresses the depot size as a 
percentage of the total disk size." + DepotSizeKMGTMsg = "integer{K|M|G|T}, where K is kilobytes, M is megabytes, G is gigabytes, and T is terabytes.\n" + DepotFmtMsg = "Size of depot in one of the following formats:\n" + TimeToWaitToClose = "The time to wait, in seconds, for user connections to close on their own.\n" + TimeExpire = "When the time expires, user connections are automatically closed and the database is shut down.\n" + InfiniteWaitTime = "If the value is negative, VCluster waits indefinitely until all user connections close." + CloseAllConns = "If set to 0, VCluster closes all user connections immediately.\n" + Default = "Default: " + FailToWriteToConfig = "Failed to write the configuration file: " + CallCommand = "Calling method Run() for command " + DBInfo = "because we cannot retrieve the correct database information" + CommStorageLoc = "communal storage location is not specified" + CommStorageFail = "failed to retrieve the communal storage location" + SubclustersEndpoint = "subclusters/" + ShutDownEndpoint = "/shutdown" + NodesEndpoint = "nodes/" + DropEndpoint = "/drop" + ArchiveEndpoint = "archives" +) + const ( keyValueArrayLen = 2 ipv4Str = "IPv4" @@ -60,6 +83,8 @@ ( nmaRootCAPathEnvVar = "NMA_ROOTCA_PATH" nmaCertPathEnvVar = "NMA_CERT_PATH" nmaKeyPathEnvVar = "NMA_KEY_PATH" + + objectNameUnsupportedCharacters = `=<>'^\".@?#&/:;{}()[] \~!%+|,` + "`$" ) // NmaSecretLookup retrieves kubernetes secrets. @@ -223,9 +248,8 @@ func ResolveToAbsPath(path string) (string, error) { return homeDir, nil } else if strings.HasPrefix(path, "~/") { return filepath.Join(homeDir, path[2:]), nil - } else { - return "", fmt.Errorf("invalid path") } + return "", fmt.Errorf("invalid path") } // IP util functions @@ -478,7 +502,7 @@ func IsOptionSet(f *flag.FlagSet, optionName string) bool { // ValidateName will validate the name of an obj, the obj can be database, subcluster, etc.
// when a name is provided, make sure no special chars are in it func ValidateName(name, obj string, allowDash bool) error { - escapeChars := `=<>'^\".@*?#&/:;{}()[] \~!%+|,` + "`$" + escapeChars := objectNameUnsupportedCharacters + "*" if !allowDash { escapeChars += "-" } @@ -490,6 +514,42 @@ func ValidateName(name, obj string, allowDash bool) error { return nil } +// ValidateQualifiedObjectNamePattern validates a comma-separated list of [.namespace].schema.table patterns. +// It returns nil when the pattern is valid, and an error otherwise. +func ValidateQualifiedObjectNamePattern(pattern string, allowAsterisk bool) error { + const maxPatternLen = 128 + + // Build the escaped set of unsupported characters used in the regex below + disallowedChars := objectNameUnsupportedCharacters + if !allowAsterisk { + disallowedChars += "*" + } + disallowedCharsRegex := regexp.QuoteMeta(disallowedChars) + + // Validates [.namespace].schema.table format and disallows any special characters + // Ref: https://docs.vertica.com/24.1.x/en/sql-reference/language-elements/identifiers/ + qualifiedObjectNameRegex := fmt.Sprintf(`^(\.[^%s]+\.)?([^%s]+\.)?[^%s]+$`, + disallowedCharsRegex, disallowedCharsRegex, disallowedCharsRegex) + r := regexp.MustCompile(qualifiedObjectNameRegex) + + objects := strings.Split(pattern, ",") + for _, obj := range objects { + // a name starting with v_ is invalid + if strings.HasPrefix(obj, "v_") { + return fmt.Errorf("invalid character in pattern %s: %s", pattern, obj) + } + // a name longer than 128 characters is invalid + if len([]rune(obj)) > maxPatternLen { + return fmt.Errorf("pattern is too long %s: %s", pattern, obj) + } + match := r.MatchString(obj) + if !match { + return fmt.Errorf("invalid pattern %s: %s", pattern, obj) + } + } + return nil +} + func ValidateDBName(dbName string) error { return ValidateName(dbName, "database", false) } @@ -502,6 +562,10 @@ func ValidateSandboxName(dbName string) error { return ValidateName(dbName, "sandbox", true) } +func ValidateArchiveName(archive string) error { + return ValidateName(archive, "archive", true) +} + // suppress help message for hidden options func SetParserUsage(parser *flag.FlagSet, op string) { fmt.Printf("Usage of %s:\n", op) @@ -680,3 +744,12 @@ func IsK8sEnvironment() bool { port, portSet := os.LookupEnv(kubernetesPort) return portSet && port != "" } + +// GetClusterName returns a readable cluster name based on the sandbox name. +// It is useful for logging which cluster an operation targets.
+func GetClusterName(sandbox string) string { + if sandbox == "" { + return "main cluster" + } + return "sandbox " + sandbox +} diff --git a/vclusterops/util/util_test.go b/vclusterops/util/util_test.go index d4e6bad..6790c32 100644 --- a/vclusterops/util/util_test.go +++ b/vclusterops/util/util_test.go @@ -29,6 +29,8 @@ import ( type NMAHealthOpResponse map[string]string +const InvalChar = "invalid character in " + func redirectLog() (*bytes.Buffer, vlog.Printer) { // redirect log to a local bytes.Buffer var logBuffer bytes.Buffer @@ -268,19 +270,95 @@ func TestValidateName(t *testing.T) { // negative cases err = ValidateName("test$db", obj, false) - assert.ErrorContains(t, err, "invalid character in "+obj+" name: $") + assert.ErrorContains(t, err, InvalChar+obj+" name: $") err = ValidateName("[db1]", obj, false) - assert.ErrorContains(t, err, "invalid character in "+obj+" name: [") + assert.ErrorContains(t, err, InvalChar+obj+" name: [") err = ValidateName("!!??!!db1", obj, false) - assert.ErrorContains(t, err, "invalid character in "+obj+" name: !") + assert.ErrorContains(t, err, InvalChar+obj+" name: !") err = ValidateName("test-db", obj, false) - assert.ErrorContains(t, err, "invalid character in "+obj+" name: -") + assert.ErrorContains(t, err, InvalChar+obj+" name: -") err = ValidateName("test-db", obj, true) assert.Nil(t, err) + + err = ValidateName("0test-db", obj, true) + assert.Nil(t, err) +} + +func TestValidateQualifiedObjectNamePattern(t *testing.T) { + // positive cases + obj := "schema.database" + err := ValidateQualifiedObjectNamePattern(obj, true) + assert.Nil(t, err) + + obj = "schema.*" + err = ValidateQualifiedObjectNamePattern(obj, true) + assert.Nil(t, err) + + obj = "*.database" + err = ValidateQualifiedObjectNamePattern(obj, true) + assert.Nil(t, err) + + obj = "valid.valid,valid.valid" + err = ValidateQualifiedObjectNamePattern(obj, true) + assert.Nil(t, err) + + const matchAnySchemaTable = "*.*" + err = ValidateQualifiedObjectNamePattern(matchAnySchemaTable, true) + assert.Nil(t, err) + + const matchAnyTable = "*" + err = ValidateQualifiedObjectNamePattern(matchAnyTable, true) + assert.Nil(t, err) + + obj = ".namespace.*.*" + err = ValidateQualifiedObjectNamePattern(obj, true) + assert.Nil(t, err) + + obj = ".namespace.schema.table" + err = ValidateQualifiedObjectNamePattern(obj, false) + assert.Nil(t, err) + + // negative cases + + const ( + invalidCharacter = "invalid character in pattern " + invalidPattern = "invalid pattern " + ) + + obj = "v_invalid.valid" + err = ValidateQualifiedObjectNamePattern(obj, true) + assert.ErrorContains(t, err, invalidCharacter+obj+": v_invalid.valid") + + obj = "valid.valid,v_invalid_name.valid" + err = ValidateQualifiedObjectNamePattern(obj, true) + assert.ErrorContains(t, err, invalidCharacter+obj+": v_invalid_name.valid") + + obj = `valid.v_invalid_TO_loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo + oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo + oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong` + err = ValidateQualifiedObjectNamePattern(obj, true) + assert.ErrorContains(t, err, "pattern is too long "+obj+ + `: valid.v_invalid_TO_loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo + oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo + 
oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong`) + + obj = "no_leading_dot.*.*" + err = ValidateQualifiedObjectNamePattern(obj, true) + assert.ErrorContains(t, err, invalidPattern+obj+": no_leading_dot.*.*") + + obj = ".wildcards.*.*" + err = ValidateQualifiedObjectNamePattern(obj, false) + assert.ErrorContains(t, err, invalidPattern+obj+": .wildcards.*.*") + + err = ValidateQualifiedObjectNamePattern(matchAnySchemaTable, false) + assert.ErrorContains(t, err, invalidPattern+matchAnySchemaTable+": *.*") + + err = ValidateQualifiedObjectNamePattern(matchAnyTable, false) + assert.ErrorContains(t, err, invalidPattern+matchAnyTable+": *") } func TestSetEonFlagHelpMsg(t *testing.T) { @@ -403,3 +481,11 @@ func TestGetEnvInt(t *testing.T) { actual = GetEnvInt(key, fallback) assert.Equal(t, fallback, actual) } + +func TestGetClusterName(t *testing.T) { + cluster := GetClusterName("") + assert.Equal(t, "main cluster", cluster) + + cluster = GetClusterName("sand1") + assert.Equal(t, "sandbox sand1", cluster) +}
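For reviewers who want to try the new fine-grained replication validation and the logging helper outside of the unit tests, here is a minimal, hypothetical sketch. The `main` wrapper and the sample patterns are illustrative only and are not part of this change; the import path is the `vclusterops/util` package touched by this diff.

```go
package main

import (
	"fmt"

	"github.com/vertica/vcluster/vclusterops/util"
)

func main() {
	// Patterns follow [.namespace.]schema.table and may be comma-separated.
	// Wildcards are only accepted when allowAsterisk is true (include/exclude patterns).
	for _, pattern := range []string{"public.orders", ".ns1.*.*", "v_internal.t"} {
		if err := util.ValidateQualifiedObjectNamePattern(pattern, true); err != nil {
			fmt.Printf("rejected %q: %v\n", pattern, err)
			continue
		}
		fmt.Printf("accepted %q\n", pattern)
	}

	// GetClusterName turns an optional sandbox name into a log-friendly label.
	fmt.Println(util.GetClusterName(""))      // "main cluster"
	fmt.Println(util.GetClusterName("sand1")) // "sandbox sand1"
}
```

Running this should report the first two patterns as accepted and the v_-prefixed one as rejected, mirroring the cases covered in TestValidateQualifiedObjectNamePattern and TestGetClusterName above.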