From f59a8633abb4eeddb9113c836ef4f9158be6c264 Mon Sep 17 00:00:00 2001 From: secwall Date: Tue, 26 Nov 2024 19:13:17 +0100 Subject: [PATCH] Fix some minor typos --- internal/app/types.go | 2 +- internal/redis/node.go | 26 +++++++++---------- internal/redis/senticache.go | 4 +-- internal/redis/shard.go | 2 +- redis_patches/0003_Add_offline_mode.patch | 2 +- .../05_cluster_replication_fix.feature | 2 +- .../05_sentinel_replication_fix.feature | 2 +- tests/rdsync_test.go | 12 ++++----- tests/testutil/docker_composer.go | 8 +++--- tests/testutil/matchers/matchers.go | 2 +- 10 files changed, 31 insertions(+), 31 deletions(-) diff --git a/internal/app/types.go b/internal/app/types.go index 05d6190..81542ca 100644 --- a/internal/app/types.go +++ b/internal/app/types.go @@ -203,7 +203,7 @@ func (ss *SentiCacheState) String() string { const ( // CauseManual means switchover was issued via command line CauseManual = "manual" - // CauseWorker means switchover was initated via MDB worker (set directly to dcs) + // CauseWorker means switchover was initiated via DCS CauseWorker = "worker" // CauseAuto means failover was started automatically by failure detection process CauseAuto = "auto" diff --git a/internal/redis/node.go b/internal/redis/node.go index 206f38b..32bbb94 100644 --- a/internal/redis/node.go +++ b/internal/redis/node.go @@ -302,9 +302,9 @@ func (n *Node) GetQuorumReplicas(ctx context.Context) (string, error) { if len(vals) != 2 { return "", fmt.Errorf("unexpected config get result for quorum-replicas: %v", vals) } - splitted := strings.Split(vals[1], " ") - sort.Strings(splitted) - return strings.Join(splitted, " "), nil + split := strings.Split(vals[1], " ") + sort.Strings(split) + return strings.Join(split, " "), nil } // SetQuorumReplicas sets desired quorum replicas @@ -409,8 +409,8 @@ func (n *Node) Restart(ctx context.Context) error { return fmt.Errorf("restarting %s is not possible - not local", n.fqdn) } n.logger.Warn(fmt.Sprintf("Restarting with %s", 
n.config.Redis.RestartCommand)) - splitted := strings.Fields(n.config.Redis.RestartCommand) - cmd := exec.CommandContext(ctx, splitted[0], splitted[1:]...) + split := strings.Fields(n.config.Redis.RestartCommand) + cmd := exec.CommandContext(ctx, split[0], split[1:]...) return cmd.Run() } @@ -540,13 +540,13 @@ func (n *Node) IsClusterMajorityAlive(ctx context.Context) (bool, error) { failedMasters := 0 lines := strings.Split(cmd.Val(), "\n") for _, line := range lines { - splitted := strings.Split(line, " ") - if len(splitted) < 3 { + split := strings.Split(line, " ") + if len(split) < 3 { continue } - if strings.Contains(splitted[2], "master") { + if strings.Contains(split[2], "master") { totalMasters += 1 - if strings.Contains(splitted[2], "fail") { + if strings.Contains(split[2], "fail") { failedMasters += 1 } } @@ -600,12 +600,12 @@ func (n *Node) HasClusterSlots(ctx context.Context) (bool, error) { } lines := strings.Split(cmd.Val(), "\n") for _, line := range lines { - splitted := strings.Split(line, " ") - if len(splitted) < 3 { + split := strings.Split(line, " ") + if len(split) < 3 { continue } - if strings.Contains(splitted[2], "myself") { - return len(splitted) > 8, nil + if strings.Contains(split[2], "myself") { + return len(split) > 8, nil } } return false, nil diff --git a/internal/redis/senticache.go b/internal/redis/senticache.go index 6d75b12..87d2043 100644 --- a/internal/redis/senticache.go +++ b/internal/redis/senticache.go @@ -105,8 +105,8 @@ func (s *SentiCacheNode) Close() error { func (s *SentiCacheNode) restart(ctx context.Context) error { s.logger.Error("Restarting broken senticache") - splitted := strings.Fields(s.config.SentinelMode.CacheRestartCommand) - cmd := exec.CommandContext(ctx, splitted[0], splitted[1:]...) + split := strings.Fields(s.config.SentinelMode.CacheRestartCommand) + cmd := exec.CommandContext(ctx, split[0], split[1:]...) 
return cmd.Run() } diff --git a/internal/redis/shard.go b/internal/redis/shard.go index 51a41b7..3d0a094 100644 --- a/internal/redis/shard.go +++ b/internal/redis/shard.go @@ -26,7 +26,7 @@ type NodeConfiguration struct { Priority int `json:"priority"` } -// NewShard is a Shard constrcutor +// NewShard is a Shard constructor func NewShard(config *config.Config, logger *slog.Logger, dcs dcs.DCS) *Shard { s := &Shard{ config: config, diff --git a/redis_patches/0003_Add_offline_mode.patch b/redis_patches/0003_Add_offline_mode.patch index 8aa323b..4575e82 100644 --- a/redis_patches/0003_Add_offline_mode.patch +++ b/redis_patches/0003_Add_offline_mode.patch @@ -84,7 +84,7 @@ index be2405170..05bdf5c0c 100644 int get_ack_from_slaves; /* If true we send REPLCONF GETACK. */ int repl_paused; /* If true we don't try to connect to master */ + /* Offline mode */ -+ int offline; /* If true only localhost connectiona are accepted */ ++ int offline; /* If true only localhost connections are accepted */ + int offline_initial; /* Initial state of offline mode (from config) */ /* Limits */ unsigned int maxclients; /* Max number of simultaneous clients */ diff --git a/tests/features/05_cluster_replication_fix.feature b/tests/features/05_cluster_replication_fix.feature index 0502f6c..7a870b1 100644 --- a/tests/features/05_cluster_replication_fix.feature +++ b/tests/features/05_cluster_replication_fix.feature @@ -121,7 +121,7 @@ Feature: Cluster mode broken replication fix "redis1" """ - Scenario: Cluster mode accidential cascade replication is fixed + Scenario: Cluster mode accidental cascade replication is fixed Given clustered shard is up and running Then redis host "redis1" should be master And redis host "redis2" should become replica of "redis1" within "15" seconds diff --git a/tests/features/05_sentinel_replication_fix.feature b/tests/features/05_sentinel_replication_fix.feature index 77f8938..dcb58d5 100644 --- a/tests/features/05_sentinel_replication_fix.feature +++ 
b/tests/features/05_sentinel_replication_fix.feature @@ -137,7 +137,7 @@ Feature: Sentinel mode broken replication fix "redis1" """ - Scenario: Sentinel mode accidential cascade replication is fixed + Scenario: Sentinel mode accidental cascade replication is fixed Given sentinel shard is up and running Then redis host "redis1" should be master And redis host "redis2" should become replica of "redis1" within "15" seconds diff --git a/tests/rdsync_test.go b/tests/rdsync_test.go index 380ea47..69ebdee 100644 --- a/tests/rdsync_test.go +++ b/tests/rdsync_test.go @@ -685,9 +685,9 @@ func (tctx *testContext) stepCommandOutputShouldMatch(matcher string, body *godo } func (tctx *testContext) stepIRunCmdOnHost(host string, body *godog.DocString) error { - splitted := strings.Split(strings.TrimSpace(body.Content), "\"") + split := strings.Split(strings.TrimSpace(body.Content), "\"") var args []string - for index, arg := range splitted { + for index, arg := range split { if index%2 == 1 { args = append(args, strings.TrimSpace(arg)) } else { @@ -707,9 +707,9 @@ func (tctx *testContext) stepRedisCmdResultShouldMatch(matcher string, body *god } func (tctx *testContext) stepIRunSenticacheCmdOnHost(host string, body *godog.DocString) error { - splitted := strings.Split(strings.TrimSpace(body.Content), "\"") + split := strings.Split(strings.TrimSpace(body.Content), "\"") var args []string - for index, arg := range splitted { + for index, arg := range split { if index%2 == 1 { args = append(args, strings.TrimSpace(arg)) } else { @@ -947,7 +947,7 @@ func (tctx *testContext) stepSenticacheHostShouldHaveMasterWithin(host, master s return err } -func (tctx *testContext) stepISaveZookeperQueryResultAs(varname string) error { +func (tctx *testContext) stepISaveZookeeperQueryResultAs(varname string) error { var j interface{} if tctx.zkQueryResult != "" { if err := json.Unmarshal([]byte(tctx.zkQueryResult), &j); err != nil { @@ -1110,7 +1110,7 @@ func InitializeScenario(s 
*godog.ScenarioContext) { s.Step(`^I break replication on host "([^"]*)"$`, tctx.stepBreakReplicationOnHost) // variables - s.Step(`^I save zookeeper query result as "([^"]*)"$`, tctx.stepISaveZookeperQueryResultAs) + s.Step(`^I save zookeeper query result as "([^"]*)"$`, tctx.stepISaveZookeeperQueryResultAs) s.Step(`^I save command output as "([^"]*)"$`, tctx.stepISaveCommandOutputAs) s.Step(`^I save redis cmd result as "([^"]*)"$`, tctx.stepISaveRedisCmdResultAs) s.Step(`^I save "([^"]*)" as "([^"]*)"$`, tctx.stepISaveValAs) diff --git a/tests/testutil/docker_composer.go b/tests/testutil/docker_composer.go index a72661e..595dbc1 100644 --- a/tests/testutil/docker_composer.go +++ b/tests/testutil/docker_composer.go @@ -39,9 +39,9 @@ type Composer interface { Stop(service string) error // Starts container/VM Start(service string) error - // Detachs container/VM from network + // Detaches container/VM from network DetachFromNet(service string) error - // Attachs container/VM to network + // Attaches container/VM to network AttachToNet(service string) error // Blocks port on host BlockPort(service string, port int) error @@ -253,7 +253,7 @@ func (dc *DockerComposer) RunAsyncCommand(service string, cmd string) error { return dc.api.ContainerExecStart(context.Background(), execResp.ID, container.ExecStartOptions{}) } -// GetFile returns content of the fail from continer by path +// GetFile returns content of the file from container by path func (dc *DockerComposer) GetFile(service, path string) (io.ReadCloser, error) { cont, ok := dc.containers[service] if !ok { @@ -296,7 +296,7 @@ func (dc *DockerComposer) Stop(service string) error { return err } -// AttachToNet attachs container to network +// AttachToNet attaches container to network func (dc *DockerComposer) AttachToNet(service string) error { _, ok := dc.containers[service] if !ok { diff --git a/tests/testutil/matchers/matchers.go index 341c715..d10a972 100644 --- 
a/tests/testutil/matchers/matchers.go +++ b/tests/testutil/matchers/matchers.go @@ -154,7 +154,7 @@ func JSONExactlyMatcher(actual string, expected string) error { return nil } -// GetMatcher returns registred matcher by name +// GetMatcher returns registered matcher by name func GetMatcher(name string) (Matcher, error) { if matcher, ok := registry[name]; ok { return matcher, nil