diff --git a/.changelog/5070.cfg.md b/.changelog/5070.cfg.md
index 28bfcb7c5d5..4e88bf4690f 100644
--- a/.changelog/5070.cfg.md
+++ b/.changelog/5070.cfg.md
@@ -66,7 +66,7 @@
 to log level. Use the `default` module to map the default log level.
 
 - `consensus.tendermint.p2p.unconditional_peer` to
   `consensus.p2p.unconditional_peers`.
-- `ias.proxy.address` to `ias.proxy_address`.
+- `ias.proxy.address` to `ias.proxy_addresses`.
 - `ias.debug.skip_verify` to `ias.debug_skip_verify`.
diff --git a/.changelog/5376.feature.md b/.changelog/5376.feature.md
index 7c4ced6996c..985c5b5718d 100644
--- a/.changelog/5376.feature.md
+++ b/.changelog/5376.feature.md
@@ -4,7 +4,6 @@
 The following `oasis-node` subcommands have been deprecated and should no
 longer be used in favor of the [Oasis CLI]:
 
 - `consensus *`
-- `control clear-deregister` (no longer needed)
 - `control runtime-stats`
 - `debug bundle *`
 - `governance *`
diff --git a/go/ias/config/config.go b/go/ias/config/config.go
index 929eedc4485..8afb0b9bb8a 100644
--- a/go/ias/config/config.go
+++ b/go/ias/config/config.go
@@ -4,7 +4,7 @@ package config
 // Config is the IAS configuration structure.
 type Config struct {
-	// IAS proxy address in the form ID@HOST:PORT.
-	ProxyAddress []string `yaml:"proxy_address"`
+	// IAS proxy addresses in the form ID@HOST:PORT.
+	ProxyAddresses []string `yaml:"proxy_addresses"`
 
 	// Skip IAS AVR signature verification (UNSAFE).
 	DebugSkipVerify bool `yaml:"debug_skip_verify,omitempty"`
@@ -18,7 +18,7 @@ func (c *Config) Validate() error {
 // DefaultConfig returns the default configuration settings.
 func DefaultConfig() Config {
 	return Config{
-		ProxyAddress:    []string{},
+		ProxyAddresses:  []string{},
 		DebugSkipVerify: false,
 	}
 }
diff --git a/go/ias/init.go b/go/ias/init.go
index f364882d287..4cc7b7a683b 100644
--- a/go/ias/init.go
+++ b/go/ias/init.go
@@ -24,6 +24,6 @@ func New(identity *identity.Identity) ([]api.Endpoint, error) {
 	return client.New(
 		identity,
-		config.GlobalConfig.IAS.ProxyAddress,
+		config.GlobalConfig.IAS.ProxyAddresses,
 	)
 }
diff --git a/go/oasis-node/cmd/config/migrate/migrate.go b/go/oasis-node/cmd/config/migrate/migrate.go
index ddf1d1ff863..028fb23a0c8 100644
--- a/go/oasis-node/cmd/config/migrate/migrate.go
+++ b/go/oasis-node/cmd/config/migrate/migrate.go
@@ -391,9 +391,9 @@ func doMigrateConfig(cmd *cobra.Command, args []string) {
 	if history, ok := m(runtime)["history"]; ok {
 		if pruner, ok := m(history)["pruner"]; ok {
-			logger.Info("runtime.history.pruner is now runtime.history_pruner")
+			logger.Info("runtime.history.pruner is now runtime.prune")
 			mkSubMap(newCfg, "runtime")
-			m(newCfg["runtime"])["history_pruner"] = m(pruner)
+			m(newCfg["runtime"])["prune"] = m(pruner)
 			delete(m(history), "pruner")
 		} else {
 			logger.Warn("input has invalid entries under runtime.history")
@@ -548,6 +548,53 @@ func doMigrateConfig(cmd *cobra.Command, args []string) {
 			m(m(newCfg["p2p"])["registration"])["addresses"] = addresses
 			delete(m(p2p), "addresses")
 		}
+
+		// Migrate gossipsub config.
+		for _, k := range []string{
+			"peer_outbound_queue_size",
+			"validate_queue_size",
+			"validate_concurrency",
+			"validate_throttle",
+		} {
+			if v, ok := m(p2p)[k]; ok {
+				logger.Info(fmt.Sprintf("worker.p2p.%s is now p2p.gossipsub.%s", k, k))
+				mkSubMap(newCfg, "p2p")
+				mkSubMap(m(newCfg["p2p"]), "gossipsub")
+				m(m(newCfg["p2p"])["gossipsub"])[k] = v
+				delete(m(p2p), k)
+			}
+		}
+
+		// Migrate connection manager config.
+		for _, k := range []string{
+			"max_num_peers",
+			"peer_grace_period",
+			"persistent_peers",
+		} {
+			if v, ok := m(p2p)[k]; ok {
+				logger.Info(fmt.Sprintf("worker.p2p.%s is now p2p.connection_manager.%s", k, k))
+				mkSubMap(newCfg, "p2p")
+				mkSubMap(m(newCfg["p2p"]), "connection_manager")
+				m(m(newCfg["p2p"])["connection_manager"])[k] = v
+				delete(m(p2p), k)
+			}
+		}
+
+		if blockedPeers, ok := m(p2p)["blocked_peers"]; ok {
+			logger.Info("worker.p2p.blocked_peers is now p2p.connection_gater.blocked_peers")
+			mkSubMap(newCfg, "p2p")
+			mkSubMap(m(newCfg["p2p"]), "connection_gater")
+			m(m(newCfg["p2p"])["connection_gater"])["blocked_peers"] = blockedPeers
+			delete(m(p2p), "blocked_peers")
+		}
+
+		if connectednessLowWater, ok := m(p2p)["connectedness_low_water"]; ok {
+			logger.Info("worker.p2p.connectedness_low_water is now p2p.peer_manager.connectedness_low_water")
+			mkSubMap(newCfg, "p2p")
+			mkSubMap(m(newCfg["p2p"]), "peer_manager")
+			m(m(newCfg["p2p"])["peer_manager"])["connectedness_low_water"] = connectednessLowWater
+			delete(m(p2p), "connectedness_low_water")
+		}
 	}
 }
@@ -635,8 +682,8 @@ func doMigrateConfig(cmd *cobra.Command, args []string) {
 	if proxy, ok := m(ias)["proxy"]; ok {
 		if address, ok := m(proxy)["address"]; ok {
-			logger.Info("ias.proxy.address is now ias.proxy_address")
-			mIAS["proxy_address"] = address
+			logger.Info("ias.proxy.address is now ias.proxy_addresses")
+			mIAS["proxy_addresses"] = address
 			delete(m(proxy), "address")
 		}
 	}
diff --git a/go/oasis-node/cmd/config/migrate/migrate_test.go b/go/oasis-node/cmd/config/migrate/migrate_test.go
index dc6a8d039dd..05d2ae9ecd1 100644
--- a/go/oasis-node/cmd/config/migrate/migrate_test.go
+++ b/go/oasis-node/cmd/config/migrate/migrate_test.go
@@ -70,6 +70,17 @@ genesis:
 worker:
   p2p:
     port: 9002
+    peer_outbound_queue_size: 42
+    validate_queue_size: 43
+    validate_concurrency: 44
+    validate_throttle: 45
+    max_num_peers: 46
+    peer_grace_period: 47s
+    connectedness_low_water: 48
+    persistent_peers:
+      - "foo@1.2.3.4:4321"
+    blocked_peers:
+      - "1.2.3.4"
   registration:
     entity: /storage/node/entity/entity.json
@@ -138,6 +149,10 @@
 runtime:
   environment: sgx
   provisioner: sandboxed
+  history:
+    pruner:
+      strategy: keep_last
+
   sgx:
     loader: /oasis/bin/oasis-core-runtime-loader
@@ -480,13 +495,23 @@ func TestConfigMigrationComplex(t *testing.T) {
 	require.Equal(newConfig.P2P.Port, uint16(9002))
 	require.Equal(newConfig.P2P.Seeds[0], "HcDFrTp/MqRHtju5bCx6TIhIMd6X/0ZQ3lUG73q5898=@34.86.165.6:26656")
 	require.Equal(newConfig.P2P.Seeds[1], "HcDFrTp/MqRHtju5bCx6TIhIMd6X/0ZQ3lUG73q5898=@34.86.165.6:9200")
+	require.Equal(newConfig.P2P.Gossipsub.PeerOutboundQueueSize, 42)
+	require.Equal(newConfig.P2P.Gossipsub.ValidateQueueSize, 43)
+	require.Equal(newConfig.P2P.Gossipsub.ValidateConcurrency, 44)
+	require.Equal(newConfig.P2P.Gossipsub.ValidateThrottle, 45)
+	require.Equal(newConfig.P2P.ConnectionManager.MaxNumPeers, 46)
+	require.Equal(newConfig.P2P.ConnectionManager.PeerGracePeriod, 47*time.Second)
+	require.Equal(newConfig.P2P.ConnectionManager.PersistentPeers[0], "foo@1.2.3.4:4321")
+	require.Equal(newConfig.P2P.ConnectionGater.BlockedPeerIPs[0], "1.2.3.4")
+	require.Equal(newConfig.P2P.PeerManager.ConnectednessLowWater, 48.0)
 	require.Equal(newConfig.Consensus.P2P.PersistentPeer[0], "INSERT_P2P_PUBKEY_HERE@1.2.3.4:5678")
 	require.Equal(newConfig.Consensus.P2P.UnconditionalPeer[0], "HcDFrTp/MqRHtju5bCx6TIhIMd6X/0ZQ3lUG73q5898=@34.86.165.6:26656")
 	require.Equal(newConfig.Consensus.SentryUpstreamAddresses[0], "INSERT_P2P_PUBKEY_HERE@1.2.3.4:5678")
-	require.Equal(newConfig.IAS.ProxyAddress, []string{"qwerty@1.2.3.4:4321"})
+	require.Equal(newConfig.IAS.ProxyAddresses, []string{"qwerty@1.2.3.4:4321"})
 	require.Equal(newConfig.Pprof.BindAddress, "0.0.0.0:6666")
 	require.Equal(newConfig.Runtime.Environment, rtConfig.RuntimeEnvironmentSGX)
 	require.Equal(newConfig.Runtime.Provisioner, rtConfig.RuntimeProvisionerSandboxed)
+	require.Equal(newConfig.Runtime.Prune.Strategy, "keep_last")
 	require.Equal(newConfig.Runtime.SGXLoader, "/oasis/bin/oasis-core-runtime-loader")
 	require.Equal(newConfig.Runtime.Paths[0], "/oasis/runtimes/sapphire-paratime.orc")
 	require.Equal(newConfig.Runtime.Paths[1], "/oasis/runtimes/sapphire-paratime-previous.orc")
@@ -529,7 +554,7 @@ func TestConfigMigrationKM(t *testing.T) {
 	require.Equal(newConfig.P2P.Seeds[1], "INSERT_P2P_PUBKEY_HERE@1.2.3.4:9200")
 	require.Equal(newConfig.P2P.Registration.Addresses[0], "4.3.2.1:26656")
 	require.Equal(newConfig.Registration.Entity, "/km/etc/entity/entity.json")
-	require.Equal(newConfig.IAS.ProxyAddress, []string{"foo@1.2.3.4:5678"})
+	require.Equal(newConfig.IAS.ProxyAddresses, []string{"foo@1.2.3.4:5678"})
 	require.Equal(newConfig.Runtime.Environment, rtConfig.RuntimeEnvironmentSGX)
 	require.Equal(newConfig.Runtime.Provisioner, rtConfig.RuntimeProvisionerSandboxed)
 	require.Equal(newConfig.Runtime.SGXLoader, "/km/bin/oasis-core-runtime-loader")
@@ -603,7 +628,7 @@ func TestConfigMigrationDocsParaTime(t *testing.T) {
 	require.Equal(newConfig.P2P.Seeds[0], "H6u9MtuoWRKn5DKSgarj/dzr2Z9BsjuRHgRAoXITOcU=@35.199.49.168:26656")
 	require.Equal(newConfig.P2P.Seeds[1], "H6u9MtuoWRKn5DKSgarj/dzr2Z9BsjuRHgRAoXITOcU=@35.199.49.168:9200")
 	require.Equal(newConfig.Registration.Entity, "/node/entity/entity.json")
-	require.Equal(newConfig.IAS.ProxyAddress, []string{"asdf@5.4.3.2:1234"})
+	require.Equal(newConfig.IAS.ProxyAddresses, []string{"asdf@5.4.3.2:1234"})
 	require.Equal(newConfig.Runtime.SGXLoader, "/node/bin/oasis-core-runtime-loader")
 	require.Equal(newConfig.Runtime.Paths[0], "/node/runtimes/test.orc")
 	require.Equal(newConfig.Consensus.ListenAddress, "tcp://0.0.0.0:26656")
diff --git a/go/oasis-node/cmd/control/control.go b/go/oasis-node/cmd/control/control.go
index 36b72d7c771..3adf5a8ab8e 100644
--- a/go/oasis-node/cmd/control/control.go
+++ b/go/oasis-node/cmd/control/control.go
@@ -46,10 +46,9 @@ var (
 	}
 
 	controlClearDeregisterCmd = &cobra.Command{
-		Use:        "clear-deregister",
-		Short:      "clear the forced node deregistration flag",
-		Run:        doClearDeregister,
-		Deprecated: "it should not longer be necessary.",
+		Use:   "clear-deregister",
+		Short: "clear the forced node deregistration flag",
+		Run:   doClearDeregister,
 	}
 
 	controlUpgradeBinaryCmd = &cobra.Command{
diff --git a/go/oasis-node/cmd/node/unsafe_reset.go b/go/oasis-node/cmd/node/unsafe_reset.go
index 4afb61433a1..11dce9f5632 100644
--- a/go/oasis-node/cmd/node/unsafe_reset.go
+++ b/go/oasis-node/cmd/node/unsafe_reset.go
@@ -44,6 +44,7 @@ var (
 	nodeStateGlobs = []string{
 		"persistent-store.*.db",
 		cmtCommon.StateDir,
+		"tendermint", // XXX: Legacy filename for consensus state directory.
 		filepath.Join(runtimesGlob, history.DbFilename),
 	}
diff --git a/go/oasis-test-runner/oasis/network.go b/go/oasis-test-runner/oasis/network.go
index 5a7f2d02d05..980516c326e 100644
--- a/go/oasis-test-runner/oasis/network.go
+++ b/go/oasis-test-runner/oasis/network.go
@@ -640,7 +640,7 @@ func (net *Network) startOasisNode(
 	}
 	if len(subCmd) == 0 {
 		if net.iasProxy != nil {
-			cfg.IAS.ProxyAddress = []string{fmt.Sprintf("%s@127.0.0.1:%d", net.iasProxy.tlsPublicKey, net.iasProxy.grpcPort)}
+			cfg.IAS.ProxyAddresses = []string{fmt.Sprintf("%s@127.0.0.1:%d", net.iasProxy.tlsPublicKey, net.iasProxy.grpcPort)}
 			if net.iasProxy.mock {
 				cfg.IAS.DebugSkipVerify = true
 			}
 		}
diff --git a/go/oasis-test-runner/oasis/oasis.go b/go/oasis-test-runner/oasis/oasis.go
index 1a61a580a7c..e486742ca94 100644
--- a/go/oasis-test-runner/oasis/oasis.go
+++ b/go/oasis-test-runner/oasis/oasis.go
@@ -280,9 +280,9 @@ func (n *Node) Start() error {
 	for _, hosted := range n.hostedRuntimes {
 		if hosted.runtime.pruner.Strategy != "" {
-			n.Config.Runtime.HistoryPruner.Strategy = hosted.runtime.pruner.Strategy
-			n.Config.Runtime.HistoryPruner.Interval = hosted.runtime.pruner.Interval
-			n.Config.Runtime.HistoryPruner.NumKept = hosted.runtime.pruner.NumKept
+			n.Config.Runtime.Prune.Strategy = hosted.runtime.pruner.Strategy
+			n.Config.Runtime.Prune.Interval = hosted.runtime.pruner.Interval
+			n.Config.Runtime.Prune.NumKept = hosted.runtime.pruner.NumKept
 		}
 		n.Config.Runtime.Paths = append(n.Config.Runtime.Paths, hosted.runtime.BundlePaths()...)
diff --git a/go/oasis-test-runner/scenario/e2e/runtime/helpers_runtime.go b/go/oasis-test-runner/scenario/e2e/runtime/helpers_runtime.go
index e5e652e0770..e4839d88b26 100644
--- a/go/oasis-test-runner/scenario/e2e/runtime/helpers_runtime.go
+++ b/go/oasis-test-runner/scenario/e2e/runtime/helpers_runtime.go
@@ -304,10 +304,10 @@ func (sc *Scenario) EnableRuntimeDeployment(ctx context.Context, childEnv *env.E
 		return fmt.Errorf("failed to get current epoch: %w", err)
 	}
 
-	// The upgrade epoch should be set to at least 3 to provide compute workers with enough time
+	// The upgrade epoch should be set to at least 4 to provide compute workers with enough time
 	// to prepare for the upgrade. If it is set too low, the runtime will be suspended due to
 	// a lack of eligible compute workers.
-	upgradeEpoch := epoch + 3
+	upgradeEpoch := epoch + 4
 
 	// Fetch old deployment.
 	oldRtDsc, err := sc.Net.Controller().Registry.GetRuntime(ctx, &registry.GetRuntimeQuery{
diff --git a/go/runtime/config/config.go b/go/runtime/config/config.go
index 0295204cc9f..5df80a23876 100644
--- a/go/runtime/config/config.go
+++ b/go/runtime/config/config.go
@@ -75,7 +75,7 @@ type Config struct {
 	Environment RuntimeEnvironment `yaml:"environment"`
 
 	// History pruner configuration.
-	HistoryPruner HistoryPrunerConfig `yaml:"history_pruner,omitempty"`
+	Prune PruneConfig `yaml:"prune,omitempty"`
 
 	// Runtime ID -> local config.
 	RuntimeConfig map[string]interface{} `yaml:"config,omitempty"`
@@ -92,8 +92,8 @@ type Config struct {
 	PreWarmEpochs uint64 `yaml:"pre_warm_epochs,omitempty"`
 }
 
-// HistoryPrunerConfig is the history pruner configuration structure.
-type HistoryPrunerConfig struct {
+// PruneConfig is the history pruner configuration structure.
+type PruneConfig struct {
 	// History pruner strategy.
 	Strategy string `yaml:"strategy"`
 	// History pruning interval.
@@ -126,11 +126,11 @@ func (c *Config) Validate() error {
 		return fmt.Errorf("unknown runtime environment: %s", c.Environment)
 	}
 
-	switch c.HistoryPruner.Strategy {
+	switch c.Prune.Strategy {
 	case "none":
 	case "keep_last":
-		if c.HistoryPruner.Interval < 1*time.Second {
-			return fmt.Errorf("history_pruner.interval must be >= 1 second")
+		if c.Prune.Interval < 1*time.Second {
+			return fmt.Errorf("prune.interval must be >= 1 second")
 		}
 	default:
-		return fmt.Errorf("unknown runtime history pruner strategy: %s", c.Environment)
+		return fmt.Errorf("unknown runtime history pruner strategy: %s", c.Prune.Strategy)
@@ -147,7 +147,7 @@ func DefaultConfig() Config {
 		SandboxBinary: "/usr/bin/bwrap",
 		SGXLoader:     "",
 		Environment:   RuntimeEnvironmentAuto,
-		HistoryPruner: HistoryPrunerConfig{
+		Prune: PruneConfig{
 			Strategy: "none",
 			Interval: 2 * time.Minute,
 			NumKept:  600,
diff --git a/go/runtime/registry/config.go b/go/runtime/registry/config.go
index 76c1d164d08..b0d67007a31 100644
--- a/go/runtime/registry/config.go
+++ b/go/runtime/registry/config.go
@@ -293,18 +293,18 @@ func newConfig(dataDir string, commonStore *persistent.CommonStore, consensus co
 		cfg.Host = &rh
 	}
 
-	strategy := config.GlobalConfig.Runtime.HistoryPruner.Strategy
+	strategy := config.GlobalConfig.Runtime.Prune.Strategy
 	switch strings.ToLower(strategy) {
 	case history.PrunerStrategyNone:
 		cfg.History.Pruner = history.NewNonePruner()
 	case history.PrunerStrategyKeepLast:
-		numKept := config.GlobalConfig.Runtime.HistoryPruner.NumKept
+		numKept := config.GlobalConfig.Runtime.Prune.NumKept
 		cfg.History.Pruner = history.NewKeepLastPruner(numKept)
 	default:
 		return nil, fmt.Errorf("runtime/registry: unknown history pruner strategy: %s", strategy)
 	}
 
-	cfg.History.PruneInterval = config.GlobalConfig.Runtime.HistoryPruner.Interval
+	cfg.History.PruneInterval = config.GlobalConfig.Runtime.Prune.Interval
 	const minPruneInterval = 1 * time.Second
 	if cfg.History.PruneInterval < minPruneInterval {
 		cfg.History.PruneInterval = minPruneInterval
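
Reviewer note: a minimal before/after sketch of the node config keys this change migrates, assembled from the test fixture and assertions above. Values are illustrative and unrelated keys are omitted, so treat it as a sketch rather than an exhaustive mapping.

Old format:

  worker:
    p2p:
      peer_outbound_queue_size: 42
      max_num_peers: 46
      blocked_peers:
        - "1.2.3.4"
      connectedness_low_water: 48
  runtime:
    history:
      pruner:
        strategy: keep_last
  ias:
    proxy:
      address:
        - "foo@1.2.3.4:5678"

New format:

  p2p:
    gossipsub:
      peer_outbound_queue_size: 42
    connection_manager:
      max_num_peers: 46
    connection_gater:
      blocked_peers:
        - "1.2.3.4"
    peer_manager:
      connectedness_low_water: 48
  runtime:
    prune:
      strategy: keep_last
  ias:
    proxy_addresses:
      - "foo@1.2.3.4:5678"

The migration only moves keys between sections; value types are preserved as-is (e.g. ias.proxy.address must already be a list for the result to unmarshal into the []string proxy_addresses field).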