Skip to content

Commit

Permalink
Merge pull request #5392 from oasisprotocol/kostko/fix/multiple-fixes
Browse files Browse the repository at this point in the history
Multiple minor fixes
  • Loading branch information
kostko authored Oct 10, 2023
2 parents f7d45e0 + 7f9b81d commit e67f164
Show file tree
Hide file tree
Showing 13 changed files with 103 additions and 32 deletions.
2 changes: 1 addition & 1 deletion .changelog/5070.cfg.md
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ to log level. Use the `default` module to map the default log level.
- `consensus.tendermint.p2p.unconditional_peer` to
`consensus.p2p.unconditional_peers`.

- `ias.proxy.address` to `ias.proxy_address`.
- `ias.proxy.address` to `ias.proxy_addresses`.

- `ias.debug.skip_verify` to `ias.debug_skip_verify`.

Expand Down
1 change: 0 additions & 1 deletion .changelog/5376.feature.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@ The following `oasis-node` subcommands have been deprecated and should no
longer be used in favor of the [Oasis CLI]:

- `consensus *`
- `control clear-deregister` (no longer needed)
- `control runtime-stats`
- `debug bundle *`
- `governance *`
Expand Down
4 changes: 2 additions & 2 deletions go/ias/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ package config
// Config is the IAS configuration structure.
type Config struct {
// IAS proxy address in the form ID@HOST:PORT.
ProxyAddress []string `yaml:"proxy_address"`
ProxyAddresses []string `yaml:"proxy_addresses"`

// Skip IAS AVR signature verification (UNSAFE).
DebugSkipVerify bool `yaml:"debug_skip_verify,omitempty"`
Expand All @@ -18,7 +18,7 @@ func (c *Config) Validate() error {
// DefaultConfig returns the default configuration settings.
func DefaultConfig() Config {
return Config{
ProxyAddress: []string{},
ProxyAddresses: []string{},
DebugSkipVerify: false,
}
}
2 changes: 1 addition & 1 deletion go/ias/init.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,6 @@ func New(identity *identity.Identity) ([]api.Endpoint, error) {

return client.New(
identity,
config.GlobalConfig.IAS.ProxyAddress,
config.GlobalConfig.IAS.ProxyAddresses,
)
}
55 changes: 51 additions & 4 deletions go/oasis-node/cmd/config/migrate/migrate.go
Original file line number Diff line number Diff line change
Expand Up @@ -391,9 +391,9 @@ func doMigrateConfig(cmd *cobra.Command, args []string) {

if history, ok := m(runtime)["history"]; ok {
if pruner, ok := m(history)["pruner"]; ok {
logger.Info("runtime.history.pruner is now runtime.history_pruner")
logger.Info("runtime.history.pruner is now runtime.prune")
mkSubMap(newCfg, "runtime")
m(newCfg["runtime"])["history_pruner"] = m(pruner)
m(newCfg["runtime"])["prune"] = m(pruner)
delete(m(history), "pruner")
} else {
logger.Warn("input has invalid entries under runtime.history")
Expand Down Expand Up @@ -548,6 +548,53 @@ func doMigrateConfig(cmd *cobra.Command, args []string) {
m(m(newCfg["p2p"])["registration"])["addresses"] = addresses
delete(m(p2p), "addresses")
}

// Migrate gossipsub config.
for _, k := range []string{
"peer_outbound_queue_size",
"validate_queue_size",
"validate_concurrency",
"validate_throttle",
} {
if v, ok := m(p2p)[k]; ok {
logger.Info(fmt.Sprintf("worker.p2p.%s is now p2p.gossipsub.%s", k, k))
mkSubMap(newCfg, "p2p")
mkSubMap(m(newCfg["p2p"]), "gossipsub")
m(m(newCfg["p2p"])["gossipsub"])[k] = v
delete(m(p2p), k)
}
}

// Migrate connection manager config.
for _, k := range []string{
"max_num_peers",
"peer_grace_period",
"persistent_peers",
} {
if v, ok := m(p2p)[k]; ok {
logger.Info(fmt.Sprintf("worker.p2p.%s is now p2p.connection_manager.%s", k, k))
mkSubMap(newCfg, "p2p")
mkSubMap(m(newCfg["p2p"]), "connection_manager")
m(m(newCfg["p2p"])["connection_manager"])[k] = v
delete(m(p2p), k)
}
}

if blocked_peers, ok := m(p2p)["blocked_peers"]; ok {
logger.Info("worker.p2p.blocked_peers is now p2p.connection_gater.blocked_peers")
mkSubMap(newCfg, "p2p")
mkSubMap(m(newCfg["p2p"]), "connection_gater")
m(m(newCfg["p2p"])["connection_gater"])["blocked_peers"] = blocked_peers
delete(m(p2p), "blocked_peers")
}

if connectedness_low_water, ok := m(p2p)["connectedness_low_water"]; ok {
logger.Info("worker.p2p.connectedness_low_water is now p2p.peer_manager.connectedness_low_water")
mkSubMap(newCfg, "p2p")
mkSubMap(m(newCfg["p2p"]), "peer_manager")
m(m(newCfg["p2p"])["peer_manager"])["connectedness_low_water"] = connectedness_low_water
delete(m(p2p), "connectedness_low_water")
}
}
}

Expand Down Expand Up @@ -635,8 +682,8 @@ func doMigrateConfig(cmd *cobra.Command, args []string) {

if proxy, ok := m(ias)["proxy"]; ok {
if address, ok := m(proxy)["address"]; ok {
logger.Info("ias.proxy.address is now ias.proxy_address")
mIAS["proxy_address"] = address
logger.Info("ias.proxy.address is now ias.proxy_addresses")
mIAS["proxy_addresses"] = address
delete(m(proxy), "address")
}
}
Expand Down
31 changes: 28 additions & 3 deletions go/oasis-node/cmd/config/migrate/migrate_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,17 @@ genesis:
worker:
p2p:
port: 9002
peer_outbound_queue_size: 42
validate_queue_size: 43
validate_concurrency: 44
validate_throttle: 45
max_num_peers: 46
peer_grace_period: 47s
connectedness_low_water: 48
persistent_peers:
- "[email protected]:4321"
blocked_peers:
- "1.2.3.4"
registration:
entity: /storage/node/entity/entity.json
Expand Down Expand Up @@ -138,6 +149,10 @@ runtime:
environment: sgx
provisioner: sandboxed
history:
pruner:
strategy: keep_last
sgx:
loader: /oasis/bin/oasis-core-runtime-loader
Expand Down Expand Up @@ -480,13 +495,23 @@ func TestConfigMigrationComplex(t *testing.T) {
require.Equal(newConfig.P2P.Port, uint16(9002))
require.Equal(newConfig.P2P.Seeds[0], "HcDFrTp/MqRHtju5bCx6TIhIMd6X/[email protected]:26656")
require.Equal(newConfig.P2P.Seeds[1], "HcDFrTp/MqRHtju5bCx6TIhIMd6X/[email protected]:9200")
require.Equal(newConfig.P2P.Gossipsub.PeerOutboundQueueSize, 42)
require.Equal(newConfig.P2P.Gossipsub.ValidateQueueSize, 43)
require.Equal(newConfig.P2P.Gossipsub.ValidateConcurrency, 44)
require.Equal(newConfig.P2P.Gossipsub.ValidateThrottle, 45)
require.Equal(newConfig.P2P.ConnectionManager.MaxNumPeers, 46)
require.Equal(newConfig.P2P.ConnectionManager.PeerGracePeriod, 47*time.Second)
require.Equal(newConfig.P2P.ConnectionManager.PersistentPeers[0], "[email protected]:4321")
require.Equal(newConfig.P2P.ConnectionGater.BlockedPeerIPs[0], "1.2.3.4")
require.Equal(newConfig.P2P.PeerManager.ConnectednessLowWater, 48.0)
require.Equal(newConfig.Consensus.P2P.PersistentPeer[0], "[email protected]:5678")
require.Equal(newConfig.Consensus.P2P.UnconditionalPeer[0], "HcDFrTp/MqRHtju5bCx6TIhIMd6X/[email protected]:26656")
require.Equal(newConfig.Consensus.SentryUpstreamAddresses[0], "[email protected]:5678")
require.Equal(newConfig.IAS.ProxyAddress, []string{"[email protected]:4321"})
require.Equal(newConfig.IAS.ProxyAddresses, []string{"[email protected]:4321"})
require.Equal(newConfig.Pprof.BindAddress, "0.0.0.0:6666")
require.Equal(newConfig.Runtime.Environment, rtConfig.RuntimeEnvironmentSGX)
require.Equal(newConfig.Runtime.Provisioner, rtConfig.RuntimeProvisionerSandboxed)
require.Equal(newConfig.Runtime.Prune.Strategy, "keep_last")
require.Equal(newConfig.Runtime.SGXLoader, "/oasis/bin/oasis-core-runtime-loader")
require.Equal(newConfig.Runtime.Paths[0], "/oasis/runtimes/sapphire-paratime.orc")
require.Equal(newConfig.Runtime.Paths[1], "/oasis/runtimes/sapphire-paratime-previous.orc")
Expand Down Expand Up @@ -529,7 +554,7 @@ func TestConfigMigrationKM(t *testing.T) {
require.Equal(newConfig.P2P.Seeds[1], "[email protected]:9200")
require.Equal(newConfig.P2P.Registration.Addresses[0], "4.3.2.1:26656")
require.Equal(newConfig.Registration.Entity, "/km/etc/entity/entity.json")
require.Equal(newConfig.IAS.ProxyAddress, []string{"[email protected]:5678"})
require.Equal(newConfig.IAS.ProxyAddresses, []string{"[email protected]:5678"})
require.Equal(newConfig.Runtime.Environment, rtConfig.RuntimeEnvironmentSGX)
require.Equal(newConfig.Runtime.Provisioner, rtConfig.RuntimeProvisionerSandboxed)
require.Equal(newConfig.Runtime.SGXLoader, "/km/bin/oasis-core-runtime-loader")
Expand Down Expand Up @@ -603,7 +628,7 @@ func TestConfigMigrationDocsParaTime(t *testing.T) {
require.Equal(newConfig.P2P.Seeds[0], "H6u9MtuoWRKn5DKSgarj/[email protected]:26656")
require.Equal(newConfig.P2P.Seeds[1], "H6u9MtuoWRKn5DKSgarj/[email protected]:9200")
require.Equal(newConfig.Registration.Entity, "/node/entity/entity.json")
require.Equal(newConfig.IAS.ProxyAddress, []string{"[email protected]:1234"})
require.Equal(newConfig.IAS.ProxyAddresses, []string{"[email protected]:1234"})
require.Equal(newConfig.Runtime.SGXLoader, "/node/bin/oasis-core-runtime-loader")
require.Equal(newConfig.Runtime.Paths[0], "/node/runtimes/test.orc")
require.Equal(newConfig.Consensus.ListenAddress, "tcp://0.0.0.0:26656")
Expand Down
7 changes: 3 additions & 4 deletions go/oasis-node/cmd/control/control.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,10 +46,9 @@ var (
}

controlClearDeregisterCmd = &cobra.Command{
Use: "clear-deregister",
Short: "clear the forced node deregistration flag",
Run: doClearDeregister,
Deprecated: "it should no longer be necessary.",
Use: "clear-deregister",
Short: "clear the forced node deregistration flag",
Run: doClearDeregister,
}

controlUpgradeBinaryCmd = &cobra.Command{
Expand Down
1 change: 1 addition & 0 deletions go/oasis-node/cmd/node/unsafe_reset.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@ var (
nodeStateGlobs = []string{
"persistent-store.*.db",
cmtCommon.StateDir,
"tendermint", // XXX: Legacy filename for consensus state directory.
filepath.Join(runtimesGlob, history.DbFilename),
}

Expand Down
2 changes: 1 addition & 1 deletion go/oasis-test-runner/oasis/network.go
Original file line number Diff line number Diff line change
Expand Up @@ -640,7 +640,7 @@ func (net *Network) startOasisNode(
}
if len(subCmd) == 0 {
if net.iasProxy != nil {
cfg.IAS.ProxyAddress = []string{fmt.Sprintf("%[email protected]:%d", net.iasProxy.tlsPublicKey, net.iasProxy.grpcPort)}
cfg.IAS.ProxyAddresses = []string{fmt.Sprintf("%[email protected]:%d", net.iasProxy.tlsPublicKey, net.iasProxy.grpcPort)}
if net.iasProxy.mock {
cfg.IAS.DebugSkipVerify = true
}
Expand Down
6 changes: 3 additions & 3 deletions go/oasis-test-runner/oasis/oasis.go
Original file line number Diff line number Diff line change
Expand Up @@ -280,9 +280,9 @@ func (n *Node) Start() error {

for _, hosted := range n.hostedRuntimes {
if hosted.runtime.pruner.Strategy != "" {
n.Config.Runtime.HistoryPruner.Strategy = hosted.runtime.pruner.Strategy
n.Config.Runtime.HistoryPruner.Interval = hosted.runtime.pruner.Interval
n.Config.Runtime.HistoryPruner.NumKept = hosted.runtime.pruner.NumKept
n.Config.Runtime.Prune.Strategy = hosted.runtime.pruner.Strategy
n.Config.Runtime.Prune.Interval = hosted.runtime.pruner.Interval
n.Config.Runtime.Prune.NumKept = hosted.runtime.pruner.NumKept
}

n.Config.Runtime.Paths = append(n.Config.Runtime.Paths, hosted.runtime.BundlePaths()...)
Expand Down
4 changes: 2 additions & 2 deletions go/oasis-test-runner/scenario/e2e/runtime/helpers_runtime.go
Original file line number Diff line number Diff line change
Expand Up @@ -304,10 +304,10 @@ func (sc *Scenario) EnableRuntimeDeployment(ctx context.Context, childEnv *env.E
return fmt.Errorf("failed to get current epoch: %w", err)
}

// The upgrade epoch should be set to at least 3 to provide compute workers with enough time
// The upgrade epoch should be set to at least 4 to provide compute workers with enough time
// to prepare for the upgrade. If it is set too low, the runtime will be suspended due to
// a lack of eligible compute workers.
upgradeEpoch := epoch + 3
upgradeEpoch := epoch + 4

// Fetch old deployment.
oldRtDsc, err := sc.Net.Controller().Registry.GetRuntime(ctx, &registry.GetRuntimeQuery{
Expand Down
14 changes: 7 additions & 7 deletions go/runtime/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ type Config struct {
Environment RuntimeEnvironment `yaml:"environment"`

// History pruner configuration.
HistoryPruner HistoryPrunerConfig `yaml:"history_pruner,omitempty"`
Prune PruneConfig `yaml:"prune,omitempty"`

// Runtime ID -> local config.
RuntimeConfig map[string]interface{} `yaml:"config,omitempty"`
Expand All @@ -92,8 +92,8 @@ type Config struct {
PreWarmEpochs uint64 `yaml:"pre_warm_epochs,omitempty"`
}

// HistoryPrunerConfig is the history pruner configuration structure.
type HistoryPrunerConfig struct {
// PruneConfig is the history pruner configuration structure.
type PruneConfig struct {
// History pruner strategy.
Strategy string `yaml:"strategy"`
// History pruning interval.
Expand Down Expand Up @@ -126,11 +126,11 @@ func (c *Config) Validate() error {
return fmt.Errorf("unknown runtime environment: %s", c.Environment)
}

switch c.HistoryPruner.Strategy {
switch c.Prune.Strategy {
case "none":
case "keep_last":
if c.HistoryPruner.Interval < 1*time.Second {
return fmt.Errorf("history_pruner.interval must be >= 1 second")
if c.Prune.Interval < 1*time.Second {
return fmt.Errorf("prune.interval must be >= 1 second")
}
default:
return fmt.Errorf("unknown runtime history pruner strategy: %s", c.Environment)
Expand All @@ -147,7 +147,7 @@ func DefaultConfig() Config {
SandboxBinary: "/usr/bin/bwrap",
SGXLoader: "",
Environment: RuntimeEnvironmentAuto,
HistoryPruner: HistoryPrunerConfig{
Prune: PruneConfig{
Strategy: "none",
Interval: 2 * time.Minute,
NumKept: 600,
Expand Down
6 changes: 3 additions & 3 deletions go/runtime/registry/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -293,18 +293,18 @@ func newConfig(dataDir string, commonStore *persistent.CommonStore, consensus co
cfg.Host = &rh
}

strategy := config.GlobalConfig.Runtime.HistoryPruner.Strategy
strategy := config.GlobalConfig.Runtime.Prune.Strategy
switch strings.ToLower(strategy) {
case history.PrunerStrategyNone:
cfg.History.Pruner = history.NewNonePruner()
case history.PrunerStrategyKeepLast:
numKept := config.GlobalConfig.Runtime.HistoryPruner.NumKept
numKept := config.GlobalConfig.Runtime.Prune.NumKept
cfg.History.Pruner = history.NewKeepLastPruner(numKept)
default:
return nil, fmt.Errorf("runtime/registry: unknown history pruner strategy: %s", strategy)
}

cfg.History.PruneInterval = config.GlobalConfig.Runtime.HistoryPruner.Interval
cfg.History.PruneInterval = config.GlobalConfig.Runtime.Prune.Interval
const minPruneInterval = 1 * time.Second
if cfg.History.PruneInterval < minPruneInterval {
cfg.History.PruneInterval = minPruneInterval
Expand Down

0 comments on commit e67f164

Please sign in to comment.