From e36a9a5499960e6bea7d86f568bdd7688a2f7030 Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Sun, 5 May 2024 22:28:11 +0300 Subject: [PATCH 01/35] removed pending block as submit to SL retries forever; added total produced size accumulator to be used as submission trigger --- block/manager.go | 43 +++++++++++----------------- block/produce.go | 20 +++++++++++++ block/submit.go | 66 +++++++++++++++++-------------------------- block/submit_test.go | 5 ++-- config/config.go | 5 ++++ config/config_test.go | 4 +-- testutil/block.go | 2 +- 7 files changed, 74 insertions(+), 71 deletions(-) diff --git a/block/manager.go b/block/manager.go index 5f59f2ddd..3896a9c61 100644 --- a/block/manager.go +++ b/block/manager.go @@ -58,14 +58,11 @@ type Manager struct { SyncTarget atomic.Uint64 // Block production - shouldProduceBlocksCh chan bool - produceEmptyBlockCh chan bool - lastSubmissionTime atomic.Int64 - - /* - Guard against triggering a new batch submission when the old one is still going on (taking a while) - */ - submitBatchMutex sync.Mutex + accumulatedProducedSize uint64 + shouldProduceBlocksCh chan bool + shouldSubmitBatchCh chan bool + produceEmptyBlockCh chan bool + lastSubmissionTime atomic.Int64 /* Protect against producing two blocks at once if the first one is taking a while */ @@ -80,12 +77,6 @@ type Manager struct { */ retrieverMutex sync.Mutex - // pendingBatch is the result of the last DA submission // that is pending settlement layer submission. // It is used to avoid double submission of the same batch. // It's protected by submitBatchMutex. - pendingBatch *PendingBatch - logger types.Logger // Cached blocks and commits for applying at future heights. The blocks may not be valid, because @@ -123,20 +114,20 @@ func NewManager( } agg := &Manager{ - Pubsub: pubsub, - p2pClient: p2pClient, - ProposerKey: proposerKey, - Conf: conf, - Genesis: genesis, - LastState: s, - Store: store, - Executor: exec, - DAClient: dalc, - SLClient: settlementClient, - Retriever: dalc.(da.BatchRetriever), - // channels are buffered to avoid blocking on input/output operations, buffer sizes are arbitrary + Pubsub: pubsub, + p2pClient: p2pClient, + ProposerKey: proposerKey, + Conf: conf, + Genesis: genesis, + LastState: s, + Store: store, + Executor: exec, + DAClient: dalc, + SLClient: settlementClient, + Retriever: dalc.(da.BatchRetriever), SyncTargetDiode: diodes.NewOneToOne(1, nil), shouldProduceBlocksCh: make(chan bool, 1), + shouldSubmitBatchCh: make(chan bool, 10), // allow capacity for multiple pending batches to support bursts produceEmptyBlockCh: make(chan bool, 1), logger: logger, blockCache: make(map[uint64]CachedBlock), diff --git a/block/produce.go b/block/produce.go index 89172609c..7ae3f936c 100644 --- a/block/produce.go +++ b/block/produce.go @@ -82,6 +82,9 @@ func (m *Manager) ProduceAndGossipBlock(ctx context.Context, allowEmpty bool) er return fmt.Errorf("produce block: %w", err) } + size := uint64(block.ToProto().Size() + commit.ToProto().Size()) + _ = m.updateAccumaltedSize(size) + if err := m.gossipBlock(ctx, *block, *commit); err != nil { return fmt.Errorf("gossip block: %w", err) } @@ -89,6 +92,23 @@ func (m *Manager) ProduceAndGossipBlock(ctx context.Context, allowEmpty bool) er return nil } +func (m *Manager) updateAccumaltedSize(size uint64) bool { + m.accumulatedProducedSize += size + + // Check if accumulated size is greater than the max size + // TODO: allow some tolerance for block size (aim for BlockBatchMaxSize +- 10%) + if m.accumulatedProducedSize > 
m.Conf.BlockBatchMaxSizeBytes { + select { + case m.shouldSubmitBatchCh <- true: + default: + m.logger.Debug("new batch accumulated, but channel is full, skipping submission signal") + } + m.accumulatedProducedSize = 0 + return true + } + return false +} + func (m *Manager) produceBlock(allowEmpty bool) (*types.Block, *types.Commit, error) { m.produceBlockMutex.Lock() defer m.produceBlockMutex.Unlock() diff --git a/block/submit.go b/block/submit.go index e374b997e..648c1b99c 100644 --- a/block/submit.go +++ b/block/submit.go @@ -17,25 +17,27 @@ func (m *Manager) SubmitLoop(ctx context.Context) { ticker := time.NewTicker(m.Conf.BatchSubmitMaxTime) defer ticker.Stop() - // TODO: add submission trigger by batch size (should be signaled from the block production) for { + var err error select { // Context canceled case <-ctx.Done(): return + // Trigger by block production + case <-m.shouldSubmitBatchCh: + err = m.HandleSubmissionTrigger(ctx) // trigger by time case <-ticker.C: - err := m.HandleSubmissionTrigger(ctx) - if errors.Is(err, gerr.ErrAborted) { - continue - } - if errors.Is(err, gerr.ErrUnauthenticated) { - panic(fmt.Errorf("handle submission trigger: %w", err)) - } - if err != nil { - m.logger.Error("handle submission trigger", "error", err) - } + err = m.HandleSubmissionTrigger(ctx) } + if err == nil { + continue + } + if errors.Is(err, gerr.ErrUnauthenticated) { + panic(fmt.Errorf("handle submission trigger: %w", err)) + } + + m.logger.Error("handle submission trigger", "error", err) } } @@ -44,11 +46,6 @@ // pass through during the batch submission process because proofs required for ibc messages only exist on the next block. // Finally, it submits the next batch of blocks and updates the sync target to the height of the last block in the submitted batch. func (m *Manager) HandleSubmissionTrigger(ctx context.Context) error { - if !m.submitBatchMutex.TryLock() { - return fmt.Errorf("batch submission already in process, skipping submission: %w", gerr.ErrAborted) - } - defer m.submitBatchMutex.Unlock() - // Load current sync target and height to determine if new blocks are available for submission. 
if m.Store.Height() <= m.SyncTarget.Load() { return nil // No new blocks have been produced @@ -60,31 +57,20 @@ func (m *Manager) HandleSubmissionTrigger(ctx context.Context) error { m.logger.Error("Produce and gossip empty block.", "error", err) } - if m.pendingBatch == nil { - nextBatch, err := m.createNextBatch() - if err != nil { - return fmt.Errorf("create next batch: %w", err) - } - - resultSubmitToDA, err := m.submitNextBatchToDA(nextBatch) - if err != nil { - return fmt.Errorf("submit next batch to da: %w", err) - } - - m.pendingBatch = &PendingBatch{ - daResult: resultSubmitToDA, - batch: nextBatch, - } - } else { - m.logger.Info("Pending batch already exists.", "startHeight", m.pendingBatch.batch.StartHeight, "endHeight", m.pendingBatch.batch.EndHeight) + nextBatch, err := m.createNextBatch() + if err != nil { + return fmt.Errorf("create next batch: %w", err) } - syncHeight, err := m.submitPendingBatchToSL(*m.pendingBatch) + resultSubmitToDA, err := m.submitNextBatchToDA(nextBatch) if err != nil { - return fmt.Errorf("submit pending batch to sl: %w", err) + return fmt.Errorf("submit next batch to da: %w", err) } - m.pendingBatch = nil + syncHeight, err := m.submitPendingBatchToSL(nextBatch, resultSubmitToDA) + if err != nil { + panic(fmt.Errorf("submit pending batch to sl: %w", err)) + } // Update the syncTarget to the height of the last block in the last batch as seen by this node. m.UpdateSyncParams(syncHeight) @@ -135,10 +121,10 @@ func (m *Manager) submitNextBatchToDA(nextBatch *types.Batch) (*da.ResultSubmitB return &resultSubmitToDA, nil } -func (m *Manager) submitPendingBatchToSL(p PendingBatch) (uint64, error) { - startHeight := p.batch.StartHeight - actualEndHeight := p.batch.EndHeight - err := m.SLClient.SubmitBatch(p.batch, m.DAClient.GetClientType(), p.daResult) +func (m *Manager) submitPendingBatchToSL(batch *types.Batch, daResult *da.ResultSubmitBatch) (uint64, error) { + startHeight := batch.StartHeight + actualEndHeight := batch.EndHeight + err := m.SLClient.SubmitBatch(batch, m.DAClient.GetClientType(), daResult) if err != nil { return 0, fmt.Errorf("sl client submit batch: startheight: %d: actual end height: %d: %w", startHeight, actualEndHeight, err) } diff --git a/block/submit_test.go b/block/submit_test.go index ea913415c..9284012b9 100644 --- a/block/submit_test.go +++ b/block/submit_test.go @@ -96,8 +96,9 @@ func TestBatchSubmissionFailedSubmission(t *testing.T) { // try to submit, we expect failure mockLayerI.On("SubmitBatch", mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("Failed to submit batch")).Once() - manager.HandleSubmissionTrigger(ctx) - assert.EqualValues(t, 0, manager.SyncTarget.Load()) + assert.Panics(t, func() { + manager.HandleSubmissionTrigger(ctx) + }) // try to submit again, we expect success mockLayerI.On("SubmitBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() diff --git a/config/config.go b/config/config.go index c881a9acc..e620e45f8 100644 --- a/config/config.go +++ b/config/config.go @@ -49,6 +49,7 @@ type BlockManagerConfig struct { BatchSubmitMaxTime time.Duration `mapstructure:"batch_submit_max_time"` NamespaceID string `mapstructure:"namespace_id"` // The size of the batch in blocks. Every batch we'll write to the DA and the settlement layer. + //TODO: remove BlockBatchSize uint64 `mapstructure:"block_batch_size"` // The size of the batch in Bytes. Every batch we'll write to the DA and the settlement layer. 
BlockBatchMaxSizeBytes uint64 `mapstructure:"block_batch_max_size_bytes"` @@ -133,6 +134,10 @@ func (c BlockManagerConfig) Validate() error { return fmt.Errorf("batch_submit_max_time must be greater than block_time") } + if c.BatchSubmitMaxTime < c.EmptyBlocksMaxTime { + return fmt.Errorf("batch_submit_max_time must be greater than empty_blocks_max_time") + } + if c.BlockBatchSize <= 0 { return fmt.Errorf("block_batch_size must be positive") } diff --git a/config/config_test.go b/config/config_test.go index 36fbc8206..023370bdd 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -202,8 +202,8 @@ func fullNodeConfig() config.NodeConfig { return config.NodeConfig{ BlockManagerConfig: config.BlockManagerConfig{ BlockTime: 1 * time.Second, - EmptyBlocksMaxTime: 2 * time.Second, - BatchSubmitMaxTime: 1 * time.Second, + EmptyBlocksMaxTime: 20 * time.Second, + BatchSubmitMaxTime: 20 * time.Second, NamespaceID: "test", BlockBatchSize: 1, BlockBatchMaxSizeBytes: 1, diff --git a/testutil/block.go b/testutil/block.go index 25ead4119..c3631a3c6 100644 --- a/testutil/block.go +++ b/testutil/block.go @@ -148,7 +148,7 @@ func GetManagerConfig() config.BlockManagerConfig { return config.BlockManagerConfig{ BlockTime: 100 * time.Millisecond, BlockBatchSize: DefaultTestBatchSize, - BlockBatchMaxSizeBytes: 1000, + BlockBatchMaxSizeBytes: 1000000, BatchSubmitMaxTime: 30 * time.Minute, NamespaceID: "0102030405060708", GossipedBlocksCacheSize: 50, From 0d4c64131cd72dd380ec7330e36223d1d37a7b88 Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Sun, 5 May 2024 22:36:12 +0300 Subject: [PATCH 02/35] removed BlockBatchSize and fix UT --- block/manager_test.go | 1 - block/submit_test.go | 6 ++---- block/types.go | 6 ------ config/config.go | 7 ------- config/config_test.go | 14 +++----------- config/defaults.go | 1 - config/flags.go | 5 ----- config/toml.go | 6 +++--- node/integration_test.go | 1 - node/node_test.go | 1 - rpc/client/client_test.go | 6 ------ rpc/json/service_test.go | 1 - testutil/block.go | 1 - testutil/node.go | 1 - testutil/types.go | 2 -- 15 files changed, 8 insertions(+), 51 deletions(-) diff --git a/block/manager_test.go b/block/manager_test.go index d8f2eee57..73c660f8c 100644 --- a/block/manager_test.go +++ b/block/manager_test.go @@ -408,7 +408,6 @@ func TestCreateNextDABatchWithBytesLimit(t *testing.T) { require.NoError(err) // Init manager managerConfig := testutil.GetManagerConfig() - managerConfig.BlockBatchSize = 1000 managerConfig.BlockBatchMaxSizeBytes = batchLimitBytes // enough for 2 block, not enough for 10 blocks manager, err := testutil.GetManager(managerConfig, nil, nil, 1, 1, 0, proxyApp, nil) require.NoError(err) diff --git a/block/submit_test.go b/block/submit_test.go index 9284012b9..1e02589ed 100644 --- a/block/submit_test.go +++ b/block/submit_test.go @@ -50,7 +50,7 @@ func TestBatchSubmissionHappyFlow(t *testing.T) { // submit and validate sync target manager.HandleSubmissionTrigger(ctx) - assert.EqualValues(t, 1, manager.SyncTarget.Load()) + assert.EqualValues(t, manager.Store.Height(), manager.SyncTarget.Load()) } func TestBatchSubmissionFailedSubmission(t *testing.T) { @@ -103,13 +103,12 @@ func TestBatchSubmissionFailedSubmission(t *testing.T) { // try to submit again, we expect success mockLayerI.On("SubmitBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() manager.HandleSubmissionTrigger(ctx) - assert.EqualValues(t, 1, manager.SyncTarget.Load()) + assert.EqualValues(t, manager.Store.Height(), manager.SyncTarget.Load()) } func 
TestBatchSubmissionAfterTimeout(t *testing.T) { const ( // large batch size, so we expect the trigger to be the timeout - batchSize = 100000 submitTimeout = 2 * time.Second blockTime = 200 * time.Millisecond runTime = submitTimeout + 1*time.Second @@ -128,7 +127,6 @@ func TestBatchSubmissionAfterTimeout(t *testing.T) { BlockTime: blockTime, EmptyBlocksMaxTime: 0, BatchSubmitMaxTime: submitTimeout, - BlockBatchSize: batchSize, BlockBatchMaxSizeBytes: 1000, GossipedBlocksCacheSize: 50, } diff --git a/block/types.go b/block/types.go index 199b80782..3f5744f8f 100644 --- a/block/types.go +++ b/block/types.go @@ -1,7 +1,6 @@ package block import ( - "github.com/dymensionxyz/dymint/da" "github.com/dymensionxyz/dymint/types" ) @@ -19,11 +18,6 @@ type blockMetaData struct { daHeight uint64 } -type PendingBatch struct { - daResult *da.ResultSubmitBatch - batch *types.Batch -} - type CachedBlock struct { Block *types.Block Commit *types.Commit diff --git a/config/config.go b/config/config.go index e620e45f8..94cb47db5 100644 --- a/config/config.go +++ b/config/config.go @@ -48,9 +48,6 @@ type BlockManagerConfig struct { // BatchSubmitMaxTime defines how long should block manager wait for before submitting batch BatchSubmitMaxTime time.Duration `mapstructure:"batch_submit_max_time"` NamespaceID string `mapstructure:"namespace_id"` - // The size of the batch in blocks. Every batch we'll write to the DA and the settlement layer. - //TODO: remove - BlockBatchSize uint64 `mapstructure:"block_batch_size"` // The size of the batch in Bytes. Every batch we'll write to the DA and the settlement layer. BlockBatchMaxSizeBytes uint64 `mapstructure:"block_batch_max_size_bytes"` // The number of messages cached by gossipsub protocol @@ -138,10 +135,6 @@ func (c BlockManagerConfig) Validate() error { return fmt.Errorf("batch_submit_max_time must be greater than empty_blocks_max_time") } - if c.BlockBatchSize <= 0 { - return fmt.Errorf("block_batch_size must be positive") - } - if c.BlockBatchMaxSizeBytes <= 0 { return fmt.Errorf("block_batch_size_bytes must be positive") } diff --git a/config/config_test.go b/config/config_test.go index 023370bdd..edcfc6683 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1,7 +1,6 @@ package config_test import ( - "fmt" "testing" "time" @@ -31,7 +30,7 @@ func TestViperAndCobra(t *testing.T) { assert.NoError(cmd.Flags().Set(config.FlagEmptyBlocksMaxTime, "2000s")) assert.NoError(cmd.Flags().Set(config.FlagBatchSubmitMaxTime, "3000s")) assert.NoError(cmd.Flags().Set(config.FlagNamespaceID, "0102030405060708")) - assert.NoError(cmd.Flags().Set(config.FlagBlockBatchSize, "10")) + assert.NoError(cmd.Flags().Set(config.FlagBlockBatchMaxSizeBytes, "1000")) assert.NoError(nc.GetViperConfig(cmd, dir)) @@ -40,7 +39,7 @@ func TestViperAndCobra(t *testing.T) { assert.Equal(`{"json":true}`, nc.DAConfig) assert.Equal(1234*time.Second, nc.BlockTime) assert.Equal("0102030405060708", nc.NamespaceID) - assert.Equal(uint64(10), nc.BlockBatchSize) + assert.Equal(uint64(1000), nc.BlockManagerConfig.BlockBatchMaxSizeBytes) } func TestNodeConfig_Validate(t *testing.T) { @@ -85,12 +84,6 @@ func TestNodeConfig_Validate(t *testing.T) { nc.BlockManagerConfig.BlockTime = 2 }, wantErr: assert.Error, - }, { - name: "missing block batch size", - malleate: func(nc *config.NodeConfig) { - nc.BlockManagerConfig.BlockBatchSize = 0 - }, - wantErr: assert.Error, }, { name: "missing block batch max size bytes", malleate: func(nc *config.NodeConfig) { @@ -193,7 +186,7 @@ func TestNodeConfig_Validate(t 
*testing.T) { if tt.malleate != nil { tt.malleate(&nc) } - tt.wantErr(t, nc.Validate(), fmt.Sprintf("Validate()")) + tt.wantErr(t, nc.Validate(), "Validate()") }) } } @@ -205,7 +198,6 @@ func fullNodeConfig() config.NodeConfig { EmptyBlocksMaxTime: 20 * time.Second, BatchSubmitMaxTime: 20 * time.Second, NamespaceID: "test", - BlockBatchSize: 1, BlockBatchMaxSizeBytes: 1, GossipedBlocksCacheSize: 1, }, diff --git a/config/defaults.go b/config/defaults.go index 7b43205fe..94e7cbfea 100644 --- a/config/defaults.go +++ b/config/defaults.go @@ -32,7 +32,6 @@ func DefaultConfig(home, chainId string) *NodeConfig { EmptyBlocksMaxTime: 3600 * time.Second, BatchSubmitMaxTime: 100 * time.Second, NamespaceID: "0000000000000000ffff", - BlockBatchSize: 500, BlockBatchMaxSizeBytes: 500000, GossipedBlocksCacheSize: 50, }, diff --git a/config/flags.go b/config/flags.go index 4f10a0ddb..411b8c521 100644 --- a/config/flags.go +++ b/config/flags.go @@ -14,7 +14,6 @@ const ( FlagEmptyBlocksMaxTime = "dymint.empty_blocks_max_time" FlagBatchSubmitMaxTime = "dymint.batch_submit_max_time" FlagNamespaceID = "dymint.namespace_id" - FlagBlockBatchSize = "dymint.block_batch_size" FlagBlockBatchMaxSizeBytes = "dymint.block_batch_max_size_bytes" ) @@ -46,7 +45,6 @@ func AddNodeFlags(cmd *cobra.Command) { cmd.Flags().Duration(FlagEmptyBlocksMaxTime, def.EmptyBlocksMaxTime, "max time for empty blocks (for aggregator mode)") cmd.Flags().Duration(FlagBatchSubmitMaxTime, def.BatchSubmitMaxTime, "max time for batch submit (for aggregator mode)") cmd.Flags().String(FlagNamespaceID, def.NamespaceID, "namespace identifies (8 bytes in hex)") - cmd.Flags().Uint64(FlagBlockBatchSize, def.BlockBatchSize, "block batch size") cmd.Flags().Uint64(FlagBlockBatchMaxSizeBytes, def.BlockBatchMaxSizeBytes, "block batch size in bytes") cmd.Flags().String(FlagSettlementLayer, def.SettlementLayer, "Settlement Layer Client name") @@ -82,9 +80,6 @@ func BindDymintFlags(cmd *cobra.Command, v *viper.Viper) error { if err := v.BindPFlag("namespace_id", cmd.Flags().Lookup(FlagNamespaceID)); err != nil { return err } - if err := v.BindPFlag("block_batch_size", cmd.Flags().Lookup(FlagBlockBatchSize)); err != nil { - return err - } if err := v.BindPFlag("block_batch_max_size_bytes", cmd.Flags().Lookup(FlagBlockBatchMaxSizeBytes)); err != nil { return err } diff --git a/config/toml.go b/config/toml.go index 75b0e1e28..c98da9720 100644 --- a/config/toml.go +++ b/config/toml.go @@ -73,16 +73,16 @@ block_time = "{{ .BlockManagerConfig.BlockTime }}" empty_blocks_max_time = "{{ .BlockManagerConfig.EmptyBlocksMaxTime }}" # triggers to submit batch to DA and settlement (both required) -block_batch_size = {{ .BlockManagerConfig.BlockBatchSize }} batch_submit_max_time = "{{ .BlockManagerConfig.BatchSubmitMaxTime }}" +# max size of batch in bytes that can be accepted by DA +block_batch_max_size_bytes = {{ .BlockManagerConfig.BlockBatchMaxSizeBytes }} + ### da config ### da_layer = "{{ .DALayer }}" # mock, celestia, avail namespace_id = "{{ .BlockManagerConfig.NamespaceID }}" da_config = "{{ .DAConfig }}" -# max size of batch in bytes that can be accepted by DA -block_batch_max_size_bytes = {{ .BlockManagerConfig.BlockBatchMaxSizeBytes }} # max number of cached messages by gossipsub protocol gossiped_blocks_cache_size = {{ .BlockManagerConfig.GossipedBlocksCacheSize }} diff --git a/node/integration_test.go b/node/integration_test.go index fc2b4a237..c7bd9e4fa 100644 --- a/node/integration_test.go +++ b/node/integration_test.go @@ -44,7 +44,6 @@ func 
TestAggregatorMode(t *testing.T) { proposerKey := hex.EncodeToString(pubkeyBytes) blockManagerConfig := config.BlockManagerConfig{ - BlockBatchSize: 1, BlockTime: 1 * time.Second, BatchSubmitMaxTime: 60 * time.Second, BlockBatchMaxSizeBytes: 1000, diff --git a/node/node_test.go b/node/node_test.go index 6c126fdc4..439137c36 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -71,7 +71,6 @@ func TestMempoolDirectly(t *testing.T) { Aggregator: false, BlockManagerConfig: config.BlockManagerConfig{ BlockTime: 100 * time.Millisecond, - BlockBatchSize: 2, BatchSubmitMaxTime: 60 * time.Second, BlockBatchMaxSizeBytes: 1000, GossipedBlocksCacheSize: 50, diff --git a/rpc/client/client_test.go b/rpc/client/client_test.go index 006dd4910..06abb7c24 100644 --- a/rpc/client/client_test.go +++ b/rpc/client/client_test.go @@ -103,7 +103,6 @@ func TestGenesisChunked(t *testing.T) { Aggregator: false, BlockManagerConfig: config.BlockManagerConfig{ BlockTime: 100 * time.Millisecond, - BlockBatchSize: 1, BatchSubmitMaxTime: 60 * time.Second, BlockBatchMaxSizeBytes: 1000, GossipedBlocksCacheSize: 50, @@ -457,7 +456,6 @@ func TestTx(t *testing.T) { SettlementLayer: "mock", Aggregator: true, BlockManagerConfig: config.BlockManagerConfig{ - BlockBatchSize: 1, BlockTime: 200 * time.Millisecond, BatchSubmitMaxTime: 60 * time.Second, BlockBatchMaxSizeBytes: 1000, @@ -731,7 +729,6 @@ func TestValidatorSetHandling(t *testing.T) { Aggregator: true, BlockManagerConfig: config.BlockManagerConfig{ BlockTime: 10 * time.Millisecond, - BlockBatchSize: 1, BatchSubmitMaxTime: 60 * time.Second, BlockBatchMaxSizeBytes: 1000, GossipedBlocksCacheSize: 50, @@ -865,7 +862,6 @@ func getRPC(t *testing.T) (*tmmocks.MockApplication, *Client) { Aggregator: false, BlockManagerConfig: config.BlockManagerConfig{ BlockTime: 100 * time.Millisecond, - BlockBatchSize: 1, BatchSubmitMaxTime: 60 * time.Second, BlockBatchMaxSizeBytes: 1000, GossipedBlocksCacheSize: 50, @@ -971,7 +967,6 @@ func TestMempool2Nodes(t *testing.T) { RollappID: rollappID, }, BlockManagerConfig: config.BlockManagerConfig{ - BlockBatchSize: 1, BlockTime: 100 * time.Millisecond, BatchSubmitMaxTime: 60 * time.Second, BlockBatchMaxSizeBytes: 1000, @@ -993,7 +988,6 @@ func TestMempool2Nodes(t *testing.T) { RollappID: rollappID, }, BlockManagerConfig: config.BlockManagerConfig{ - BlockBatchSize: 1, BlockTime: 100 * time.Millisecond, BatchSubmitMaxTime: 60 * time.Second, BlockBatchMaxSizeBytes: 1000, diff --git a/rpc/json/service_test.go b/rpc/json/service_test.go index 4ba39840f..a7161f4bb 100644 --- a/rpc/json/service_test.go +++ b/rpc/json/service_test.go @@ -306,7 +306,6 @@ func getRPC(t *testing.T) (*tmmocks.MockApplication, *client.Client) { EmptyBlocksMaxTime: 0, BatchSubmitMaxTime: 30 * time.Minute, NamespaceID: "0102030405060708", - BlockBatchSize: 10000, BlockBatchMaxSizeBytes: 1000, GossipedBlocksCacheSize: 50, }, diff --git a/testutil/block.go b/testutil/block.go index c3631a3c6..f61c6f947 100644 --- a/testutil/block.go +++ b/testutil/block.go @@ -147,7 +147,6 @@ func initSettlementLayerMock(settlementlc settlement.LayerI, proposer string, pu func GetManagerConfig() config.BlockManagerConfig { return config.BlockManagerConfig{ BlockTime: 100 * time.Millisecond, - BlockBatchSize: DefaultTestBatchSize, BlockBatchMaxSizeBytes: 1000000, BatchSubmitMaxTime: 30 * time.Minute, NamespaceID: "0102030405060708", diff --git a/testutil/node.go b/testutil/node.go index 7bf7b6b31..3ec69c3b7 100644 --- a/testutil/node.go +++ b/testutil/node.go @@ -28,7 +28,6 @@ func 
CreateNode(isAggregator bool, blockManagerConfig *config.BlockManagerConfig if blockManagerConfig == nil { blockManagerConfig = &config.BlockManagerConfig{ - BlockBatchSize: 1, BlockTime: 100 * time.Millisecond, BatchSubmitMaxTime: 60 * time.Second, BlockBatchMaxSizeBytes: 1000, diff --git a/testutil/types.go b/testutil/types.go index 2362130c9..4a00e24d2 100644 --- a/testutil/types.go +++ b/testutil/types.go @@ -17,8 +17,6 @@ import ( ) const ( - // DefaultBatchSize is the default batch size for testing - DefaultBatchSize = 5 // BlockVersion is the default block version for testing BlockVersion = 1 // AppVersion is the default app version for testing From 5803cda12395741ef2fc7a5abe36fac9c1dbf384 Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Mon, 6 May 2024 11:35:00 +0300 Subject: [PATCH 03/35] moved accumulated count to produce to be mutex protected --- block/produce.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/block/produce.go b/block/produce.go index 7ae3f936c..9a1be7722 100644 --- a/block/produce.go +++ b/block/produce.go @@ -82,9 +82,6 @@ func (m *Manager) ProduceAndGossipBlock(ctx context.Context, allowEmpty bool) er return fmt.Errorf("produce block: %w", err) } - size := uint64(block.ToProto().Size() + commit.ToProto().Size()) - _ = m.updateAccumaltedSize(size) - if err := m.gossipBlock(ctx, *block, *commit); err != nil { return fmt.Errorf("gossip block: %w", err) } @@ -193,7 +190,10 @@ func (m *Manager) produceBlock(allowEmpty bool) (*types.Block, *types.Commit, er return nil, nil, fmt.Errorf("apply block: %w: %w", err, ErrNonRecoverable) } - m.logger.Info("block created", "height", newHeight, "num_tx", len(block.Data.Txs)) + size := uint64(block.ToProto().Size() + commit.ToProto().Size()) + _ = m.updateAccumaltedSize(size) + + m.logger.Info("block created", "height", newHeight, "num_tx", len(block.Data.Txs), "accumulated_size", m.accumulatedProducedSize) types.RollappBlockSizeBytesGauge.Set(float64(len(block.Data.Txs))) types.RollappBlockSizeTxsGauge.Set(float64(len(block.Data.Txs))) types.RollappHeightGauge.Set(float64(newHeight)) From aef3bc4c8b4e742dd3a0b1626a5b56aeb1c0c73c Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Mon, 6 May 2024 12:56:05 +0300 Subject: [PATCH 04/35] refactored error handling --- block/manager.go | 1 + block/manager_test.go | 4 ++-- block/submit.go | 44 +++++++++++++++++-------------------------- 3 files changed, 20 insertions(+), 29 deletions(-) diff --git a/block/manager.go b/block/manager.go index 3896a9c61..bf30498ab 100644 --- a/block/manager.go +++ b/block/manager.go @@ -58,6 +58,7 @@ type Manager struct { SyncTarget atomic.Uint64 // Block production + //TODO: populate the accumualtedSize on startup accumulatedProducedSize uint64 shouldProduceBlocksCh chan bool shouldSubmitBatchCh chan bool diff --git a/block/manager_test.go b/block/manager_test.go index 73c660f8c..8e92825e3 100644 --- a/block/manager_test.go +++ b/block/manager_test.go @@ -443,7 +443,7 @@ func TestCreateNextDABatchWithBytesLimit(t *testing.T) { // Call createNextDABatch function startHeight := manager.SyncTarget.Load() + 1 endHeight := startHeight + uint64(tc.blocksToProduce) - 1 - batch, err := manager.CreateNextDABatch(startHeight, endHeight) + batch, err := manager.CreateNextBatchToSubmit(startHeight, endHeight) assert.NoError(err) assert.Equal(batch.StartHeight, startHeight) @@ -458,7 +458,7 @@ func TestCreateNextDABatchWithBytesLimit(t *testing.T) { // validate next added block to batch would have been actually too big // First 
relax the byte limit so we could produce a larger batch manager.Conf.BlockBatchMaxSizeBytes = 10 * manager.Conf.BlockBatchMaxSizeBytes - newBatch, err := manager.CreateNextDABatch(startHeight, batch.EndHeight+1) + newBatch, err := manager.CreateNextBatchToSubmit(startHeight, batch.EndHeight+1) assert.Greater(newBatch.ToProto().Size(), batchLimitBytes) assert.NoError(err) diff --git a/block/submit.go b/block/submit.go index 648c1b99c..d7fe3e141 100644 --- a/block/submit.go +++ b/block/submit.go @@ -2,42 +2,36 @@ package block import ( "context" - "errors" "fmt" "time" - "github.com/dymensionxyz/dymint/gerr" - "github.com/dymensionxyz/dymint/da" "github.com/dymensionxyz/dymint/types" ) -// SubmitLoop submits a batch of blocks to the DA and SL layers on a time interval. +// SubmitLoop is the main loop for submitting blocks to the DA and SL layers. +// It is triggered by the shouldSubmitBatchCh channel, which is triggered by the block production loop when the accumulated produced size is enough to submit. +// It is also triggered by a BatchSubmitMaxTime timer to limit the time between submissions. func (m *Manager) SubmitLoop(ctx context.Context) { ticker := time.NewTicker(m.Conf.BatchSubmitMaxTime) defer ticker.Stop() for { - var err error select { // Context canceled case <-ctx.Done(): return - // Trigger by block production - case <-m.shouldSubmitBatchCh: - err = m.HandleSubmissionTrigger(ctx) - // trigger by time - case <-ticker.C: - err = m.HandleSubmissionTrigger(ctx) - } - if err == nil { - continue + case <-m.shouldSubmitBatchCh: // Trigger by block production + case <-ticker.C: // trigger by max time } - if errors.Is(err, gerr.ErrUnauthenticated) { + + // modular submission methods have their own retry mechanism. + // if an error is returned, we assume it's unrecoverable. + err := m.HandleSubmissionTrigger(ctx) + if err != nil { panic(fmt.Errorf("handle submission trigger: %w", err)) } - - m.logger.Error("handle submission trigger", "error", err) + ticker.Reset(m.Conf.BatchSubmitMaxTime) } } @@ -67,9 +61,9 @@ func (m *Manager) HandleSubmissionTrigger(ctx context.Context) error { return fmt.Errorf("submit next batch to da: %w", err) } - syncHeight, err := m.submitPendingBatchToSL(nextBatch, resultSubmitToDA) + syncHeight, err := m.submitNextBatchToSL(nextBatch, resultSubmitToDA) if err != nil { - panic(fmt.Errorf("submit pending batch to sl: %w", err)) + return fmt.Errorf("submit pending batch to sl: %w", err) } // Update the syncTarget to the height of the last block in the last batch as seen by this node. @@ -81,7 +75,7 @@ func (m *Manager) createNextBatch() (*types.Batch, error) { // Create the batch startHeight := m.SyncTarget.Load() + 1 endHeight := m.Store.Height() - nextBatch, err := m.CreateNextDABatch(startHeight, endHeight) + nextBatch, err := m.CreateNextBatchToSubmit(startHeight, endHeight) if err != nil { m.logger.Error("create next batch", "startHeight", startHeight, "endHeight", endHeight, "error", err) return nil, err @@ -98,14 +92,10 @@ func (m *Manager) submitNextBatchToDA(nextBatch *types.Batch) (*da.ResultSubmitB startHeight := nextBatch.StartHeight actualEndHeight := nextBatch.EndHeight - isLastBlockEmpty, err := m.isBlockEmpty(actualEndHeight) - if err != nil { - m.logger.Error("validate last block in batch is empty", "startHeight", startHeight, "endHeight", actualEndHeight, "error", err) - return nil, err - } // Verify the last block in the batch is an empty block and that no ibc messages have accidentally passed through. 
// This block may not be empty if another block has passed it in line. If that's the case our empty block request will // be sent to the next batch. + isLastBlockEmpty := nextBatch.Blocks[len(nextBatch.Blocks)-1].Data.Txs == nil if !isLastBlockEmpty { m.logger.Info("Last block in batch is not an empty block. Requesting for an empty block creation", "endHeight", actualEndHeight) m.produceEmptyBlockCh <- true @@ -121,7 +111,7 @@ func (m *Manager) submitNextBatchToDA(nextBatch *types.Batch) (*da.ResultSubmitB return &resultSubmitToDA, nil } -func (m *Manager) submitPendingBatchToSL(batch *types.Batch, daResult *da.ResultSubmitBatch) (uint64, error) { +func (m *Manager) submitNextBatchToSL(batch *types.Batch, daResult *da.ResultSubmitBatch) (uint64, error) { startHeight := batch.StartHeight actualEndHeight := batch.EndHeight err := m.SLClient.SubmitBatch(batch, m.DAClient.GetClientType(), daResult) @@ -143,7 +133,7 @@ func (m *Manager) ValidateBatch(batch *types.Batch) error { return nil } -func (m *Manager) CreateNextDABatch(startHeight uint64, endHeight uint64) (*types.Batch, error) { +func (m *Manager) CreateNextBatchToSubmit(startHeight uint64, endHeight uint64) (*types.Batch, error) { var height uint64 // Create the batch batchSize := endHeight - startHeight + 1 From 1e766ab122a16df2263b4b1fe1f4754b82312563 Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Mon, 6 May 2024 14:33:19 +0300 Subject: [PATCH 05/35] removed healthEvents from layers. set by manager on submission skew --- block/manager.go | 10 +-- block/manager_test.go | 2 + block/produce.go | 46 ++++++++----- block/submit.go | 15 +---- block/submit_test.go | 4 +- da/avail/avail.go | 11 +--- da/celestia/celestia.go | 25 +------ da/celestia/rpc_test.go | 32 +++------ da/events.go | 30 --------- da/utils.go | 25 ------- node/node.go | 35 ---------- node/node_test.go | 90 -------------------------- settlement/dymension/dymension.go | 22 ++----- settlement/dymension/dymension_test.go | 26 -------- settlement/events.go | 8 --- utils/event/funcs.go | 2 +- 16 files changed, 52 insertions(+), 331 deletions(-) delete mode 100644 da/events.go delete mode 100644 da/utils.go diff --git a/block/manager.go b/block/manager.go index bf30498ab..bed1f022d 100644 --- a/block/manager.go +++ b/block/manager.go @@ -15,7 +15,6 @@ import ( "code.cloudfoundry.org/go-diodes" - "github.com/dymensionxyz/dymint/node/events" "github.com/dymensionxyz/dymint/p2p" "github.com/libp2p/go-libp2p/core/crypto" @@ -129,7 +128,7 @@ func NewManager( SyncTargetDiode: diodes.NewOneToOne(1, nil), shouldProduceBlocksCh: make(chan bool, 1), shouldSubmitBatchCh: make(chan bool, 10), //allow capacity for multiple pending batches to support bursts - produceEmptyBlockCh: make(chan bool, 1), + produceEmptyBlockCh: make(chan bool, 5), //TODO: arbitrary number for now, gonna be refactored logger: logger, blockCache: make(map[uint64]CachedBlock), } @@ -172,7 +171,6 @@ func (m *Manager) Start(ctx context.Context, isAggregator bool) error { } if isAggregator { - go uevent.MustSubscribe(ctx, m.Pubsub, "nodeHealth", events.QueryHealthStatus, m.onNodeHealthStatus, m.logger) go m.ProduceBlockLoop(ctx) go m.SubmitLoop(ctx) } else { @@ -223,12 +221,6 @@ func getAddress(key crypto.PrivKey) ([]byte, error) { return tmcrypto.AddressHash(rawKey), nil } -func (m *Manager) onNodeHealthStatus(event pubsub.Message) { - eventData := event.Data().(*events.DataHealthStatus) - m.logger.Info("Received node health status event.", "eventData", eventData) - m.shouldProduceBlocksCh <- eventData.Error == 
nil -} - // TODO: move to gossip.go // onNewGossipedBlock will take a block and apply it func (m *Manager) onNewGossipedBlock(event pubsub.Message) { diff --git a/block/manager_test.go b/block/manager_test.go index 8e92825e3..f3d82bd1c 100644 --- a/block/manager_test.go +++ b/block/manager_test.go @@ -216,6 +216,8 @@ func TestProducePendingBlock(t *testing.T) { assert.Equal(t, block.Header.Hash(), *(*[32]byte)(manager.LastState.LastBlockID.Hash)) } +//FIXME: REFACTOR THIS TEST + // TestBlockProductionNodeHealth tests the different scenarios of block production when the node health is toggling. // The test does the following: // 1. Send healthy event and validate blocks are produced diff --git a/block/produce.go b/block/produce.go index 9a1be7722..c287fa591 100644 --- a/block/produce.go +++ b/block/produce.go @@ -6,6 +6,9 @@ import ( "fmt" "time" + "github.com/dymensionxyz/dymint/node/events" + uevent "github.com/dymensionxyz/dymint/utils/event" + "github.com/dymensionxyz/dymint/store" "github.com/dymensionxyz/dymint/types" @@ -66,12 +69,27 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context) { continue } resetEmptyBlocksTimer() - case shouldProduceBlocks := <-m.shouldProduceBlocksCh: - for !shouldProduceBlocks { - m.logger.Info("block production paused - awaiting positive continuation signal") - shouldProduceBlocks = <-m.shouldProduceBlocksCh + + // Check if we should submit the accumulated data + if m.shouldSubmitBatch() { + select { + case m.shouldSubmitBatchCh <- true: + default: + m.logger.Error("new batch accumulated, but channel is full, stopping block production until the signal is consumed") + // emit unhealthy event for the node + evt := &events.DataHealthStatus{Error: fmt.Errorf("submission channel is full")} + uevent.MustPublish(ctx, m.Pubsub, evt, events.HealthStatusList) + // wait for the signal to be consumed + m.shouldSubmitBatchCh <- true + m.logger.Info("resumed block production") + // emit healthy event for the node + evt = &events.DataHealthStatus{Error: nil} + uevent.MustPublish(ctx, m.Pubsub, evt, events.HealthStatusList) + } + m.produceBlockMutex.Lock() + m.accumulatedProducedSize = 0 + m.produceBlockMutex.Unlock() } - m.logger.Info("resumed block production") } } } @@ -89,21 +107,15 @@ func (m *Manager) ProduceAndGossipBlock(ctx context.Context, allowEmpty bool) er return nil } -func (m *Manager) updateAccumaltedSize(size uint64) bool { +func (m *Manager) updateAccumaltedSize(size uint64) { m.accumulatedProducedSize += size +} +// check if we should submit the accumulated data +func (m *Manager) shouldSubmitBatch() bool { // Check if accumulated size is greater than the max size // TODO: allow some tolerance for block size (aim for BlockBatchMaxSize +- 10%) - if m.accumulatedProducedSize > m.Conf.BlockBatchMaxSizeBytes { - select { - case m.shouldSubmitBatchCh <- true: - default: - m.logger.Debug("new batch accumulated, but channel is full, skipping submission signal") - } - m.accumulatedProducedSize = 0 - return true - } - return false + return m.accumulatedProducedSize > m.Conf.BlockBatchMaxSizeBytes } func (m *Manager) produceBlock(allowEmpty bool) (*types.Block, *types.Commit, error) { @@ -191,7 +203,7 @@ func (m *Manager) produceBlock(allowEmpty bool) (*types.Block, *types.Commit, er } size := uint64(block.ToProto().Size() + commit.ToProto().Size()) - _ = m.updateAccumaltedSize(size) + m.updateAccumaltedSize(size) m.logger.Info("block created", "height", newHeight, "num_tx", len(block.Data.Txs), "accumulated_size", m.accumulatedProducedSize) 
types.RollappBlockSizeBytesGauge.Set(float64(len(block.Data.Txs))) diff --git a/block/submit.go b/block/submit.go index d7fe3e141..4c8f1b57f 100644 --- a/block/submit.go +++ b/block/submit.go @@ -98,6 +98,7 @@ func (m *Manager) submitNextBatchToDA(nextBatch *types.Batch) (*da.ResultSubmitB isLastBlockEmpty := nextBatch.Blocks[len(nextBatch.Blocks)-1].Data.Txs == nil if !isLastBlockEmpty { m.logger.Info("Last block in batch is not an empty block. Requesting for an empty block creation", "endHeight", actualEndHeight) + //TODO: remove from here and move to the block production loop. m.produceEmptyBlockCh <- true } @@ -105,8 +106,7 @@ func (m *Manager) submitNextBatchToDA(nextBatch *types.Batch) (*da.ResultSubmitB m.logger.Info("Submitting next batch", "startHeight", startHeight, "endHeight", actualEndHeight, "size", nextBatch.ToProto().Size()) resultSubmitToDA := m.DAClient.SubmitBatch(nextBatch) if resultSubmitToDA.Code != da.StatusSuccess { - err = fmt.Errorf("submit next batch to DA Layer: %s", resultSubmitToDA.Message) - return nil, err + return nil, fmt.Errorf("submit next batch to DA Layer: %s", resultSubmitToDA.Message) } return &resultSubmitToDA, nil } @@ -177,14 +177,3 @@ func (m *Manager) CreateNextBatchToSubmit(startHeight uint64, endHeight uint64) batch.EndHeight = height - 1 return batch, nil } - -func (m *Manager) isBlockEmpty(endHeight uint64) (isEmpty bool, err error) { - m.logger.Debug("Verifying last block in batch is an empty block", "endHeight", endHeight, "height") - lastBlock, err := m.Store.LoadBlock(endHeight) - if err != nil { - m.logger.Error("load block", "height", endHeight, "error", err) - return false, err - } - - return len(lastBlock.Data.Txs) == 0, nil -} diff --git a/block/submit_test.go b/block/submit_test.go index 1e02589ed..bfd8a121d 100644 --- a/block/submit_test.go +++ b/block/submit_test.go @@ -96,9 +96,7 @@ func TestBatchSubmissionFailedSubmission(t *testing.T) { // try to submit, we expect failure mockLayerI.On("SubmitBatch", mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("Failed to submit batch")).Once() - assert.Panics(t, func() { - manager.HandleSubmissionTrigger(ctx) - }) + assert.Error(t, manager.HandleSubmissionTrigger(ctx)) // try to submit again, we expect success mockLayerI.On("SubmitBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() diff --git a/da/avail/avail.go b/da/avail/avail.go index 1fc440397..0d4e66a22 100644 --- a/da/avail/avail.go +++ b/da/avail/avail.go @@ -285,19 +285,11 @@ func (c *DataAvailabilityLayerClient) submitBatchLoop(dataBlob []byte) da.Result } } - res, err := da.SubmitBatchHealthEventHelper(c.pubsubServer, c.ctx, err) - if err != nil { - return res - } - c.logger.Error("Submitted bad health event: trying again.", "error", err) + c.logger.Error(err.Error()) continue } c.logger.Debug("Successfully submitted batch.") - res, err := da.SubmitBatchHealthEventHelper(c.pubsubServer, c.ctx, nil) - if err != nil { - return res - } return da.ResultSubmitBatch{ BaseResult: da.BaseResult{ Code: da.StatusSuccess, @@ -308,7 +300,6 @@ func (c *DataAvailabilityLayerClient) submitBatchLoop(dataBlob []byte) da.Result Height: daBlockHeight, }, } - } } } diff --git a/da/celestia/celestia.go b/da/celestia/celestia.go index d717581f8..d1b2fee0b 100644 --- a/da/celestia/celestia.go +++ b/da/celestia/celestia.go @@ -212,14 +212,7 @@ func (c *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS // TODO(srene): Split batch in multiple blobs if necessary if supported height, commitment, err 
:= c.submit(data) if err != nil { - err = fmt.Errorf("submit batch: %w", err) - - res, err := da.SubmitBatchHealthEventHelper(c.pubsubServer, c.ctx, err) - if err != nil { - return res - } - - c.logger.Error("Submitted bad health event: trying again.", "error", err) + c.logger.Error("submit batch", "error", err) backoff.Sleep() continue } @@ -235,14 +228,7 @@ func (c *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS result := c.CheckBatchAvailability(daMetaData) if result.Code != da.StatusSuccess { - err = fmt.Errorf("check batch availability: submitted batch but did not get availability success status: %w", err) - - res, err := da.SubmitBatchHealthEventHelper(c.pubsubServer, c.ctx, err) - if err != nil { - return res - } - - c.logger.Error("Submitted bad health event: trying again.", "error", err) + c.logger.Error("check batch availability: submitted batch but did not get availability success status", "error", err) backoff.Sleep() continue } @@ -250,12 +236,7 @@ func (c *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS daMetaData.Index = result.CheckMetaData.Index daMetaData.Length = result.CheckMetaData.Length - res, err := da.SubmitBatchHealthEventHelper(c.pubsubServer, c.ctx, nil) - if err != nil { - return res - } - - c.logger.Debug("Batch accepted, emitted healthy event.") + c.logger.Debug("Batch accepted") return da.ResultSubmitBatch{ BaseResult: da.BaseResult{ diff --git a/da/celestia/rpc_test.go b/da/celestia/rpc_test.go index 11f622533..0823207dc 100644 --- a/da/celestia/rpc_test.go +++ b/da/celestia/rpc_test.go @@ -2,7 +2,6 @@ package celestia_test import ( "bytes" - "context" "crypto/sha256" "encoding/json" "errors" @@ -74,7 +73,6 @@ func TestSubmitBatch(t *testing.T) { submitPFBReturn []interface{} sumbitPFDRun func(args mock.Arguments) expectedInclusionHeight uint64 - expectedHealthEvent *da.EventDataHealth getProofReturn []interface{} getProofDRun func(args mock.Arguments) includedReturn []interface{} @@ -89,17 +87,15 @@ func TestSubmitBatch(t *testing.T) { getProofDRun: func(args mock.Arguments) { time.Sleep(10 * time.Millisecond) }, includedRun: func(args mock.Arguments) { time.Sleep(10 * time.Millisecond) }, expectedInclusionHeight: uint64(1234), - expectedHealthEvent: &da.EventDataHealth{}, }, { - name: "TestSubmitPFBErrored", - submitPFBReturn: []interface{}{uint64(0), timeOutErr}, - getProofReturn: []interface{}{&blobProof, nil}, - includedReturn: []interface{}{true, nil}, - sumbitPFDRun: func(args mock.Arguments) { time.Sleep(10 * time.Millisecond) }, - getProofDRun: func(args mock.Arguments) { time.Sleep(10 * time.Millisecond) }, - includedRun: func(args mock.Arguments) { time.Sleep(10 * time.Millisecond) }, - expectedHealthEvent: &da.EventDataHealth{Error: timeOutErr}, + name: "TestSubmitPFBErrored", + submitPFBReturn: []interface{}{uint64(0), timeOutErr}, + getProofReturn: []interface{}{&blobProof, nil}, + includedReturn: []interface{}{true, nil}, + sumbitPFDRun: func(args mock.Arguments) { time.Sleep(10 * time.Millisecond) }, + getProofDRun: func(args mock.Arguments) { time.Sleep(10 * time.Millisecond) }, + includedRun: func(args mock.Arguments) { time.Sleep(10 * time.Millisecond) }, }, } for _, tc := range cases { @@ -118,8 +114,6 @@ func TestSubmitBatch(t *testing.T) { pubsubServer := pubsub.NewServer() err = pubsubServer.Start() require.NoError(err, tc.name) - HealthSubscription, err := pubsubServer.Subscribe(context.Background(), "testSubmitBatch", da.EventQueryDAHealthStatus) - assert.NoError(err, tc.name) // 
Start the DALC dalc := celestia.DataAvailabilityLayerClient{} err = dalc.Init(configBytes, pubsubServer, nil, log.TestingLogger(), options...) @@ -149,20 +143,10 @@ func TestSubmitBatch(t *testing.T) { if res.SubmitMetaData != nil { assert.Equal(res.SubmitMetaData.Height, tc.expectedInclusionHeight, tc.name) } - time.Sleep(100 * time.Millisecond) done <- true }() - select { - case event := <-HealthSubscription.Out(): - healthStatusEvent := event.Data().(*da.EventDataHealth) - t.Log("got health status event", healthStatusEvent.Error) - assert.ErrorIs(healthStatusEvent.Error, tc.expectedHealthEvent.Error, tc.name) - case <-time.After(1 * time.Second): - t.Error("timeout. expected health status event but didn't get one") - case <-done: - t.Error("submit done. expected health status event but didn't get one") - } + time.Sleep(100 * time.Millisecond) err = dalc.Stop() require.NoError(err, tc.name) // Wait for the goroutines to finish before accessing the mock calls diff --git a/da/events.go b/da/events.go deleted file mode 100644 index 4e1935bc6..000000000 --- a/da/events.go +++ /dev/null @@ -1,30 +0,0 @@ -package da - -import uevent "github.com/dymensionxyz/dymint/utils/event" - -// Type keys -const ( - // EventTypeKey is a reserved composite key for event name. - EventTypeKey = "da.event" -) - -// Types - -const ( - EventHealthStatus = "DAHealthStatus" -) - -// Convenience objects - -var EventHealthStatusList = map[string][]string{EventTypeKey: {EventHealthStatus}} - -// Data - -type EventDataHealth struct { - // Error is the error that was encountered in case of a health check failure, nil implies healthy - Error error -} - -// Queries - -var EventQueryDAHealthStatus = uevent.QueryFor(EventTypeKey, EventHealthStatus) diff --git a/da/utils.go b/da/utils.go deleted file mode 100644 index 550fd7785..000000000 --- a/da/utils.go +++ /dev/null @@ -1,25 +0,0 @@ -package da - -import ( - "context" - - "github.com/tendermint/tendermint/libs/pubsub" -) - -func SubmitBatchHealthEventHelper(pubsubServer *pubsub.Server, ctx context.Context, err error) (ResultSubmitBatch, error) { - err = pubsubServer.PublishWithEvents( - ctx, - &EventDataHealth{Error: err}, - EventHealthStatusList, - ) - if err != nil { - return ResultSubmitBatch{ - BaseResult: BaseResult{ - Code: StatusError, - Message: err.Error(), - Error: err, - }, - }, err - } - return ResultSubmitBatch{}, nil -} diff --git a/node/node.go b/node/node.go index 5ec14b931..f9440bbc6 100644 --- a/node/node.go +++ b/node/node.go @@ -10,10 +10,6 @@ import ( "sync" "time" - uevent "github.com/dymensionxyz/dymint/utils/event" - - "github.com/dymensionxyz/dymint/node/events" - "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/libp2p/go-libp2p/core/crypto" @@ -310,8 +306,6 @@ func (n *Node) OnStart() error { } }() - n.startEventListener() - // start the block manager err = n.blockManager.Start(n.Ctx, n.conf.Aggregator) if err != nil { @@ -407,35 +401,6 @@ func createAndStartIndexerService( return indexerService, txIndexer, blockIndexer, nil } -// All events listeners should be registered here -func (n *Node) startEventListener() { - go uevent.MustSubscribe(n.Ctx, n.PubsubServer, "settlementHealthStatusHandler", settlement.EventQuerySettlementHealthStatus, n.onBaseLayerHealthUpdate, n.Logger) - go uevent.MustSubscribe(n.Ctx, n.PubsubServer, "daHealthStatusHandler", da.EventQueryDAHealthStatus, n.onBaseLayerHealthUpdate, n.Logger) -} - -func (n *Node) onBaseLayerHealthUpdate(event pubsub.Message) { - haveNewErr := false - oldStatus := 
n.baseLayerHealth.get() - switch e := event.Data().(type) { - case *settlement.EventDataHealth: - haveNewErr = e.Error != nil - n.baseLayerHealth.setSettlement(e.Error) - case *da.EventDataHealth: - haveNewErr = e.Error != nil - n.baseLayerHealth.setDA(e.Error) - } - newStatus := n.baseLayerHealth.get() - newStatusIsDifferentFromOldOne := (oldStatus == nil) != (newStatus == nil) - shouldPublish := newStatusIsDifferentFromOldOne || haveNewErr - if shouldPublish { - evt := &events.DataHealthStatus{Error: newStatus} - if newStatus != nil { - n.Logger.Error("Node is unhealthy: base layer has problem.", "error", newStatus) - } - uevent.MustPublish(n.Ctx, n.PubsubServer, evt, events.HealthStatusList) - } -} - func (n *Node) startPrometheusServer() error { if n.conf.Instrumentation != nil && n.conf.Instrumentation.Prometheus { http.Handle("/metrics", promhttp.Handler()) diff --git a/node/node_test.go b/node/node_test.go index 439137c36..40c3f2dd9 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -3,7 +3,6 @@ package node_test import ( "context" "crypto/rand" - "errors" "testing" "time" @@ -11,10 +10,8 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/dymensionxyz/dymint/da" "github.com/dymensionxyz/dymint/mempool" "github.com/dymensionxyz/dymint/node" - "github.com/dymensionxyz/dymint/node/events" "github.com/dymensionxyz/dymint/settlement" "github.com/dymensionxyz/dymint/testutil" @@ -122,90 +119,3 @@ func TestMempoolDirectly(t *testing.T) { assert.Equal(int64(4*len("tx*")), node.Mempool.SizeBytes()) } - -func TestHealthStatusEventHandler(t *testing.T) { - assert := assert.New(t) - require := require.New(t) - node, err := testutil.CreateNode(false, nil) - require.NoError(err) - require.NotNil(node) - - err = node.Start() - assert.NoError(err) - // wait for node to start - time.Sleep(1 * time.Second) - - slError := errors.New("settlement") - daError := errors.New("da") - - cases := []struct { - name string - baseLayerHealthStatusEvent map[string][]string - baseLayerHealthStatusEventData interface{} - expectHealthStatusEventEmitted bool - expectedError error - }{ - { - name: "settlement layer is healthy and da layer is healthy", - baseLayerHealthStatusEvent: settlement.EventHealthStatusList, - baseLayerHealthStatusEventData: &settlement.EventDataHealth{Error: slError}, - expectHealthStatusEventEmitted: true, - expectedError: slError, - }, - { - name: "now da also becomes unhealthy", - baseLayerHealthStatusEvent: da.EventHealthStatusList, - baseLayerHealthStatusEventData: &da.EventDataHealth{Error: daError}, - expectHealthStatusEventEmitted: true, - expectedError: daError, - }, - { - name: "now the settlement layer becomes healthy", - baseLayerHealthStatusEvent: settlement.EventHealthStatusList, - baseLayerHealthStatusEventData: &settlement.EventDataHealth{}, - expectHealthStatusEventEmitted: false, - expectedError: nil, - }, - { - name: "now the da layer becomes healthy, so we expect the health status to be healthy and the event to be emitted", - baseLayerHealthStatusEvent: da.EventHealthStatusList, - baseLayerHealthStatusEventData: &da.EventDataHealth{}, - expectHealthStatusEventEmitted: true, - expectedError: nil, - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - done := make(chan bool, 1) - ready := make(chan bool, 1) - go func() { - HealthSubscription, err := node.PubsubServer.Subscribe(node.Ctx, c.name, events.QueryHealthStatus) - ready <- true - assert.NoError(err) - select { - case event := 
<-HealthSubscription.Out(): - if !c.expectHealthStatusEventEmitted { - t.Error("didn't expect health status event but got one") - } - healthStatusEvent := event.Data().(*events.DataHealthStatus) - assert.ErrorIs(healthStatusEvent.Error, c.expectedError) - done <- true - break - case <-time.After(1 * time.Second): - if c.expectHealthStatusEventEmitted { - t.Error("expected health status event but didn't get one") - } - done <- true - break - } - }() - <-ready - // Emit an event. - node.PubsubServer.PublishWithEvents(context.Background(), c.baseLayerHealthStatusEventData, c.baseLayerHealthStatusEvent) - <-done - }) - } - err = node.Stop() - assert.NoError(err) -} diff --git a/settlement/dymension/dymension.go b/settlement/dymension/dymension.go index f5a89ac7e..6f471228b 100644 --- a/settlement/dymension/dymension.go +++ b/settlement/dymension/dymension.go @@ -230,13 +230,8 @@ func (d *HubClient) PostBatch(batch *types.Batch, daClient da.Client, daResult * default: err := d.submitBatch(msgUpdateState) if err != nil { - - err = fmt.Errorf("submit batch: %w", err) - - uevent.MustPublish(d.ctx, d.pubsub, &settlement.EventDataHealth{Error: err}, settlement.EventHealthStatusList) - d.logger.Error( - "Submitted bad health event: trying again.", + "submit batch", "startHeight", batch.StartHeight, "endHeight", @@ -263,9 +258,7 @@ func (d *HubClient) PostBatch(batch *types.Batch, daClient da.Client, daResult * return fmt.Errorf("subscription cancelled: %w", err) case <-subscription.Out(): - uevent.MustPublish(d.ctx, d.pubsub, &settlement.EventDataHealth{}, settlement.EventHealthStatusList) - d.logger.Debug("Batch accepted: emitted healthy event.", "startHeight", batch.StartHeight, "endHeight", batch.EndHeight) - + d.logger.Debug("Batch accepted", "startHeight", batch.StartHeight, "endHeight", batch.EndHeight) return nil case <-timer.C: @@ -273,13 +266,8 @@ func (d *HubClient) PostBatch(batch *types.Batch, daClient da.Client, daResult * // layer, and we've just missed the event. 
includedBatch, err := d.waitForBatchInclusion(batch.StartHeight) if err != nil { - - err = fmt.Errorf("wait for batch inclusion: %w", err) - - uevent.MustPublish(d.ctx, d.pubsub, &settlement.EventDataHealth{Error: err}, settlement.EventHealthStatusList) - d.logger.Error( - "Submitted bad health event: trying again.", + "wait for batch inclusion", "startHeight", batch.StartHeight, "endHeight", @@ -293,9 +281,7 @@ func (d *HubClient) PostBatch(batch *types.Batch, daClient da.Client, daResult * } // all good - uevent.MustPublish(d.ctx, d.pubsub, &settlement.EventDataHealth{}, settlement.EventHealthStatusList) - d.logger.Info("Batch accepted, emitted healthy event.", "startHeight", includedBatch.StartHeight, "endHeight", includedBatch.EndHeight) - + d.logger.Info("Batch accepted", "startHeight", includedBatch.StartHeight, "endHeight", includedBatch.EndHeight) return nil } } diff --git a/settlement/dymension/dymension_test.go b/settlement/dymension/dymension_test.go index a1a8d80fd..e726be8d2 100644 --- a/settlement/dymension/dymension_test.go +++ b/settlement/dymension/dymension_test.go @@ -1,11 +1,8 @@ package dymension_test import ( - "context" "errors" "fmt" - "sync" - "sync/atomic" "testing" "time" @@ -114,9 +111,6 @@ func TestPostBatch(t *testing.T) { require.NoError(err) batch, err := testutil.GenerateBatch(1, 1, propserKey) require.NoError(err) - // Subscribe to the health status event - HealthSubscription, err := pubsubServer.Subscribe(context.Background(), "testPostBatch", settlement.EventQuerySettlementHealthStatus) - assert.NoError(t, err) cases := []struct { name string @@ -161,10 +155,6 @@ func TestPostBatch(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - // Init the wait group and set the number of expected events - var wg sync.WaitGroup - eventsCount := 1 - wg.Add(eventsCount) // Reset the mock functions testutil.UnsetMockFn(cosmosClientMock.On("BroadcastTx")) testutil.UnsetMockFn(rollappQueryClientMock.On("StateInfo")) @@ -193,20 +183,6 @@ func TestPostBatch(t *testing.T) { require.NoError(err) err = hubClient.Start() require.NoError(err) - // Handle the various events that are emitted and timeout if we don't get them - var eventsReceivedCount int64 - go func() { - select { - case healthEvent := <-HealthSubscription.Out(): - t.Logf("got health event: %v", healthEvent) - healthStatusEvent := healthEvent.Data().(*settlement.EventDataHealth) - assert.ErrorIs(t, healthStatusEvent.Error, c.expectedError) - atomic.AddInt64(&eventsReceivedCount, 1) - case <-time.After(10 * time.Second): - t.Error("Didn't receive health event") - } - wg.Done() - }() resultSubmitBatch := &da.ResultSubmitBatch{} resultSubmitBatch.SubmitMetaData = &da.DASubmitMetaData{} @@ -237,8 +213,6 @@ func TestPostBatch(t *testing.T) { }, } } - wg.Wait() - assert.Equal(t, eventsCount, int(eventsReceivedCount)) // Stop the hub client and wait for it to stop err = hubClient.Stop() require.NoError(err) diff --git a/settlement/events.go b/settlement/events.go index 4a53098e1..77eead134 100644 --- a/settlement/events.go +++ b/settlement/events.go @@ -18,13 +18,11 @@ const ( // EventNewBatchAccepted should be emitted internally in order to communicate between the settlement layer and the hub client EventNewBatchAccepted = "EventNewBatchAccepted" EventSequencersListUpdated = "SequencersListUpdated" - EventHealthStatus = "SettlementHealthStatus" ) // Convenience objects var ( - EventHealthStatusList = map[string][]string{EventTypeKey: {EventHealthStatus}} EventNewBatchAcceptedList = 
map[string][]string{EventTypeKey: {EventNewBatchAccepted}} ) @@ -42,13 +40,7 @@ type EventDataSequencersListUpdated struct { Sequencers []types.Sequencer } -type EventDataHealth struct { - // Error is the error that was encountered in case of a health check failure, nil implies healthy - Error error -} - // Queries var ( EventQueryNewSettlementBatchAccepted = uevent.QueryFor(EventTypeKey, EventNewBatchAccepted) - EventQuerySettlementHealthStatus = uevent.QueryFor(EventTypeKey, EventHealthStatus) ) diff --git a/utils/event/funcs.go b/utils/event/funcs.go index 696af1226..85a8be224 100644 --- a/utils/event/funcs.go +++ b/utils/event/funcs.go @@ -41,7 +41,7 @@ func MustSubscribe( } // MustPublish submits an event or panics -func MustPublish(ctx context.Context, pubsubServer *pubsub.Server, msg any, events map[string][]string) { +func MustPublish(ctx context.Context, pubsubServer *pubsub.Server, msg interface{}, events map[string][]string) { err := pubsubServer.PublishWithEvents(ctx, msg, events) if err != nil { panic(err) From 8c4df904d20e71b163e3d99049c8936ac0bed9aa Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Mon, 6 May 2024 15:08:52 +0300 Subject: [PATCH 06/35] cleanup --- block/manager.go | 40 +++++++++++++++++++--------------------- 1 file changed, 19 insertions(+), 21 deletions(-) diff --git a/block/manager.go b/block/manager.go index bed1f022d..1a329dcf5 100644 --- a/block/manager.go +++ b/block/manager.go @@ -51,15 +51,12 @@ type Manager struct { SLClient settlement.LayerI // Data retrieval - Retriever da.BatchRetriever - + Retriever da.BatchRetriever SyncTargetDiode diodes.Diode SyncTarget atomic.Uint64 // Block production - //TODO: populate the accumualtedSize on startup accumulatedProducedSize uint64 - shouldProduceBlocksCh chan bool shouldSubmitBatchCh chan bool produceEmptyBlockCh chan bool lastSubmissionTime atomic.Int64 @@ -114,23 +111,23 @@ func NewManager( } agg := &Manager{ - Pubsub: pubsub, - p2pClient: p2pClient, - ProposerKey: proposerKey, - Conf: conf, - Genesis: genesis, - LastState: s, - Store: store, - Executor: exec, - DAClient: dalc, - SLClient: settlementClient, - Retriever: dalc.(da.BatchRetriever), - SyncTargetDiode: diodes.NewOneToOne(1, nil), - shouldProduceBlocksCh: make(chan bool, 1), - shouldSubmitBatchCh: make(chan bool, 10), //allow capacity for multiple pending batches to support bursts - produceEmptyBlockCh: make(chan bool, 5), //TODO: arbitrary number for now, gonna be refactored - logger: logger, - blockCache: make(map[uint64]CachedBlock), + Pubsub: pubsub, + p2pClient: p2pClient, + ProposerKey: proposerKey, + Conf: conf, + Genesis: genesis, + LastState: s, + Store: store, + Executor: exec, + DAClient: dalc, + SLClient: settlementClient, + Retriever: dalc.(da.BatchRetriever), + SyncTargetDiode: diodes.NewOneToOne(1, nil), + accumulatedProducedSize: 0, + shouldSubmitBatchCh: make(chan bool, 10), //allow capacity for multiple pending batches to support bursts + produceEmptyBlockCh: make(chan bool, 5), //TODO: arbitrary number for now, gonna be refactored + logger: logger, + blockCache: make(map[uint64]CachedBlock), } return agg, nil @@ -171,6 +168,7 @@ func (m *Manager) Start(ctx context.Context, isAggregator bool) error { } if isAggregator { + //TODO: populate the accumualtedSize on startup go m.ProduceBlockLoop(ctx) go m.SubmitLoop(ctx) } else { From f3592e547598a0748f7d46548556b48a3ceef7fd Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Mon, 6 May 2024 16:57:37 +0300 Subject: [PATCH 07/35] fixed defaults --- block/produce.go | 1 + 
config/defaults.go | 7 ++++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/block/produce.go b/block/produce.go index c287fa591..b6ac276bd 100644 --- a/block/produce.go +++ b/block/produce.go @@ -74,6 +74,7 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context) { if m.shouldSubmitBatch() { select { case m.shouldSubmitBatchCh <- true: + m.logger.Info("new batch accumualted, signal sent to submit the batch") default: m.logger.Error("new batch accumualted, but channel is full, stopping block production until the signal is consumed") // emit unhealthy event for the node diff --git a/config/defaults.go b/config/defaults.go index 94e7cbfea..afc7973e2 100644 --- a/config/defaults.go +++ b/config/defaults.go @@ -28,9 +28,10 @@ func DefaultConfig(home, chainId string) *NodeConfig { }, Aggregator: true, BlockManagerConfig: BlockManagerConfig{ - BlockTime: 200 * time.Millisecond, - EmptyBlocksMaxTime: 3600 * time.Second, - BatchSubmitMaxTime: 100 * time.Second, + BlockTime: 200 * time.Millisecond, + //TODO (#807): empty block will increase once we'll have dedicated timer to support IBC transfers + EmptyBlocksMaxTime: 100 * time.Second, + BatchSubmitMaxTime: 3600 * time.Second, NamespaceID: "0000000000000000ffff", BlockBatchMaxSizeBytes: 500000, GossipedBlocksCacheSize: 50, From 69732822c6cff4971e8113df906a259d056bd31c Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Mon, 6 May 2024 18:04:42 +0300 Subject: [PATCH 08/35] fix UT --- block/manager.go | 14 +++-- block/manager_test.go | 78 ------------------------- block/produce.go | 12 ++-- block/production_test.go | 123 ++++++++++++++++++++++++++++++++++++++- block/submit.go | 2 +- block/submit_test.go | 4 +- 6 files changed, 140 insertions(+), 93 deletions(-) diff --git a/block/manager.go b/block/manager.go index 1a329dcf5..d6a9819a0 100644 --- a/block/manager.go +++ b/block/manager.go @@ -32,6 +32,10 @@ import ( "github.com/dymensionxyz/dymint/types" ) +const ( + maxSupportedBatchSkew = 10 +) + // Manager is responsible for aggregating transactions into blocks. 
type Manager struct { // Configuration @@ -56,8 +60,8 @@ type Manager struct { SyncTarget atomic.Uint64 // Block production - accumulatedProducedSize uint64 - shouldSubmitBatchCh chan bool + AccumulatedProducedSize uint64 + ShouldSubmitBatchCh chan bool produceEmptyBlockCh chan bool lastSubmissionTime atomic.Int64 @@ -123,9 +127,9 @@ func NewManager( SLClient: settlementClient, Retriever: dalc.(da.BatchRetriever), SyncTargetDiode: diodes.NewOneToOne(1, nil), - accumulatedProducedSize: 0, - shouldSubmitBatchCh: make(chan bool, 10), //allow capacity for multiple pending batches to support bursts - produceEmptyBlockCh: make(chan bool, 5), //TODO: arbitrary number for now, gonna be refactored + AccumulatedProducedSize: 0, + ShouldSubmitBatchCh: make(chan bool, maxSupportedBatchSkew), //allow capacity for multiple pending batches to support bursts + produceEmptyBlockCh: make(chan bool, 5), //TODO(#807): arbitrary number for now, gonna be refactored logger: logger, blockCache: make(map[uint64]CachedBlock), } diff --git a/block/manager_test.go b/block/manager_test.go index f3d82bd1c..fa1bea3b8 100644 --- a/block/manager_test.go +++ b/block/manager_test.go @@ -3,7 +3,6 @@ package block_test import ( "context" "crypto/rand" - "errors" "testing" "time" @@ -12,7 +11,6 @@ import ( "github.com/stretchr/testify/require" "github.com/dymensionxyz/dymint/block" - "github.com/dymensionxyz/dymint/node/events" "github.com/dymensionxyz/dymint/p2p" "github.com/dymensionxyz/dymint/settlement" "github.com/dymensionxyz/dymint/testutil" @@ -216,82 +214,6 @@ func TestProducePendingBlock(t *testing.T) { assert.Equal(t, block.Header.Hash(), *(*[32]byte)(manager.LastState.LastBlockID.Hash)) } -//FIXME: REFACTOR THIS TEST - -// TestBlockProductionNodeHealth tests the different scenarios of block production when the node health is toggling. -// The test does the following: -// 1. Send healthy event and validate blocks are produced -// 2. Send unhealthy event and validate blocks are not produced -// 3. Send another unhealthy event and validate blocks are still not produced -// 4. 
Send healthy event and validate blocks are produced -func TestBlockProductionNodeHealth(t *testing.T) { - require := require.New(t) - assert := assert.New(t) - // Setup app - app := testutil.GetAppMock() - // Create proxy app - clientCreator := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(clientCreator) - err := proxyApp.Start() - require.NoError(err) - // Init manager - manager, err := testutil.GetManager(testutil.GetManagerConfig(), nil, nil, 1, 1, 0, proxyApp, nil) - require.NoError(err) - - cases := []struct { - name string - healthStatusEvent map[string][]string - healthStatusEventData interface{} - shouldProduceBlocks bool - }{ - { - name: "HealthyEventBlocksProduced", - healthStatusEvent: events.HealthStatusList, - healthStatusEventData: &events.DataHealthStatus{}, - shouldProduceBlocks: true, - }, - { - name: "UnhealthyEventBlocksNotProduced", - healthStatusEvent: events.HealthStatusList, - healthStatusEventData: &events.DataHealthStatus{Error: errors.New("unhealthy")}, - shouldProduceBlocks: false, - }, - { - name: "UnhealthyEventBlocksStillNotProduced", - healthStatusEvent: events.HealthStatusList, - healthStatusEventData: &events.DataHealthStatus{Error: errors.New("unhealthy")}, - shouldProduceBlocks: false, - }, - { - name: "HealthyEventBlocksProduced", - healthStatusEvent: events.HealthStatusList, - healthStatusEventData: &events.DataHealthStatus{}, - shouldProduceBlocks: true, - }, - } - // Start the manager - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - err = manager.Start(ctx, true) - require.NoError(err) - time.Sleep(100 * time.Millisecond) - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - err := manager.Pubsub.PublishWithEvents(context.Background(), c.healthStatusEventData, c.healthStatusEvent) - assert.NoError(err, "PublishWithEvents should not produce an error") - time.Sleep(500 * time.Millisecond) - blockHeight := manager.Store.Height() - time.Sleep(500 * time.Millisecond) - if c.shouldProduceBlocks { - assert.Greater(manager.Store.Height(), blockHeight) - } else { - assert.Equal(blockHeight, manager.Store.Height()) - } - }) - } -} - // Test that in case we fail after the proxy app commit, next time we won't commit again to the proxy app // and only update the store height and app hash. This test does the following: // 1. 
Produce first block successfully diff --git a/block/produce.go b/block/produce.go index b6ac276bd..1932fc58c 100644 --- a/block/produce.go +++ b/block/produce.go @@ -73,7 +73,7 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context) { // Check if we should submit the accumulated data if m.shouldSubmitBatch() { select { - case m.shouldSubmitBatchCh <- true: + case m.ShouldSubmitBatchCh <- true: m.logger.Info("new batch accumualted, signal sent to submit the batch") default: m.logger.Error("new batch accumualted, but channel is full, stopping block production until the signal is consumed") @@ -81,14 +81,14 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context) { evt := &events.DataHealthStatus{Error: fmt.Errorf("submission channel is full")} uevent.MustPublish(ctx, m.Pubsub, evt, events.HealthStatusList) // wait for the signal to be consumed - m.shouldSubmitBatchCh <- true + m.ShouldSubmitBatchCh <- true m.logger.Info("resumed block production") // emit healthy event for the node evt = &events.DataHealthStatus{Error: nil} uevent.MustPublish(ctx, m.Pubsub, evt, events.HealthStatusList) } m.produceBlockMutex.Lock() - m.accumulatedProducedSize = 0 + m.AccumulatedProducedSize = 0 m.produceBlockMutex.Unlock() } } @@ -109,14 +109,14 @@ func (m *Manager) ProduceAndGossipBlock(ctx context.Context, allowEmpty bool) er } func (m *Manager) updateAccumaltedSize(size uint64) { - m.accumulatedProducedSize += size + m.AccumulatedProducedSize += size } // check if we should submit the accumulated data func (m *Manager) shouldSubmitBatch() bool { // Check if accumulated size is greater than the max size // TODO: allow some tolerance for block size (aim for BlockBatchMaxSize +- 10%) - return m.accumulatedProducedSize > m.Conf.BlockBatchMaxSizeBytes + return m.AccumulatedProducedSize > m.Conf.BlockBatchMaxSizeBytes } func (m *Manager) produceBlock(allowEmpty bool) (*types.Block, *types.Commit, error) { @@ -206,7 +206,7 @@ func (m *Manager) produceBlock(allowEmpty bool) (*types.Block, *types.Commit, er size := uint64(block.ToProto().Size() + commit.ToProto().Size()) m.updateAccumaltedSize(size) - m.logger.Info("block created", "height", newHeight, "num_tx", len(block.Data.Txs), "accumulated_size", m.accumulatedProducedSize) + m.logger.Info("block created", "height", newHeight, "num_tx", len(block.Data.Txs), "accumulated_size", m.AccumulatedProducedSize) types.RollappBlockSizeBytesGauge.Set(float64(len(block.Data.Txs))) types.RollappBlockSizeTxsGauge.Set(float64(len(block.Data.Txs))) types.RollappHeightGauge.Set(float64(newHeight)) diff --git a/block/production_test.go b/block/production_test.go index c6e82eb11..aa982bf36 100644 --- a/block/production_test.go +++ b/block/production_test.go @@ -11,11 +11,14 @@ import ( "github.com/dymensionxyz/dymint/mempool" mempoolv1 "github.com/dymensionxyz/dymint/mempool/v1" + "github.com/dymensionxyz/dymint/node/events" "github.com/dymensionxyz/dymint/types" + uevent "github.com/dymensionxyz/dymint/utils/event" tmcfg "github.com/tendermint/tendermint/config" "github.com/dymensionxyz/dymint/testutil" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/proxy" ) @@ -92,7 +95,7 @@ func TestCreateEmptyBlocksEnableDisable(t *testing.T) { } func TestCreateEmptyBlocksNew(t *testing.T) { - t.Skip("FIXME: fails to submit tx to test the empty blocks feature") + t.Skip("FIXME: fails to submit tx to test the empty blocks feature") //TODO(#352) assert := assert.New(t) require := require.New(t) app := 
testutil.GetAppMock() @@ -202,3 +205,121 @@ func TestInvalidBatch(t *testing.T) { } } } + +func TestSubmissionTrigger(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + cases := []struct { + name string + blockBatchMaxSizeBytes uint64 + expectedSubmission bool + }{ + { + name: "block batch max size is fullfilled", + blockBatchMaxSizeBytes: 1000, + expectedSubmission: true, + }, + { + name: "block batch max size is not fullfilled", + blockBatchMaxSizeBytes: 100000, + expectedSubmission: false, + }, + } + + for _, c := range cases { + managerConfig := testutil.GetManagerConfig() + managerConfig.BlockBatchMaxSizeBytes = c.blockBatchMaxSizeBytes + manager, err := testutil.GetManager(managerConfig, nil, nil, 1, 1, 0, nil, nil) + require.NoError(err) + + //validate initial accumalted is zero + require.Equal(manager.AccumulatedProducedSize, uint64(0)) + assert.Equal(manager.Store.Height(), uint64(0)) + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + // produce block + go manager.ProduceBlockLoop(ctx) + + //wait for block to be produced but not for submission threshold + time.Sleep(400 * time.Millisecond) + assert.Greater(manager.Store.Height(), uint64(0)) + assert.Greater(manager.AccumulatedProducedSize, uint64(0)) + + //wait for submission signal + sent := false + select { + case <-ctx.Done(): + case <-manager.ShouldSubmitBatchCh: + sent = true + time.Sleep(100 * time.Millisecond) + assert.Equal(manager.AccumulatedProducedSize, uint64(0)) + } + + assert.Equal(c.expectedSubmission, sent) + } +} + +func TestStopBlockProduction(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + managerConfig := testutil.GetManagerConfig() + managerConfig.BlockBatchMaxSizeBytes = 1000 // small batch size to fill up quickly + manager, err := testutil.GetManager(managerConfig, nil, nil, 1, 1, 0, nil, nil) + require.NoError(err) + + //validate initial accumalted is zero + require.Equal(manager.AccumulatedProducedSize, uint64(0)) + assert.Equal(manager.Store.Height(), uint64(0)) + + // subscribe to health status event + eventRecievedCh := make(chan error) + cb := func(event pubsub.Message) { + eventRecievedCh <- event.Data().(*events.DataHealthStatus).Error + } + go uevent.MustSubscribe(context.Background(), manager.Pubsub, "HealthStatusHandler", events.QueryHealthStatus, cb, log.TestingLogger()) + + ctx, cancel := context.WithTimeout(context.Background(), 4*time.Second) + defer cancel() + + // produce block + go manager.ProduceBlockLoop(ctx) + + //validate block production works + time.Sleep(400 * time.Millisecond) + assert.Greater(manager.Store.Height(), uint64(0)) + assert.Greater(manager.AccumulatedProducedSize, uint64(0)) + + // we don't read from the submit channel, so we assume it get full + // we expect the block production to stop and unhealthy event to be emitted + select { + case <-ctx.Done(): + t.Error("expected unhealthy event") + case err := <-eventRecievedCh: + assert.Error(err) + } + + stoppedHeight := manager.Store.Height() + + // make sure block production is stopped + time.Sleep(400 * time.Millisecond) + assert.Equal(stoppedHeight, manager.Store.Height()) + + // consume the signal + <-manager.ShouldSubmitBatchCh + + // check for health status event and block production to continue + select { + case <-ctx.Done(): + t.Error("expected unhealthy event") + case err := <-eventRecievedCh: + assert.NoError(err) + } + + // make sure block production is resumed + time.Sleep(400 * time.Millisecond) + 
assert.Greater(manager.Store.Height(), stoppedHeight)
+}
diff --git a/block/submit.go b/block/submit.go
index 4c8f1b57f..09f8b932c 100644
--- a/block/submit.go
+++ b/block/submit.go
@@ -21,7 +21,7 @@ func (m *Manager) SubmitLoop(ctx context.Context) {
 		// Context canceled
 		case <-ctx.Done():
 			return
-		case <-m.shouldSubmitBatchCh: // Trigger by block production
+		case <-m.ShouldSubmitBatchCh: // Trigger by block production
 		case <-ticker.C: // trigger by max time
 		}
 
diff --git a/block/submit_test.go b/block/submit_test.go
index bfd8a121d..0562f189a 100644
--- a/block/submit_test.go
+++ b/block/submit_test.go
@@ -23,11 +23,10 @@ import (
 	"github.com/dymensionxyz/dymint/types"
 )
 
-var ctx = context.Background()
-
 func TestBatchSubmissionHappyFlow(t *testing.T) {
 	require := require.New(t)
 	app := testutil.GetAppMock()
+	ctx := context.Background()
 	// Create proxy app
 	clientCreator := proxy.NewLocalClientCreator(app)
 	proxyApp := proxy.NewAppConns(clientCreator)
@@ -56,6 +55,7 @@
 func TestBatchSubmissionFailedSubmission(t *testing.T) {
 	require := require.New(t)
 	app := testutil.GetAppMock()
+	ctx := context.Background()
 
 	// Create proxy app
 	clientCreator := proxy.NewLocalClientCreator(app)

From 81baf80fcca4658e6da9114526a059b1dbe8a387 Mon Sep 17 00:00:00 2001
From: Michael Tsitrin
Date: Mon, 6 May 2024 18:21:29 +0300
Subject: [PATCH 09/35] changed accumulated counter to be atomic

---
 block/manager.go         | 35 +++++++++++++++++------------------
 block/produce.go         |  9 +++++----
 block/production_test.go | 10 +++++-----
 3 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/block/manager.go b/block/manager.go
index d6a9819a0..6b9c36271 100644
--- a/block/manager.go
+++ b/block/manager.go
@@ -60,7 +60,7 @@ type Manager struct {
 	SyncTarget atomic.Uint64
 
 	// Block production
-	AccumulatedProducedSize uint64
+	AccumulatedProducedSize atomic.Uint64
 	ShouldSubmitBatchCh     chan bool
 	produceEmptyBlockCh     chan bool
 	lastSubmissionTime      atomic.Int64
@@ -115,23 +115,22 @@ func NewManager(
 	}
 
 	agg := &Manager{
-		Pubsub:                  pubsub,
-		p2pClient:               p2pClient,
-		ProposerKey:             proposerKey,
-		Conf:                    conf,
-		Genesis:                 genesis,
-		LastState:               s,
-		Store:                   store,
-		Executor:                exec,
-		DAClient:                dalc,
-		SLClient:                settlementClient,
-		Retriever:               dalc.(da.BatchRetriever),
-		SyncTargetDiode:         diodes.NewOneToOne(1, nil),
-		AccumulatedProducedSize: 0,
-		ShouldSubmitBatchCh:     make(chan bool, maxSupportedBatchSkew), //allow capacity for multiple pending batches to support bursts
-		produceEmptyBlockCh:     make(chan bool, 5),                     //TODO(#807): arbitrary number for now, gonna be refactored
-		logger:                  logger,
-		blockCache:              make(map[uint64]CachedBlock),
+		Pubsub:              pubsub,
+		p2pClient:           p2pClient,
+		ProposerKey:         proposerKey,
+		Conf:                conf,
+		Genesis:             genesis,
+		LastState:           s,
+		Store:               store,
+		Executor:            exec,
+		DAClient:            dalc,
+		SLClient:            settlementClient,
+		Retriever:           dalc.(da.BatchRetriever),
+		SyncTargetDiode:     diodes.NewOneToOne(1, nil),
+		ShouldSubmitBatchCh: make(chan bool, maxSupportedBatchSkew), //allow capacity for multiple pending batches to support bursts
+		produceEmptyBlockCh: make(chan bool, 5),                     //TODO(#807): arbitrary number for now, gonna be refactored
+		logger:              logger,
+		blockCache:          make(map[uint64]CachedBlock),
 	}
 
 	return agg, nil
diff --git a/block/produce.go b/block/produce.go
index 1932fc58c..1e3034ed0 100644
--- a/block/produce.go
+++ b/block/produce.go
@@ -88,7 +88,7 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context) {
 			uevent.MustPublish(ctx, m.Pubsub, evt, events.HealthStatusList)
 		}
m.produceBlockMutex.Lock() - m.AccumulatedProducedSize = 0 + m.AccumulatedProducedSize.Store(0) m.produceBlockMutex.Unlock() } } @@ -109,14 +109,15 @@ func (m *Manager) ProduceAndGossipBlock(ctx context.Context, allowEmpty bool) er } func (m *Manager) updateAccumaltedSize(size uint64) { - m.AccumulatedProducedSize += size + curr := m.AccumulatedProducedSize.Load() + _ = m.AccumulatedProducedSize.CompareAndSwap(curr, curr+size) } // check if we should submit the accumulated data func (m *Manager) shouldSubmitBatch() bool { // Check if accumulated size is greater than the max size // TODO: allow some tolerance for block size (aim for BlockBatchMaxSize +- 10%) - return m.AccumulatedProducedSize > m.Conf.BlockBatchMaxSizeBytes + return m.AccumulatedProducedSize.Load() > m.Conf.BlockBatchMaxSizeBytes } func (m *Manager) produceBlock(allowEmpty bool) (*types.Block, *types.Commit, error) { @@ -206,7 +207,7 @@ func (m *Manager) produceBlock(allowEmpty bool) (*types.Block, *types.Commit, er size := uint64(block.ToProto().Size() + commit.ToProto().Size()) m.updateAccumaltedSize(size) - m.logger.Info("block created", "height", newHeight, "num_tx", len(block.Data.Txs), "accumulated_size", m.AccumulatedProducedSize) + m.logger.Info("block created", "height", newHeight, "num_tx", len(block.Data.Txs), "accumulated_size", m.AccumulatedProducedSize.Load()) types.RollappBlockSizeBytesGauge.Set(float64(len(block.Data.Txs))) types.RollappBlockSizeTxsGauge.Set(float64(len(block.Data.Txs))) types.RollappHeightGauge.Set(float64(newHeight)) diff --git a/block/production_test.go b/block/production_test.go index aa982bf36..fed26282f 100644 --- a/block/production_test.go +++ b/block/production_test.go @@ -234,7 +234,7 @@ func TestSubmissionTrigger(t *testing.T) { require.NoError(err) //validate initial accumalted is zero - require.Equal(manager.AccumulatedProducedSize, uint64(0)) + require.Equal(manager.AccumulatedProducedSize.Load(), uint64(0)) assert.Equal(manager.Store.Height(), uint64(0)) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) @@ -246,7 +246,7 @@ func TestSubmissionTrigger(t *testing.T) { //wait for block to be produced but not for submission threshold time.Sleep(400 * time.Millisecond) assert.Greater(manager.Store.Height(), uint64(0)) - assert.Greater(manager.AccumulatedProducedSize, uint64(0)) + assert.Greater(manager.AccumulatedProducedSize.Load(), uint64(0)) //wait for submission signal sent := false @@ -255,7 +255,7 @@ func TestSubmissionTrigger(t *testing.T) { case <-manager.ShouldSubmitBatchCh: sent = true time.Sleep(100 * time.Millisecond) - assert.Equal(manager.AccumulatedProducedSize, uint64(0)) + assert.Equal(manager.AccumulatedProducedSize.Load(), uint64(0)) } assert.Equal(c.expectedSubmission, sent) @@ -272,7 +272,7 @@ func TestStopBlockProduction(t *testing.T) { require.NoError(err) //validate initial accumalted is zero - require.Equal(manager.AccumulatedProducedSize, uint64(0)) + require.Equal(manager.AccumulatedProducedSize.Load(), uint64(0)) assert.Equal(manager.Store.Height(), uint64(0)) // subscribe to health status event @@ -291,7 +291,7 @@ func TestStopBlockProduction(t *testing.T) { //validate block production works time.Sleep(400 * time.Millisecond) assert.Greater(manager.Store.Height(), uint64(0)) - assert.Greater(manager.AccumulatedProducedSize, uint64(0)) + assert.Greater(manager.AccumulatedProducedSize.Load(), uint64(0)) // we don't read from the submit channel, so we assume it get full // we expect the block production to stop and unhealthy event to be 
emitted From 24d8b854018ac0ac9253c4c41e05c991987f8682 Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Mon, 6 May 2024 19:36:52 +0300 Subject: [PATCH 10/35] fixed UT --- settlement/dymension/dymension.go | 3 +-- settlement/dymension/dymension_test.go | 7 ++++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/settlement/dymension/dymension.go b/settlement/dymension/dymension.go index 6f471228b..a201fa2ee 100644 --- a/settlement/dymension/dymension.go +++ b/settlement/dymension/dymension.go @@ -262,8 +262,7 @@ func (d *HubClient) PostBatch(batch *types.Batch, daClient da.Client, daResult * return nil case <-timer.C: - // Before emitting unhealthy event, check if the batch was accepted by the settlement - // layer, and we've just missed the event. + // Check if the batch was accepted by the settlement layer, and we've just missed the event. includedBatch, err := d.waitForBatchInclusion(batch.StartHeight) if err != nil { d.logger.Error( diff --git a/settlement/dymension/dymension_test.go b/settlement/dymension/dymension_test.go index e726be8d2..61734b8dc 100644 --- a/settlement/dymension/dymension_test.go +++ b/settlement/dymension/dymension_test.go @@ -198,11 +198,9 @@ func TestPostBatch(t *testing.T) { case err := <-errChan: // Check for error from PostBatch. assert.NoError(t, err, "PostBatch should not produce an error") - case <-time.After(50 * time.Millisecond): - // Timeout case to avoid blocking forever if PostBatch doesn't return. + case <-time.After(100 * time.Millisecond): } // Wait for the batch to be submitted and submit an event notifying that the batch was accepted - time.Sleep(50 * time.Millisecond) if c.isBatchAcceptedHubEvent { batchAcceptedCh <- coretypes.ResultEvent{ Query: fmt.Sprintf("state_update.rollapp_id='%s'", ""), @@ -213,6 +211,9 @@ func TestPostBatch(t *testing.T) { }, } } + + time.Sleep(300 * time.Millisecond) + // Stop the hub client and wait for it to stop err = hubClient.Stop() require.NoError(err) From 6c1741a26c6a37b72f5c63cd6f3f78d5605bfc8a Mon Sep 17 00:00:00 2001 From: danwt <30197399+danwt@users.noreply.github.com> Date: Tue, 7 May 2024 12:08:44 +0100 Subject: [PATCH 11/35] spelling, typo, format --- block/produce.go | 4 ++-- block/production_test.go | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/block/produce.go b/block/produce.go index 1e3034ed0..d5c4dd3a8 100644 --- a/block/produce.go +++ b/block/produce.go @@ -108,7 +108,7 @@ func (m *Manager) ProduceAndGossipBlock(ctx context.Context, allowEmpty bool) er return nil } -func (m *Manager) updateAccumaltedSize(size uint64) { +func (m *Manager) updateAccumulatedSize(size uint64) { curr := m.AccumulatedProducedSize.Load() _ = m.AccumulatedProducedSize.CompareAndSwap(curr, curr+size) } @@ -205,7 +205,7 @@ func (m *Manager) produceBlock(allowEmpty bool) (*types.Block, *types.Commit, er } size := uint64(block.ToProto().Size() + commit.ToProto().Size()) - m.updateAccumaltedSize(size) + m.updateAccumulatedSize(size) m.logger.Info("block created", "height", newHeight, "num_tx", len(block.Data.Txs), "accumulated_size", m.AccumulatedProducedSize.Load()) types.RollappBlockSizeBytesGauge.Set(float64(len(block.Data.Txs))) diff --git a/block/production_test.go b/block/production_test.go index fed26282f..2c68c0942 100644 --- a/block/production_test.go +++ b/block/production_test.go @@ -95,7 +95,7 @@ func TestCreateEmptyBlocksEnableDisable(t *testing.T) { } func TestCreateEmptyBlocksNew(t *testing.T) { - t.Skip("FIXME: fails to submit tx to test the empty blocks 
feature") //TODO(#352) + t.Skip("FIXME: fails to submit tx to test the empty blocks feature") // TODO(#352) assert := assert.New(t) require := require.New(t) app := testutil.GetAppMock() @@ -233,7 +233,7 @@ func TestSubmissionTrigger(t *testing.T) { manager, err := testutil.GetManager(managerConfig, nil, nil, 1, 1, 0, nil, nil) require.NoError(err) - //validate initial accumalted is zero + // validate initial accumulated is zero require.Equal(manager.AccumulatedProducedSize.Load(), uint64(0)) assert.Equal(manager.Store.Height(), uint64(0)) @@ -243,12 +243,12 @@ func TestSubmissionTrigger(t *testing.T) { // produce block go manager.ProduceBlockLoop(ctx) - //wait for block to be produced but not for submission threshold + // wait for block to be produced but not for submission threshold time.Sleep(400 * time.Millisecond) assert.Greater(manager.Store.Height(), uint64(0)) assert.Greater(manager.AccumulatedProducedSize.Load(), uint64(0)) - //wait for submission signal + // wait for submission signal sent := false select { case <-ctx.Done(): @@ -271,7 +271,7 @@ func TestStopBlockProduction(t *testing.T) { manager, err := testutil.GetManager(managerConfig, nil, nil, 1, 1, 0, nil, nil) require.NoError(err) - //validate initial accumalted is zero + // validate initial accumulated is zero require.Equal(manager.AccumulatedProducedSize.Load(), uint64(0)) assert.Equal(manager.Store.Height(), uint64(0)) @@ -288,7 +288,7 @@ func TestStopBlockProduction(t *testing.T) { // produce block go manager.ProduceBlockLoop(ctx) - //validate block production works + // validate block production works time.Sleep(400 * time.Millisecond) assert.Greater(manager.Store.Height(), uint64(0)) assert.Greater(manager.AccumulatedProducedSize.Load(), uint64(0)) From b1c0131b3089022eade62b36835dbec48bf091ca Mon Sep 17 00:00:00 2001 From: danwt <30197399+danwt@users.noreply.github.com> Date: Tue, 7 May 2024 12:11:14 +0100 Subject: [PATCH 12/35] spelling --- block/manager.go | 6 +++--- block/produce.go | 4 ++-- block/submit.go | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/block/manager.go b/block/manager.go index 6b9c36271..a7093f4dd 100644 --- a/block/manager.go +++ b/block/manager.go @@ -127,8 +127,8 @@ func NewManager( SLClient: settlementClient, Retriever: dalc.(da.BatchRetriever), SyncTargetDiode: diodes.NewOneToOne(1, nil), - ShouldSubmitBatchCh: make(chan bool, maxSupportedBatchSkew), //allow capacity for multiple pending batches to support bursts - produceEmptyBlockCh: make(chan bool, 5), //TODO(#807): arbitrary number for now, gonna be refactored + ShouldSubmitBatchCh: make(chan bool, maxSupportedBatchSkew), // allow capacity for multiple pending batches to support bursts + produceEmptyBlockCh: make(chan bool, 5), // TODO(#807): arbitrary number for now, gonna be refactored logger: logger, blockCache: make(map[uint64]CachedBlock), } @@ -171,7 +171,7 @@ func (m *Manager) Start(ctx context.Context, isAggregator bool) error { } if isAggregator { - //TODO: populate the accumualtedSize on startup + // TODO: populate the accumulatedSize on startup go m.ProduceBlockLoop(ctx) go m.SubmitLoop(ctx) } else { diff --git a/block/produce.go b/block/produce.go index d5c4dd3a8..22d5aa0da 100644 --- a/block/produce.go +++ b/block/produce.go @@ -74,9 +74,9 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context) { if m.shouldSubmitBatch() { select { case m.ShouldSubmitBatchCh <- true: - m.logger.Info("new batch accumualted, signal sent to submit the batch") + m.logger.Info("new batch accumulated, signal sent to 
submit the batch") default: - m.logger.Error("new batch accumualted, but channel is full, stopping block production until the signal is consumed") + m.logger.Error("new batch accumulated, but channel is full, stopping block production until the signal is consumed") // emit unhealthy event for the node evt := &events.DataHealthStatus{Error: fmt.Errorf("submission channel is full")} uevent.MustPublish(ctx, m.Pubsub, evt, events.HealthStatusList) diff --git a/block/submit.go b/block/submit.go index 09f8b932c..f26372eea 100644 --- a/block/submit.go +++ b/block/submit.go @@ -10,7 +10,7 @@ import ( ) // SubmitLoop is the main loop for submitting blocks to the DA and SL layers. -// It is triggered by the shouldSubmitBatchCh channel, which is triggered by the block production loop when accumualted produced size is enogh to submit. +// It is triggered by the shouldSubmitBatchCh channel, which is triggered by the block production loop when accumulated produced size is enogh to submit. // It is also triggered by a BatchSubmitMaxTime timer to limit the time between submissions. func (m *Manager) SubmitLoop(ctx context.Context) { ticker := time.NewTicker(m.Conf.BatchSubmitMaxTime) @@ -98,7 +98,7 @@ func (m *Manager) submitNextBatchToDA(nextBatch *types.Batch) (*da.ResultSubmitB isLastBlockEmpty := nextBatch.Blocks[len(nextBatch.Blocks)-1].Data.Txs == nil if !isLastBlockEmpty { m.logger.Info("Last block in batch is not an empty block. Requesting for an empty block creation", "endHeight", actualEndHeight) - //TODO: remove from here and move to the block production loop. + // TODO: remove from here and move to the block production loop. m.produceEmptyBlockCh <- true } From 9364c7cf0bd124b6db1e0d64ea57e115557409ec Mon Sep 17 00:00:00 2001 From: Michael Tsitrin <114929630+mtsitrin@users.noreply.github.com> Date: Wed, 8 May 2024 09:52:00 +0300 Subject: [PATCH 13/35] feat: block progress to support ibc should be managed by produceloop (#814) --- block/manager.go | 11 +---------- block/manager_test.go | 8 ++++---- block/produce.go | 36 +++++++++++++++++++++++------------- block/submit.go | 16 ---------------- block/submit_test.go | 4 ++-- 5 files changed, 30 insertions(+), 45 deletions(-) diff --git a/block/manager.go b/block/manager.go index a7093f4dd..41345e0e8 100644 --- a/block/manager.go +++ b/block/manager.go @@ -62,16 +62,8 @@ type Manager struct { // Block production AccumulatedProducedSize atomic.Uint64 ShouldSubmitBatchCh chan bool - produceEmptyBlockCh chan bool lastSubmissionTime atomic.Int64 - /* - Protect against producing two blocks at once if the first one is taking a while - Also, used to protect against the block production that occurs when batch submission thread - creates its empty block. - */ - produceBlockMutex sync.Mutex - /* Protect against processing two blocks at once when there are two routines handling incoming gossiped blocks, and incoming DA blocks, respectively. 
@@ -127,8 +119,7 @@ func NewManager( SLClient: settlementClient, Retriever: dalc.(da.BatchRetriever), SyncTargetDiode: diodes.NewOneToOne(1, nil), - ShouldSubmitBatchCh: make(chan bool, maxSupportedBatchSkew), // allow capacity for multiple pending batches to support bursts - produceEmptyBlockCh: make(chan bool, 5), // TODO(#807): arbitrary number for now, gonna be refactored + ShouldSubmitBatchCh: make(chan bool, maxSupportedBatchSkew), //allow capacity for multiple pending batches to support bursts logger: logger, blockCache: make(map[uint64]CachedBlock), } diff --git a/block/manager_test.go b/block/manager_test.go index fa1bea3b8..1a327921f 100644 --- a/block/manager_test.go +++ b/block/manager_test.go @@ -181,7 +181,7 @@ func TestProduceNewBlock(t *testing.T) { manager, err := testutil.GetManager(testutil.GetManagerConfig(), nil, nil, 1, 1, 0, proxyApp, nil) require.NoError(t, err) // Produce block - err = manager.ProduceAndGossipBlock(context.Background(), true) + _, _, err = manager.ProduceAndGossipBlock(context.Background(), true) require.NoError(t, err) // Validate state is updated with the commit hash assert.Equal(t, uint64(1), manager.Store.Height()) @@ -208,7 +208,7 @@ func TestProducePendingBlock(t *testing.T) { _, err = manager.Store.SaveBlock(block, &block.LastCommit, nil) require.NoError(t, err) // Produce block - err = manager.ProduceAndGossipBlock(context.Background(), true) + _, _, err = manager.ProduceAndGossipBlock(context.Background(), true) require.NoError(t, err) // Validate state is updated with the block that was saved in the store assert.Equal(t, block.Header.Hash(), *(*[32]byte)(manager.LastState.LastBlockID.Hash)) @@ -306,7 +306,7 @@ func TestProduceBlockFailAfterCommit(t *testing.T) { }) mockStore.ShouldFailSetHeight = tc.shouldFailSetSetHeight mockStore.ShoudFailUpdateState = tc.shouldFailUpdateState - _ = manager.ProduceAndGossipBlock(context.Background(), true) + _, _, err = manager.ProduceAndGossipBlock(context.Background(), true) require.Equal(tc.expectedStoreHeight, manager.Store.Height(), tc.name) require.Equal(tc.expectedStateAppHash, manager.LastState.AppHash, tc.name) storeState, err := manager.Store.LoadState() @@ -360,7 +360,7 @@ func TestCreateNextDABatchWithBytesLimit(t *testing.T) { t.Run(tc.name, func(t *testing.T) { // Produce blocks for i := 0; i < tc.blocksToProduce; i++ { - err := manager.ProduceAndGossipBlock(ctx, true) + _, _, err := manager.ProduceAndGossipBlock(ctx, true) assert.NoError(err) } diff --git a/block/produce.go b/block/produce.go index 22d5aa0da..b00c3d244 100644 --- a/block/produce.go +++ b/block/produce.go @@ -21,13 +21,13 @@ import ( // ProduceBlockLoop is calling publishBlock in a loop as long as we're synced. 
func (m *Manager) ProduceBlockLoop(ctx context.Context) {
 	m.logger.Debug("Started produce loop")
+	produceEmptyBlock := true // Allow the initial block to be empty
+	// Main ticker for block production
 	ticker := time.NewTicker(m.Conf.BlockTime)
 	defer ticker.Stop()
 
-	// Allow the initial block to be empty
-	produceEmptyBlock := true
-
+	// Timer for empty blocks
 	var emptyBlocksTimer <-chan time.Time
 	resetEmptyBlocksTimer := func() {}
 	// Setup ticker for empty blocks if enabled
@@ -41,18 +41,30 @@
 		defer t.Stop()
 	}
 
+	// Timer for block progression to support IBC transfers
+	forceCreationTimer := time.NewTimer(5 * time.Second) //TODO: change to own constant
+	defer forceCreationTimer.Stop()
+	forceCreationTimer.Stop() // Don't start it initially
+	resetForceCreationTimer := func(lastBlockEmpty bool) {
+		if lastBlockEmpty {
+			forceCreationTimer.Stop()
+		} else {
+			forceCreationTimer.Reset(5 * time.Second)
+		}
+	}
+
 	for {
 		select {
 		case <-ctx.Done(): // Context canceled
 			return
-		case <-m.produceEmptyBlockCh: // If we got a request for an empty block produce it and don't wait for the ticker
+		case <-forceCreationTimer.C: // Force block creation
 			produceEmptyBlock = true
 		case <-emptyBlocksTimer: // Empty blocks timeout
 			produceEmptyBlock = true
 			m.logger.Debug(fmt.Sprintf("no transactions, producing empty block: elapsed: %.2f", m.Conf.EmptyBlocksMaxTime.Seconds()))
 		// Produce block
 		case <-ticker.C:
-			err := m.ProduceAndGossipBlock(ctx, produceEmptyBlock)
+			block, _, err := m.ProduceAndGossipBlock(ctx, produceEmptyBlock)
 			if errors.Is(err, context.Canceled) {
 				m.logger.Error("produce and gossip: context canceled", "error", err)
 				return
@@ -69,6 +81,8 @@
 				continue
 			}
 			resetEmptyBlocksTimer()
+			isLastBlockEmpty := len(block.Data.Txs) == 0
+			resetForceCreationTimer(isLastBlockEmpty)
 
 			// Check if we should submit the accumulated data
 			if m.shouldSubmitBatch() {
@@ -87,22 +101,20 @@
 				uevent.MustPublish(ctx, m.Pubsub, evt, events.HealthStatusList)
 				}
-				m.produceBlockMutex.Lock()
 				m.AccumulatedProducedSize.Store(0)
-				m.produceBlockMutex.Unlock()
 			}
 		}
 	}
 }
 
-func (m *Manager) ProduceAndGossipBlock(ctx context.Context, allowEmpty bool) error {
+func (m *Manager) ProduceAndGossipBlock(ctx context.Context, allowEmpty bool) (*types.Block, *types.Commit, error) {
 	block, commit, err := m.produceBlock(allowEmpty)
 	if err != nil {
-		return fmt.Errorf("produce block: %w", err)
+		return nil, nil, fmt.Errorf("produce block: %w", err)
 	}
 
 	if err := m.gossipBlock(ctx, *block, *commit); err != nil {
-		return fmt.Errorf("gossip block: %w", err)
+		return nil, nil, fmt.Errorf("gossip block: %w", err)
 	}
 
-	return nil
+	return block, commit, nil
 }
@@ -115,6 +127,4 @@
 func (m *Manager) produceBlock(allowEmpty bool) (*types.Block, *types.Commit, error) {
-	m.produceBlockMutex.Lock()
-	defer m.produceBlockMutex.Unlock()
 	var (
 		lastCommit     *types.Commit
 		lastHeaderHash [32]byte
diff --git a/block/submit.go b/block/submit.go
index f26372eea..9d1d9eb61 100644
--- a/block/submit.go
+++ b/block/submit.go
@@ -45,12 +45,6 @@ func (m *Manager) HandleSubmissionTrigger(ctx context.Context) error {
 		return nil // No new blocks have been produced
 	}
 
-	// We try and produce an empty block to make sure relevant ibc messages will pass through during the
batch submission: https://github.com/dymensionxyz/research/issues/173. - err := m.ProduceAndGossipBlock(ctx, true) - if err != nil { - m.logger.Error("Produce and gossip empty block.", "error", err) - } - nextBatch, err := m.createNextBatch() if err != nil { return fmt.Errorf("create next batch: %w", err) @@ -92,16 +86,6 @@ func (m *Manager) submitNextBatchToDA(nextBatch *types.Batch) (*da.ResultSubmitB startHeight := nextBatch.StartHeight actualEndHeight := nextBatch.EndHeight - // Verify the last block in the batch is an empty block and that no ibc messages has accidentally passed through. - // This block may not be empty if another block has passed it in line. If that's the case our empty block request will - // be sent to the next batch. - isLastBlockEmpty := nextBatch.Blocks[len(nextBatch.Blocks)-1].Data.Txs == nil - if !isLastBlockEmpty { - m.logger.Info("Last block in batch is not an empty block. Requesting for an empty block creation", "endHeight", actualEndHeight) - // TODO: remove from here and move to the block production loop. - m.produceEmptyBlockCh <- true - } - // Submit batch to the DA m.logger.Info("Submitting next batch", "startHeight", startHeight, "endHeight", actualEndHeight, "size", nextBatch.ToProto().Size()) resultSubmitToDA := m.DAClient.SubmitBatch(nextBatch) diff --git a/block/submit_test.go b/block/submit_test.go index 0562f189a..7f2110150 100644 --- a/block/submit_test.go +++ b/block/submit_test.go @@ -42,7 +42,7 @@ func TestBatchSubmissionHappyFlow(t *testing.T) { require.Zero(manager.SyncTarget.Load()) // Produce block and validate that we produced blocks - err = manager.ProduceAndGossipBlock(ctx, true) + _, _, err = manager.ProduceAndGossipBlock(ctx, true) require.NoError(err) assert.Greater(t, manager.Store.Height(), initialHeight) assert.Zero(t, manager.SyncTarget.Load()) @@ -89,7 +89,7 @@ func TestBatchSubmissionFailedSubmission(t *testing.T) { require.Zero(manager.SyncTarget.Load()) // Produce block and validate that we produced blocks - err = manager.ProduceAndGossipBlock(ctx, true) + _, _, err = manager.ProduceAndGossipBlock(ctx, true) require.NoError(err) assert.Greater(t, manager.Store.Height(), initialHeight) assert.Zero(t, manager.SyncTarget.Load()) From 9f76e0bd82be19b31607be67e372c0248e4699de Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Wed, 8 May 2024 10:34:19 +0300 Subject: [PATCH 14/35] refactored the signaling --- block/manager.go | 37 ++++++++++--------- block/manager_test.go | 2 +- block/produce.go | 45 ++++------------------- block/production_test.go | 78 +++++++++------------------------------- block/submit.go | 57 ++++++++++++++++++++++++++++- block/submit_test.go | 76 +++++++++++++++++++++++++++++++++++---- 6 files changed, 170 insertions(+), 125 deletions(-) diff --git a/block/manager.go b/block/manager.go index 41345e0e8..117e8705b 100644 --- a/block/manager.go +++ b/block/manager.go @@ -33,6 +33,8 @@ import ( ) const ( + // max amount of pending batches to be submitted. block production will be paused if this limit is reached. 
+ // TODO: make this configurable maxSupportedBatchSkew = 10 ) @@ -60,8 +62,11 @@ type Manager struct { SyncTarget atomic.Uint64 // Block production + ProducedSizeCh chan uint64 // channel for the producer to report the size of the block it produced + produceEmptyBlockCh chan bool + + // Submitter AccumulatedProducedSize atomic.Uint64 - ShouldSubmitBatchCh chan bool lastSubmissionTime atomic.Int64 /* @@ -107,21 +112,21 @@ func NewManager( } agg := &Manager{ - Pubsub: pubsub, - p2pClient: p2pClient, - ProposerKey: proposerKey, - Conf: conf, - Genesis: genesis, - LastState: s, - Store: store, - Executor: exec, - DAClient: dalc, - SLClient: settlementClient, - Retriever: dalc.(da.BatchRetriever), - SyncTargetDiode: diodes.NewOneToOne(1, nil), - ShouldSubmitBatchCh: make(chan bool, maxSupportedBatchSkew), //allow capacity for multiple pending batches to support bursts - logger: logger, - blockCache: make(map[uint64]CachedBlock), + Pubsub: pubsub, + p2pClient: p2pClient, + ProposerKey: proposerKey, + Conf: conf, + Genesis: genesis, + LastState: s, + Store: store, + Executor: exec, + DAClient: dalc, + SLClient: settlementClient, + Retriever: dalc.(da.BatchRetriever), + SyncTargetDiode: diodes.NewOneToOne(1, nil), + ProducedSizeCh: make(chan uint64), + logger: logger, + blockCache: make(map[uint64]CachedBlock), } return agg, nil diff --git a/block/manager_test.go b/block/manager_test.go index 1a327921f..1c9cccc71 100644 --- a/block/manager_test.go +++ b/block/manager_test.go @@ -306,7 +306,7 @@ func TestProduceBlockFailAfterCommit(t *testing.T) { }) mockStore.ShouldFailSetHeight = tc.shouldFailSetSetHeight mockStore.ShoudFailUpdateState = tc.shouldFailUpdateState - _, _, err = manager.ProduceAndGossipBlock(context.Background(), true) + _, _, _ = manager.ProduceAndGossipBlock(context.Background(), true) require.Equal(tc.expectedStoreHeight, manager.Store.Height(), tc.name) require.Equal(tc.expectedStateAppHash, manager.LastState.AppHash, tc.name) storeState, err := manager.Store.LoadState() diff --git a/block/produce.go b/block/produce.go index b00c3d244..3ddfa0233 100644 --- a/block/produce.go +++ b/block/produce.go @@ -6,9 +6,6 @@ import ( "fmt" "time" - "github.com/dymensionxyz/dymint/node/events" - uevent "github.com/dymensionxyz/dymint/utils/event" - "github.com/dymensionxyz/dymint/store" "github.com/dymensionxyz/dymint/types" @@ -64,7 +61,7 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context) { m.logger.Debug(fmt.Sprintf("no transactions, producing empty block: elapsed: %.2f", m.Conf.EmptyBlocksMaxTime.Seconds())) // Produce block case <-ticker.C: - block, _, err := m.ProduceAndGossipBlock(ctx, produceEmptyBlock) + block, commit, err := m.ProduceAndGossipBlock(ctx, produceEmptyBlock) if errors.Is(err, context.Canceled) { m.logger.Error("produce and gossip: context canceled", "error", err) return @@ -84,25 +81,10 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context) { isLastBlockEmpty := len(block.Data.Txs) == 0 resetForceCreationTimer(isLastBlockEmpty) - // Check if we should submit the accumulated data - if m.shouldSubmitBatch() { - select { - case m.ShouldSubmitBatchCh <- true: - m.logger.Info("new batch accumulated, signal sent to submit the batch") - default: - m.logger.Error("new batch accumulated, but channel is full, stopping block production until the signal is consumed") - // emit unhealthy event for the node - evt := &events.DataHealthStatus{Error: fmt.Errorf("submission channel is full")} - uevent.MustPublish(ctx, m.Pubsub, evt, events.HealthStatusList) - // wait for 
the signal to be consumed - m.ShouldSubmitBatchCh <- true - m.logger.Info("resumed block production") - // emit healthy event for the node - evt = &events.DataHealthStatus{Error: nil} - uevent.MustPublish(ctx, m.Pubsub, evt, events.HealthStatusList) - } - m.AccumulatedProducedSize.Store(0) - } + size := uint64(block.ToProto().Size()) + uint64(commit.ToProto().Size()) + // Send the size to the accumulated size channel + // This will block in case the submitter is too slow and it's buffer is full + m.ProducedSizeCh <- size } } } @@ -120,18 +102,6 @@ func (m *Manager) ProduceAndGossipBlock(ctx context.Context, allowEmpty bool) (* return block, commit, nil } -func (m *Manager) updateAccumulatedSize(size uint64) { - curr := m.AccumulatedProducedSize.Load() - _ = m.AccumulatedProducedSize.CompareAndSwap(curr, curr+size) -} - -// check if we should submit the accumulated data -func (m *Manager) shouldSubmitBatch() bool { - // Check if accumulated size is greater than the max size - // TODO: allow some tolerance for block size (aim for BlockBatchMaxSize +- 10%) - return m.AccumulatedProducedSize.Load() > m.Conf.BlockBatchMaxSizeBytes -} - func (m *Manager) produceBlock(allowEmpty bool) (*types.Block, *types.Commit, error) { var ( lastCommit *types.Commit @@ -214,10 +184,7 @@ func (m *Manager) produceBlock(allowEmpty bool) (*types.Block, *types.Commit, er return nil, nil, fmt.Errorf("apply block: %w: %w", err, ErrNonRecoverable) } - size := uint64(block.ToProto().Size() + commit.ToProto().Size()) - m.updateAccumulatedSize(size) - - m.logger.Info("block created", "height", newHeight, "num_tx", len(block.Data.Txs), "accumulated_size", m.AccumulatedProducedSize.Load()) + m.logger.Info("block created", "height", newHeight, "num_tx", len(block.Data.Txs)) types.RollappBlockSizeBytesGauge.Set(float64(len(block.Data.Txs))) types.RollappBlockSizeTxsGauge.Set(float64(len(block.Data.Txs))) types.RollappHeightGauge.Set(float64(newHeight)) diff --git a/block/production_test.go b/block/production_test.go index 2c68c0942..cdcd30b80 100644 --- a/block/production_test.go +++ b/block/production_test.go @@ -3,6 +3,7 @@ package block_test import ( "context" "fmt" + "sync" "testing" "time" @@ -206,62 +207,6 @@ func TestInvalidBatch(t *testing.T) { } } -func TestSubmissionTrigger(t *testing.T) { - assert := assert.New(t) - require := require.New(t) - - cases := []struct { - name string - blockBatchMaxSizeBytes uint64 - expectedSubmission bool - }{ - { - name: "block batch max size is fullfilled", - blockBatchMaxSizeBytes: 1000, - expectedSubmission: true, - }, - { - name: "block batch max size is not fullfilled", - blockBatchMaxSizeBytes: 100000, - expectedSubmission: false, - }, - } - - for _, c := range cases { - managerConfig := testutil.GetManagerConfig() - managerConfig.BlockBatchMaxSizeBytes = c.blockBatchMaxSizeBytes - manager, err := testutil.GetManager(managerConfig, nil, nil, 1, 1, 0, nil, nil) - require.NoError(err) - - // validate initial accumulated is zero - require.Equal(manager.AccumulatedProducedSize.Load(), uint64(0)) - assert.Equal(manager.Store.Height(), uint64(0)) - - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - - // produce block - go manager.ProduceBlockLoop(ctx) - - // wait for block to be produced but not for submission threshold - time.Sleep(400 * time.Millisecond) - assert.Greater(manager.Store.Height(), uint64(0)) - assert.Greater(manager.AccumulatedProducedSize.Load(), uint64(0)) - - // wait for submission signal - sent := false - select { - case 
<-ctx.Done():
-		case <-manager.ShouldSubmitBatchCh:
-			sent = true
-			time.Sleep(100 * time.Millisecond)
-			assert.Equal(manager.AccumulatedProducedSize.Load(), uint64(0))
-		}
-
-		assert.Equal(c.expectedSubmission, sent)
-	}
-}
-
 func TestStopBlockProduction(t *testing.T) {
 	assert := assert.New(t)
 	require := require.New(t)
@@ -282,11 +227,22 @@ func TestStopBlockProduction(t *testing.T) {
 	}
 	go uevent.MustSubscribe(context.Background(), manager.Pubsub, "HealthStatusHandler", events.QueryHealthStatus, cb, log.TestingLogger())
 
-	ctx, cancel := context.WithTimeout(context.Background(), 4*time.Second)
+	var wg sync.WaitGroup
+	wg.Add(2) // Add 2 because we have 2 goroutines
+
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
 	defer cancel()
 
-	// produce block
-	go manager.ProduceBlockLoop(ctx)
+	go func() {
+		manager.ProduceBlockLoop(ctx)
+		wg.Done() // Decrease counter when this goroutine finishes
+	}()
+
+	toSubmit := make(chan bool)
+	go func() {
+		manager.AccumulatedDataLoop(ctx, toSubmit)
+		wg.Done() // Decrease counter when this goroutine finishes
+	}()
 
 	// validate block production works
 	time.Sleep(400 * time.Millisecond)
@@ -309,12 +265,12 @@ func TestStopBlockProduction(t *testing.T) {
 	assert.Equal(stoppedHeight, manager.Store.Height())
 
 	// consume the signal
-	<-manager.ShouldSubmitBatchCh
+	<-toSubmit
 
 	// check for health status event and block production to continue
 	select {
 	case <-ctx.Done():
-		t.Error("expected unhealthy event")
+		t.Error("expected health event")
 	case err := <-eventRecievedCh:
 		assert.NoError(err)
 	}
diff --git a/block/submit.go b/block/submit.go
index 9d1d9eb61..01498f5ee 100644
--- a/block/submit.go
+++ b/block/submit.go
@@ -6,23 +6,45 @@ import (
 	"time"
 
 	"github.com/dymensionxyz/dymint/da"
+	"github.com/dymensionxyz/dymint/node/events"
 	"github.com/dymensionxyz/dymint/types"
+	uevent "github.com/dymensionxyz/dymint/utils/event"
 )
 
 // SubmitLoop is the main loop for submitting blocks to the DA and SL layers.
 // It is triggered by the shouldSubmitBatchCh channel, which is triggered by the block production loop when accumulated produced size is enough to submit.
 // It is also triggered by a BatchSubmitMaxTime timer to limit the time between submissions.
 func (m *Manager) SubmitLoop(ctx context.Context) {
+	// ticker to limit the time between submissions
 	ticker := time.NewTicker(m.Conf.BatchSubmitMaxTime)
 	defer ticker.Stop()
 
+	// get produced size from the block production loop and signal to submit the batch when batch size reached
+	toSubmit := make(chan bool, maxSupportedBatchSkew)
+	// defer to clear the channels
+	defer func() {
+		for {
+			select {
+			case <-toSubmit:
+			case <-m.ProducedSizeCh:
+			default:
+				return
+			}
+		}
+	}()
+
+	go m.AccumulatedDataLoop(ctx, toSubmit)
+
 	for {
 		select {
 		// Context canceled
 		case <-ctx.Done():
 			return
-		case <-m.ShouldSubmitBatchCh: // Trigger by block production
+		case <-toSubmit: // Trigger by block production
 		case <-ticker.C: // trigger by max time
+			// reset the accumulated size when triggered by time,
+			// as we have less data than batch size, and we gonna submit all our data anyway
+			m.AccumulatedProducedSize.Store(0)
 		}
 
 		// modular submission methods have own retries mechanism.
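Aside: the deferred cleanup added to SubmitLoop above is subtle. On shutdown it drains both channels with a non-blocking select so that a producer stuck mid-send on the unbuffered ProducedSizeCh is released rather than leaked; the next patch hardens the producer side of the same handshake by also selecting on ctx.Done() around the send. A standalone sketch of the drain idiom follows, where drainAll is a hypothetical helper standing in for the inline defer:

package main

import "fmt"

// drainAll empties the given channels without ever blocking: each pass takes
// whatever is immediately available, and the default case exits once both
// channels are momentarily empty.
func drainAll(signals chan bool, sizes chan uint64) {
	for {
		select {
		case <-signals:
		case <-sizes:
		default:
			return
		}
	}
}

func main() {
	signals := make(chan bool, 3)
	sizes := make(chan uint64, 3)
	signals <- true
	sizes <- 42
	drainAll(signals, sizes)
	fmt.Println("drained:", len(signals), len(sizes)) // drained: 0 0
}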
@@ -35,6 +57,39 @@ func (m *Manager) SubmitLoop(ctx context.Context) { } } +func (m *Manager) AccumulatedDataLoop(ctx context.Context, toSubmit chan bool) { + for { + select { + case <-ctx.Done(): + return + case size := <-m.ProducedSizeCh: + total := m.AccumulatedProducedSize.Add(size) + + // Check if accumulated size is greater than the max size + // TODO: allow some tolerance for block size (aim for BlockBatchMaxSize +- 10%) + if total > m.Conf.BlockBatchMaxSizeBytes { + select { + case toSubmit <- true: + m.logger.Info("new batch accumulated, signal sent to submit the batch") + default: + m.logger.Error("new batch accumulated, but channel is full, stopping block production until the signal is consumed") + // emit unhealthy event for the node + evt := &events.DataHealthStatus{Error: fmt.Errorf("submission channel is full")} + uevent.MustPublish(ctx, m.Pubsub, evt, events.HealthStatusList) + // wait for the signal to be consumed + toSubmit <- true + m.logger.Info("resumed block production") + // emit healthy event for the node + evt = &events.DataHealthStatus{Error: nil} + uevent.MustPublish(ctx, m.Pubsub, evt, events.HealthStatusList) + } + m.AccumulatedProducedSize.Store(0) + } + } + + } +} + // HandleSubmissionTrigger processes the sublayer submission trigger event. It checks if there are new blocks produced since the last submission. // If there are, it attempts to submit a batch of blocks. It then attempts to produce an empty block to ensure IBC messages // pass through during the batch submission process due to proofs requires for ibc messages only exist on the next block. diff --git a/block/submit_test.go b/block/submit_test.go index 7f2110150..2f2ac3351 100644 --- a/block/submit_test.go +++ b/block/submit_test.go @@ -104,12 +104,11 @@ func TestBatchSubmissionFailedSubmission(t *testing.T) { assert.EqualValues(t, manager.Store.Height(), manager.SyncTarget.Load()) } -func TestBatchSubmissionAfterTimeout(t *testing.T) { +func TestSubmissionByTime(t *testing.T) { const ( // large batch size, so we expect the trigger to be the timeout - submitTimeout = 2 * time.Second + submitTimeout = 1 * time.Second blockTime = 200 * time.Millisecond - runTime = submitTimeout + 1*time.Second ) require := require.New(t) @@ -138,22 +137,85 @@ func TestBatchSubmissionAfterTimeout(t *testing.T) { require.Zero(manager.SyncTarget.Load()) var wg sync.WaitGroup - mCtx, cancel := context.WithTimeout(context.Background(), runTime) + mCtx, cancel := context.WithTimeout(context.Background(), 2*submitTimeout) defer cancel() wg.Add(2) // Add 2 because we have 2 goroutines go func() { - defer wg.Done() // Decrease counter when this goroutine finishes manager.ProduceBlockLoop(mCtx) + wg.Done() // Decrease counter when this goroutine finishes }() go func() { - defer wg.Done() // Decrease counter when this goroutine finishes manager.SubmitLoop(mCtx) + wg.Done() // Decrease counter when this goroutine finishes }() - <-mCtx.Done() wg.Wait() // Wait for all goroutines to finish require.True(manager.SyncTarget.Load() > 0) } + +func TestSubmissionByBatchSize(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + cases := []struct { + name string + blockBatchMaxSizeBytes uint64 + expectedSubmission bool + }{ + { + name: "block batch max size is fulfilled", + blockBatchMaxSizeBytes: 2000, + expectedSubmission: true, + }, + { + name: "block batch max size is not fulfilled", + blockBatchMaxSizeBytes: 100000, + expectedSubmission: false, + }, + } + + for _, c := range cases { + managerConfig := 
testutil.GetManagerConfig() + managerConfig.BlockBatchMaxSizeBytes = c.blockBatchMaxSizeBytes + manager, err := testutil.GetManager(managerConfig, nil, nil, 1, 1, 0, nil, nil) + require.NoError(err) + + // validate initial accumulated is zero + require.Equal(manager.AccumulatedProducedSize.Load(), uint64(0)) + assert.Equal(manager.Store.Height(), uint64(0)) + + var wg sync.WaitGroup + wg.Add(2) // Add 2 because we have 2 goroutines + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + go func() { + manager.ProduceBlockLoop(ctx) + wg.Done() // Decrease counter when this goroutine finishes + }() + + go func() { + manager.SubmitLoop(ctx) + wg.Done() // Decrease counter when this goroutine finishes + }() + + // wait for a block to be produced, but not long enough for the submission threshold to be reached + time.Sleep(200 * time.Millisecond) + // assert block produced but nothing submitted yet + assert.Greater(manager.Store.Height(), uint64(0)) + assert.Greater(manager.AccumulatedProducedSize.Load(), uint64(0)) + assert.Zero(manager.SyncTarget.Load()) + + wg.Wait() // Wait for all goroutines to finish + + if c.expectedSubmission { + assert.Positive(manager.SyncTarget.Load()) + } else { + assert.Zero(manager.SyncTarget.Load()) + } + } +} From c7ffbf12454da56d76ec66137fc60bceca36fd1c Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Wed, 8 May 2024 10:35:29 +0300 Subject: [PATCH 15/35] added ctx support for blocking signals --- block/produce.go | 8 ++++++-- block/production_test.go | 5 +++++ block/submit.go | 27 +++++++++++++++++---------- 3 files changed, 28 insertions(+), 12 deletions(-) diff --git a/block/produce.go b/block/produce.go index 3ddfa0233..5503fed7e 100644 --- a/block/produce.go +++ b/block/produce.go @@ -81,10 +81,14 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context) { isLastBlockEmpty := len(block.Data.Txs) == 0 resetForceCreationTimer(isLastBlockEmpty) - size := uint64(block.ToProto().Size()) + uint64(commit.ToProto().Size()) // Send the size to the accumulated size channel // This will block in case the submitter is too slow and its buffer is full + size := uint64(block.ToProto().Size()) + uint64(commit.ToProto().Size()) + select { + case <-ctx.Done(): + return + case m.ProducedSizeCh <- size: + } } } } diff --git a/block/production_test.go b/block/production_test.go index cdcd30b80..98015230d 100644 --- a/block/production_test.go +++ b/block/production_test.go @@ -59,6 +59,11 @@ func TestCreateEmptyBlocksEnableDisable(t *testing.T) { defer cancel() go manager.ProduceBlockLoop(mCtx) go managerWithEmptyBlocks.ProduceBlockLoop(mCtx) + + buf1 := make(chan bool, 100) // dummy to avoid unhealthy event + buf2 := make(chan bool, 100) // dummy to avoid unhealthy event + go manager.AccumulatedDataLoop(mCtx, buf1) + go managerWithEmptyBlocks.AccumulatedDataLoop(mCtx, buf2) <-mCtx.Done() require.Greater(manager.Store.Height(), initialHeight) diff --git a/block/submit.go b/block/submit.go index 01498f5ee..3a110b069 100644 --- a/block/submit.go +++ b/block/submit.go @@ -20,30 +20,30 @@ func (m *Manager) SubmitLoop(ctx context.Context) { defer ticker.Stop() // get produced size from the block production loop and signal to submit the batch when batch size reached - toSubmit := make(chan bool, maxSupportedBatchSkew) - // defer to clear the channels + submitByAccumulatedSizeCh := make(chan bool, maxSupportedBatchSkew) + go m.AccumulatedDataLoop(ctx, submitByAccumulatedSizeCh) + + // defer func to clear the channels defer func() { for { select { -
case <-toSubmit: case <-m.ProducedSizeCh: + case <-m.producedSizeCh: + case <-submitByAccumulatedSizeCh: default: return } } }() - go m.AccumulatedDataLoop(ctx, toSubmit) - for { select { // Context canceled case <-ctx.Done(): return - case <-toSubmit: // Trigger by block production + case <-submitByAccumulatedSizeCh: // Trigger by block production case <-ticker.C: // trigger by max time // reset the accumulated size when triggered by time, - // as we have less data than batch size, and we are going to submit all our data anyway + // as we are going to submit all our data anyway m.AccumulatedProducedSize.Store(0) } @@ -57,6 +57,10 @@ func (m *Manager) SubmitLoop(ctx context.Context) { } } +// AccumulatedDataLoop is the main loop for accumulating the produced data size. +// It is triggered by the ProducedSizeCh channel, which the block production loop signals when a new block is produced. +// It accumulates the size of the produced data and triggers the submission of the batch when the accumulated size is greater than the max size. +// It also emits a health status event when the submission channel is full. func (m *Manager) AccumulatedDataLoop(ctx context.Context, toSubmit chan bool) { for { select { case <-ctx.Done(): return case size := <-m.ProducedSizeCh: total := m.AccumulatedProducedSize.Add(size) // Check if accumulated size is greater than the max size - // TODO: allow some tolerance for block size (aim for BlockBatchMaxSize +- 10%) + // TODO: allow some tolerance for block size (e.g support for BlockBatchMaxSize +- 10%) if total > m.Conf.BlockBatchMaxSizeBytes { select { case toSubmit <- true: @@ -77,7 +81,11 @@ func (m *Manager) AccumulatedDataLoop(ctx context.Context, toSubmit chan bool) { evt := &events.DataHealthStatus{Error: fmt.Errorf("submission channel is full")} uevent.MustPublish(ctx, m.Pubsub, evt, events.HealthStatusList) // wait for the signal to be consumed - toSubmit <- true + select { + case <-ctx.Done(): + return + case toSubmit <- true: + } m.logger.Info("resumed block production") // emit healthy event for the node evt = &events.DataHealthStatus{Error: nil} @@ -86,7 +94,6 @@ func (m *Manager) AccumulatedDataLoop(ctx context.Context, toSubmit chan bool) { m.AccumulatedProducedSize.Store(0) } } - } } From a501fa257c23efb556483462bfe93538313c166c Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Wed, 8 May 2024 11:07:58 +0300 Subject: [PATCH 16/35] cleanup.
comments --- block/manager.go | 9 +++---- block/produce.go | 4 +-- block/production_test.go | 17 +++++++------ block/submit.go | 17 ++++++------- block/submit_test.go | 6 +++-- da/avail/avail.go | 4 +-- node/node.go | 34 -------------------------- p2p/client.go | 1 - rpc/server.go | 2 +- settlement/dymension/dymension.go | 4 +-- settlement/dymension/dymension_test.go | 9 ------- 11 files changed, 33 insertions(+), 74 deletions(-) diff --git a/block/manager.go b/block/manager.go index 117e8705b..4b2eed35d 100644 --- a/block/manager.go +++ b/block/manager.go @@ -62,12 +62,11 @@ type Manager struct { SyncTarget atomic.Uint64 // Block production - ProducedSizeCh chan uint64 // channel for the producer to report the size of the block it produced - produceEmptyBlockCh chan bool + producedSizeCh chan uint64 // channel for the producer to report the size of the block it produced // Submitter - AccumulatedProducedSize atomic.Uint64 - lastSubmissionTime atomic.Int64 + AccumulatedBatchSize atomic.Uint64 + lastSubmissionTime atomic.Int64 /* Protect against processing two blocks at once when there are two routines handling incoming gossiped blocks, @@ -124,7 +123,7 @@ func NewManager( SLClient: settlementClient, Retriever: dalc.(da.BatchRetriever), SyncTargetDiode: diodes.NewOneToOne(1, nil), - ProducedSizeCh: make(chan uint64), + producedSizeCh: make(chan uint64), logger: logger, blockCache: make(map[uint64]CachedBlock), } diff --git a/block/produce.go b/block/produce.go index 5503fed7e..3613bee43 100644 --- a/block/produce.go +++ b/block/produce.go @@ -52,7 +52,7 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context) { for { select { - case <-ctx.Done(): // Context canceled + case <-ctx.Done(): return case <-forceCreationTimer.C: // Force block creation produceEmptyBlock = true @@ -87,7 +87,7 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context) { select { case <-ctx.Done(): return - case m.ProducedSizeCh <- size: + case m.producedSizeCh <- size: } } } diff --git a/block/production_test.go b/block/production_test.go index 98015230d..a48ba8372 100644 --- a/block/production_test.go +++ b/block/production_test.go @@ -101,7 +101,8 @@ func TestCreateEmptyBlocksEnableDisable(t *testing.T) { } func TestCreateEmptyBlocksNew(t *testing.T) { - t.Skip("FIXME: fails to submit tx to test the empty blocks feature") // TODO(#352) + // TODO(https://github.com/dymensionxyz/dymint/issues/352) + t.Skip("FIXME: fails to submit tx to test the empty blocks feature") assert := assert.New(t) require := require.New(t) app := testutil.GetAppMock() @@ -212,6 +213,8 @@ func TestInvalidBatch(t *testing.T) { } } +// TestStopBlockProduction tests that block production stops when the submitter is full +// and resumes when the submitter is ready to accept more batches func TestStopBlockProduction(t *testing.T) { assert := assert.New(t) require := require.New(t) @@ -222,13 +225,13 @@ func TestStopBlockProduction(t *testing.T) { require.NoError(err) // validate initial accumulated is zero - require.Equal(manager.AccumulatedProducedSize.Load(), uint64(0)) + require.Equal(manager.AccumulatedBatchSize.Load(), uint64(0)) assert.Equal(manager.Store.Height(), uint64(0)) // subscribe to health status event - eventRecievedCh := make(chan error) + eventReceivedCh := make(chan error) cb := func(event pubsub.Message) { - eventRecievedCh <- event.Data().(*events.DataHealthStatus).Error + eventReceivedCh <- event.Data().(*events.DataHealthStatus).Error } go uevent.MustSubscribe(context.Background(), manager.Pubsub, "HealthStatusHandler",
events.QueryHealthStatus, cb, log.TestingLogger()) @@ -252,14 +255,14 @@ func TestStopBlockProduction(t *testing.T) { // validate block production works time.Sleep(400 * time.Millisecond) assert.Greater(manager.Store.Height(), uint64(0)) - assert.Greater(manager.AccumulatedProducedSize.Load(), uint64(0)) + assert.Greater(manager.AccumulatedBatchSize.Load(), uint64(0)) // we don't read from the submit channel, so we assume it gets full // we expect the block production to stop and an unhealthy event to be emitted select { case <-ctx.Done(): t.Error("expected unhealthy event") - case err := <-eventRecievedCh: + case err := <-eventReceivedCh: assert.Error(err) } @@ -276,7 +279,7 @@ func TestStopBlockProduction(t *testing.T) { select { case <-ctx.Done(): t.Error("expected health event") - case err := <-eventRecievedCh: + case err := <-eventReceivedCh: assert.NoError(err) } diff --git a/block/submit.go b/block/submit.go index 3a110b069..09d19e1f7 100644 --- a/block/submit.go +++ b/block/submit.go @@ -23,11 +23,11 @@ func (m *Manager) SubmitLoop(ctx context.Context) { submitByAccumulatedSizeCh := make(chan bool, maxSupportedBatchSkew) go m.AccumulatedDataLoop(ctx, submitByAccumulatedSizeCh) - // defer func to clear the channels + // defer func to clear the channels to release blocked goroutines on shutdown defer func() { for { select { - case <-m.ProducedSizeCh: + case <-m.producedSizeCh: case <-submitByAccumulatedSizeCh: default: return @@ -37,14 +37,13 @@ func (m *Manager) SubmitLoop(ctx context.Context) { for { select { - // Context canceled case <-ctx.Done(): return - case <-submitByAccumulatedSizeCh: // Trigger by block production - case <-ticker.C: // trigger by max time + case <-submitByAccumulatedSizeCh: // block production + case <-ticker.C: // max time // reset the accumulated size when triggered by time, // as we are going to submit all our data anyway - m.AccumulatedProducedSize.Store(0) + m.AccumulatedBatchSize.Store(0) } // modular submission methods have their own retry mechanisms.
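The full-channel branch above doubles as backpressure: the producer's blocking send halts block production, and health status events bracket the stall. A compact, runnable sketch of that idiom under assumed names (setHealth stands in for publishing a DataHealthStatus event; it is not the actual dymint API):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// signalOrBlock tries a non-blocking send first; when the channel is full it
// reports unhealthy, blocks until the consumer frees a slot (or ctx ends),
// then reports healthy again.
func signalOrBlock(ctx context.Context, toSubmit chan<- bool, setHealth func(error)) {
	select {
	case toSubmit <- true: // fast path: the submitter is keeping up
		return
	default:
	}
	setHealth(errors.New("submission channel is full"))
	select {
	case <-ctx.Done():
	case toSubmit <- true: // unblocks once the submitter consumes a signal
		setHealth(nil)
	}
}

func main() {
	ctx := context.Background()
	toSubmit := make(chan bool, 1)
	setHealth := func(err error) { fmt.Println("health:", err) }

	signalOrBlock(ctx, toSubmit, setHealth) // fast path: buffer has room
	go func() {
		time.Sleep(100 * time.Millisecond) // let the next call hit the full channel first
		<-toSubmit
		<-toSubmit // drain both signals, releasing the blocked sender
	}()
	signalOrBlock(ctx, toSubmit, setHealth) // slow path: stalls, then recovers
}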
@@ -66,8 +65,8 @@ func (m *Manager) AccumulatedDataLoop(ctx context.Context, toSubmit chan bool) { select { case <-ctx.Done(): return - case size := <-m.ProducedSizeCh: - total := m.AccumulatedProducedSize.Add(size) + case size := <-m.producedSizeCh: + total := m.AccumulatedBatchSize.Add(size) // Check if accumulated size is greater than the max size // TODO: allow some tolerance for block size (e.g support for BlockBatchMaxSize +- 10%) @@ -91,7 +90,7 @@ func (m *Manager) AccumulatedDataLoop(ctx context.Context, toSubmit chan bool) { evt = &events.DataHealthStatus{Error: nil} uevent.MustPublish(ctx, m.Pubsub, evt, events.HealthStatusList) } - m.AccumulatedProducedSize.Store(0) + m.AccumulatedBatchSize.Store(0) } } } diff --git a/block/submit_test.go b/block/submit_test.go index 2f2ac3351..8e4534336 100644 --- a/block/submit_test.go +++ b/block/submit_test.go @@ -104,6 +104,7 @@ func TestBatchSubmissionFailedSubmission(t *testing.T) { assert.EqualValues(t, manager.Store.Height(), manager.SyncTarget.Load()) } +// TestSubmissionByTime tests the submission trigger by time func TestSubmissionByTime(t *testing.T) { const ( // large batch size, so we expect the trigger to be the timeout @@ -156,6 +157,7 @@ func TestSubmissionByTime(t *testing.T) { require.True(manager.SyncTarget.Load() > 0) } +// TestSubmissionByBatchSize tests the submission trigger by batch size func TestSubmissionByBatchSize(t *testing.T) { assert := assert.New(t) require := require.New(t) @@ -184,7 +186,7 @@ func TestSubmissionByBatchSize(t *testing.T) { require.NoError(err) // validate initial accumulated is zero - require.Equal(manager.AccumulatedProducedSize.Load(), uint64(0)) + require.Equal(manager.AccumulatedBatchSize.Load(), uint64(0)) assert.Equal(manager.Store.Height(), uint64(0)) var wg sync.WaitGroup @@ -207,7 +209,7 @@ func TestSubmissionByBatchSize(t *testing.T) { time.Sleep(200 * time.Millisecond) // assert block produced but nothing submitted yet assert.Greater(manager.Store.Height(), uint64(0)) - assert.Greater(manager.AccumulatedProducedSize.Load(), uint64(0)) + assert.Greater(manager.AccumulatedBatchSize.Load(), uint64(0)) assert.Zero(manager.SyncTarget.Load()) wg.Wait() // Wait for all goroutines to finish diff --git a/da/avail/avail.go b/da/avail/avail.go index 0d4e66a22..e5b20eabd 100644 --- a/da/avail/avail.go +++ b/da/avail/avail.go @@ -392,11 +392,11 @@ func (c *DataAvailabilityLayerClient) broadcastTx(tx []byte) (uint64, error) { inclusionTimer.Reset(c.txInclusionTimeout) continue } else { - recievedStatus, err := status.MarshalJSON() + receivedStatus, err := status.MarshalJSON() if err != nil { return 0, fmt.Errorf("MarshalJSON of received status: %w", err) } - c.logger.Debug("unsupported status, still waiting for inclusion", "status", string(recievedStatus)) + c.logger.Debug("unsupported status, still waiting for inclusion", "status", string(receivedStatus)) continue } case <-inclusionTimer.C: diff --git a/node/node.go b/node/node.go index f9440bbc6..1ad125f2b 100644 --- a/node/node.go +++ b/node/node.go @@ -4,10 +4,8 @@ import ( "context" "encoding/base64" "encoding/json" - "errors" "fmt" "net/http" - "sync" "time" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -51,36 +49,6 @@ const ( genesisChunkSize = 16 * 1024 * 1024 // 16 MiB ) -type baseLayerHealth struct { - settlement error - da error - mu sync.RWMutex -} - -func (bl *baseLayerHealth) setSettlement(err error) { - bl.mu.Lock() - defer bl.mu.Unlock() - if err != nil { - err = fmt.Errorf("settlement: %w", err) - } - bl.settlement 
= err -} - -func (bl *baseLayerHealth) setDA(err error) { - bl.mu.Lock() - defer bl.mu.Unlock() - if err != nil { - err = fmt.Errorf("da: %w", err) - } - bl.da = err -} - -func (bl *baseLayerHealth) get() error { - bl.mu.RLock() - defer bl.mu.RUnlock() - return errors.Join(bl.settlement, bl.da) -} - // Node represents a client node in Dymint network. // It connects all the components and orchestrates their work. type Node struct { @@ -110,8 +78,6 @@ type Node struct { BlockIndexer indexer.BlockIndexer IndexerService *txindex.IndexerService - baseLayerHealth baseLayerHealth - // keep context here only because of API compatibility // - it's used in `OnStart` (defined in service.Service interface) Ctx context.Context diff --git a/p2p/client.go b/p2p/client.go index 0ec0cfb51..b33bc7889 100644 --- a/p2p/client.go +++ b/p2p/client.go @@ -419,7 +419,6 @@ func (c *Client) bootstrapLoop(ctx context.Context) { defer ticker.Stop() for { select { - // Context canceled case <-ctx.Done(): return case <-ticker.C: diff --git a/rpc/server.go b/rpc/server.go index 0dc23600e..feea78a00 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -108,7 +108,7 @@ func (s *Server) startEventListener() { func (s *Server) onNodeHealthUpdate(event pubsub.Message) { eventData := event.Data().(*events.DataHealthStatus) if eventData.Error != nil { - s.Logger.Error("node is unhealthy: got error health check from sublayer", "error", eventData.Error) + s.Logger.Error("node is unhealthy", "error", eventData.Error) } s.healthMU.Lock() defer s.healthMU.Unlock() diff --git a/settlement/dymension/dymension.go b/settlement/dymension/dymension.go index a201fa2ee..2d5c94466 100644 --- a/settlement/dymension/dymension.go +++ b/settlement/dymension/dymension.go @@ -231,7 +231,7 @@ func (d *HubClient) PostBatch(batch *types.Batch, daClient da.Client, daResult * err := d.submitBatch(msgUpdateState) if err != nil { d.logger.Error( - "submit batch", + "Submit batch", "startHeight", batch.StartHeight, "endHeight", @@ -266,7 +266,7 @@ func (d *HubClient) PostBatch(batch *types.Batch, daClient da.Client, daResult * includedBatch, err := d.waitForBatchInclusion(batch.StartHeight) if err != nil { d.logger.Error( - "wait for batch inclusion", + "Wait for batch inclusion", "startHeight", batch.StartHeight, "endHeight", diff --git a/settlement/dymension/dymension_test.go b/settlement/dymension/dymension_test.go index 61734b8dc..68a6b7b03 100644 --- a/settlement/dymension/dymension_test.go +++ b/settlement/dymension/dymension_test.go @@ -10,8 +10,6 @@ import ( "google.golang.org/grpc/status" - "github.com/dymensionxyz/dymint/gerr" - "github.com/tendermint/tendermint/libs/log" "github.com/cosmos/cosmos-sdk/crypto/keyring" @@ -118,7 +116,6 @@ func TestPostBatch(t *testing.T) { isBatchAcceptedHubEvent bool shouldMockBatchIncluded bool isBatchIncludedSuccess bool - expectedError error }{ { name: "SubmitBatchFailure", @@ -126,7 +123,6 @@ func TestPostBatch(t *testing.T) { isBatchAcceptedHubEvent: false, shouldMockBatchIncluded: true, isBatchIncludedSuccess: false, - expectedError: submitBatchError, }, { name: "SubmitBatchSuccessNoBatchAcceptedHubEventNotIncluded", @@ -134,7 +130,6 @@ func TestPostBatch(t *testing.T) { isBatchAcceptedHubEvent: false, shouldMockBatchIncluded: true, isBatchIncludedSuccess: false, - expectedError: gerr.ErrNotFound, }, { name: "SubmitBatchSuccessNotAcceptedYesIncluded", @@ -142,14 +137,12 @@ func TestPostBatch(t *testing.T) { isBatchAcceptedHubEvent: false, shouldMockBatchIncluded: true, isBatchIncludedSuccess: true, - 
expectedError: nil, }, { name: "SubmitBatchSuccessAndAccepted", isBatchSubmitSuccess: true, isBatchAcceptedHubEvent: true, shouldMockBatchIncluded: false, - expectedError: nil, }, } @@ -212,8 +205,6 @@ func TestPostBatch(t *testing.T) { } } - time.Sleep(300 * time.Millisecond) - // Stop the hub client and wait for it to stop err = hubClient.Stop() require.NoError(err) From 72a88938b6008caded688742eba456bf300dad47 Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Thu, 9 May 2024 11:18:24 +0300 Subject: [PATCH 17/35] moved indexers to own package --- .../blockindexer}/block.go | 0 .../block => indexers/blockindexer}/kv/kv.go | 18 +++++++++-------- .../blockindexer}/kv/kv_test.go | 2 +- .../blockindexer}/kv/util.go | 0 .../blockindexer}/null/null.go | 2 +- .../blockindexer}/query_range.go | 0 {state => indexers}/txindex/indexer.go | 2 -- .../txindex/indexer_service.go | 2 +- .../txindex/indexer_service_test.go | 6 +++--- {state => indexers}/txindex/kv/kv.go | 20 +++++++++---------- .../txindex/kv/kv_bench_test.go | 0 {state => indexers}/txindex/kv/kv_test.go | 2 +- {state => indexers}/txindex/kv/utils.go | 0 {state => indexers}/txindex/kv/utils_test.go | 0 {state => indexers}/txindex/null/null.go | 0 node/node.go | 8 ++++---- 16 files changed, 31 insertions(+), 31 deletions(-) rename {state/indexer => indexers/blockindexer}/block.go (100%) rename {state/indexer/block => indexers/blockindexer}/kv/kv.go (95%) rename {state/indexer/block => indexers/blockindexer}/kv/kv_test.go (97%) rename {state/indexer/block => indexers/blockindexer}/kv/util.go (100%) rename {state/indexer/block => indexers/blockindexer}/null/null.go (90%) rename {state/indexer => indexers/blockindexer}/query_range.go (100%) rename {state => indexers}/txindex/indexer.go (95%) rename {state => indexers}/txindex/indexer_service.go (97%) rename {state => indexers}/txindex/indexer_service_test.go (91%) rename {state => indexers}/txindex/kv/kv.go (96%) rename {state => indexers}/txindex/kv/kv_bench_test.go (100%) rename {state => indexers}/txindex/kv/kv_test.go (99%) rename {state => indexers}/txindex/kv/utils.go (100%) rename {state => indexers}/txindex/kv/utils_test.go (100%) rename {state => indexers}/txindex/null/null.go (100%) diff --git a/state/indexer/block.go b/indexers/blockindexer/block.go similarity index 100% rename from state/indexer/block.go rename to indexers/blockindexer/block.go diff --git a/state/indexer/block/kv/kv.go b/indexers/blockindexer/kv/kv.go similarity index 95% rename from state/indexer/block/kv/kv.go rename to indexers/blockindexer/kv/kv.go index 17fd430ba..f10b9cd33 100644 --- a/state/indexer/block/kv/kv.go +++ b/indexers/blockindexer/kv/kv.go @@ -12,10 +12,12 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/types" - "github.com/dymensionxyz/dymint/state/indexer" + indexer "github.com/dymensionxyz/dymint/indexers/blockindexer" "github.com/dymensionxyz/dymint/store" + "github.com/dymensionxyz/dymint/types" + + tmtypes "github.com/tendermint/tendermint/types" ) var _ indexer.BlockIndexer = (*BlockerIndexer)(nil) @@ -24,10 +26,10 @@ var _ indexer.BlockIndexer = (*BlockerIndexer)(nil) // events with an underlying KV store. Block events are indexed by their height, // such that matching search criteria returns the respective block height(s). 
type BlockerIndexer struct { - store store.KVStore + store types.KVStore } -func New(store store.KVStore) *BlockerIndexer { +func New(store types.KVStore) *BlockerIndexer { return &BlockerIndexer{ store: store, } @@ -54,7 +56,7 @@ func (idx *BlockerIndexer) Has(height int64) (bool, error) { // primary key: encode(block.height | height) => encode(height) // BeginBlock events: encode(eventType.eventAttr|eventValue|height|begin_block) => encode(height) // EndBlock events: encode(eventType.eventAttr|eventValue|height|end_block) => encode(height) -func (idx *BlockerIndexer) Index(bh types.EventDataNewBlockHeader) error { +func (idx *BlockerIndexer) Index(bh tmtypes.EventDataNewBlockHeader) error { batch := idx.store.NewBatch() defer batch.Discard() @@ -255,7 +257,7 @@ LOOP: err error ) - if qr.Key == types.BlockHeightKey { + if qr.Key == tmtypes.BlockHeightKey { eventValue, err = parseValueFromPrimaryKey(it.Key()) } else { eventValue, err = parseValueFromEventKey(it.Key()) @@ -479,7 +481,7 @@ func (idx *BlockerIndexer) match( return filteredHeights, nil } -func (idx *BlockerIndexer) indexEvents(batch store.Batch, events []abci.Event, typ string, height int64) error { +func (idx *BlockerIndexer) indexEvents(batch types.StoreBatch, events []abci.Event, typ string, height int64) error { heightBz := int64ToBytes(height) for _, event := range events { @@ -495,7 +497,7 @@ func (idx *BlockerIndexer) indexEvents(batch store.Batch, events []abci.Event, t // index iff the event specified index:true and it's not a reserved event compositeKey := fmt.Sprintf("%s.%s", event.Type, string(attr.Key)) - if compositeKey == types.BlockHeightKey { + if compositeKey == tmtypes.BlockHeightKey { return fmt.Errorf("event type and attribute key \"%s\" is reserved; please use a different key", compositeKey) } diff --git a/state/indexer/block/kv/kv_test.go b/indexers/blockindexer/kv/kv_test.go similarity index 97% rename from state/indexer/block/kv/kv_test.go rename to indexers/blockindexer/kv/kv_test.go index a44abff88..55bb88539 100644 --- a/state/indexer/block/kv/kv_test.go +++ b/indexers/blockindexer/kv/kv_test.go @@ -10,7 +10,7 @@ import ( "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" - blockidxkv "github.com/dymensionxyz/dymint/state/indexer/block/kv" + blockidxkv "github.com/dymensionxyz/dymint/indexers/blockindexer/kv" "github.com/dymensionxyz/dymint/store" ) diff --git a/state/indexer/block/kv/util.go b/indexers/blockindexer/kv/util.go similarity index 100% rename from state/indexer/block/kv/util.go rename to indexers/blockindexer/kv/util.go diff --git a/state/indexer/block/null/null.go b/indexers/blockindexer/null/null.go similarity index 90% rename from state/indexer/block/null/null.go rename to indexers/blockindexer/null/null.go index 3f4f65e6f..62658e00e 100644 --- a/state/indexer/block/null/null.go +++ b/indexers/blockindexer/null/null.go @@ -4,7 +4,7 @@ import ( "context" "errors" - "github.com/dymensionxyz/dymint/state/indexer" + indexer "github.com/dymensionxyz/dymint/indexers/blockindexer" "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) diff --git a/state/indexer/query_range.go b/indexers/blockindexer/query_range.go similarity index 100% rename from state/indexer/query_range.go rename to indexers/blockindexer/query_range.go diff --git a/state/txindex/indexer.go b/indexers/txindex/indexer.go similarity index 95% rename from state/txindex/indexer.go rename to indexers/txindex/indexer.go index 
388d47c18..6d9f6807d 100644 --- a/state/txindex/indexer.go +++ b/indexers/txindex/indexer.go @@ -8,8 +8,6 @@ import ( "github.com/tendermint/tendermint/libs/pubsub/query" ) -// XXX/TODO: These types should be moved to the indexer package. - // TxIndexer interface defines methods to index and search transactions. type TxIndexer interface { // AddBatch analyzes, indexes and stores a batch of transactions. diff --git a/state/txindex/indexer_service.go b/indexers/txindex/indexer_service.go similarity index 97% rename from state/txindex/indexer_service.go rename to indexers/txindex/indexer_service.go index b54efdcf9..8fd936c57 100644 --- a/state/txindex/indexer_service.go +++ b/indexers/txindex/indexer_service.go @@ -3,7 +3,7 @@ package txindex import ( "context" - "github.com/dymensionxyz/dymint/state/indexer" + indexer "github.com/dymensionxyz/dymint/indexers/blockindexer" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/types" ) diff --git a/state/txindex/indexer_service_test.go b/indexers/txindex/indexer_service_test.go similarity index 91% rename from state/txindex/indexer_service_test.go rename to indexers/txindex/indexer_service_test.go index dcac5fb99..3f68eb23b 100644 --- a/state/txindex/indexer_service_test.go +++ b/indexers/txindex/indexer_service_test.go @@ -9,9 +9,9 @@ import ( "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" - blockidxkv "github.com/dymensionxyz/dymint/state/indexer/block/kv" - "github.com/dymensionxyz/dymint/state/txindex" - "github.com/dymensionxyz/dymint/state/txindex/kv" + blockidxkv "github.com/dymensionxyz/dymint/indexers/blockindexer/kv" + "github.com/dymensionxyz/dymint/indexers/txindex" + "github.com/dymensionxyz/dymint/indexers/txindex/kv" "github.com/dymensionxyz/dymint/store" ) diff --git a/state/txindex/kv/kv.go b/indexers/txindex/kv/kv.go similarity index 96% rename from state/txindex/kv/kv.go rename to indexers/txindex/kv/kv.go index 85ddf8169..0c985154d 100644 --- a/state/txindex/kv/kv.go +++ b/indexers/txindex/kv/kv.go @@ -12,11 +12,11 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/types" + tmtypes "github.com/tendermint/tendermint/types" - "github.com/dymensionxyz/dymint/state/indexer" - "github.com/dymensionxyz/dymint/state/txindex" - "github.com/dymensionxyz/dymint/store" + indexer "github.com/dymensionxyz/dymint/indexers/blockindexer" + "github.com/dymensionxyz/dymint/indexers/txindex" + "github.com/dymensionxyz/dymint/types" ) const ( @@ -27,11 +27,11 @@ var _ txindex.TxIndexer = (*TxIndex)(nil) // TxIndex is the simplest possible indexer, backed by key-value storage (levelDB). type TxIndex struct { - store store.KVStore + store types.KVStore } // NewTxIndex creates new KV indexer. 
-func NewTxIndex(store store.KVStore) *TxIndex { +func NewTxIndex(store types.KVStore) *TxIndex { return &TxIndex{ store: store, } @@ -133,7 +133,7 @@ func (txi *TxIndex) Index(result *abci.TxResult) error { return b.Commit() } -func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store store.Batch) error { +func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store types.StoreBatch) error { for _, event := range result.Result.Events { // only index events with a non-empty type if len(event.Type) == 0 { @@ -282,7 +282,7 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul func lookForHash(conditions []query.Condition) (hash []byte, ok bool, err error) { for _, c := range conditions { - if c.CompositeKey == types.TxHashKey { + if c.CompositeKey == tmtypes.TxHashKey { decoded, err := hex.DecodeString(c.Operand.(string)) return decoded, true, err } @@ -293,7 +293,7 @@ func lookForHash(conditions []query.Condition) (hash []byte, ok bool, err error) // lookForHeight returns a height if there is an "height=X" condition. func lookForHeight(conditions []query.Condition) (height int64) { for _, c := range conditions { - if c.CompositeKey == types.TxHeightKey && c.Op == query.OpEqual { + if c.CompositeKey == tmtypes.TxHeightKey && c.Op == query.OpEqual { return c.Operand.(int64) } } @@ -574,7 +574,7 @@ func keyForEvent(key string, value []byte, result *abci.TxResult) []byte { func keyForHeight(result *abci.TxResult) []byte { return []byte(fmt.Sprintf("%s/%d/%d/%d", - types.TxHeightKey, + tmtypes.TxHeightKey, result.Height, result.Height, result.Index, diff --git a/state/txindex/kv/kv_bench_test.go b/indexers/txindex/kv/kv_bench_test.go similarity index 100% rename from state/txindex/kv/kv_bench_test.go rename to indexers/txindex/kv/kv_bench_test.go diff --git a/state/txindex/kv/kv_test.go b/indexers/txindex/kv/kv_test.go similarity index 99% rename from state/txindex/kv/kv_test.go rename to indexers/txindex/kv/kv_test.go index ae8d18853..023bb1f84 100644 --- a/state/txindex/kv/kv_test.go +++ b/indexers/txindex/kv/kv_test.go @@ -15,7 +15,7 @@ import ( tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/types" - "github.com/dymensionxyz/dymint/state/txindex" + "github.com/dymensionxyz/dymint/indexers/txindex" "github.com/dymensionxyz/dymint/store" ) diff --git a/state/txindex/kv/utils.go b/indexers/txindex/kv/utils.go similarity index 100% rename from state/txindex/kv/utils.go rename to indexers/txindex/kv/utils.go diff --git a/state/txindex/kv/utils_test.go b/indexers/txindex/kv/utils_test.go similarity index 100% rename from state/txindex/kv/utils_test.go rename to indexers/txindex/kv/utils_test.go diff --git a/state/txindex/null/null.go b/indexers/txindex/null/null.go similarity index 100% rename from state/txindex/null/null.go rename to indexers/txindex/null/null.go diff --git a/node/node.go b/node/node.go index 1ad125f2b..fe367d0b3 100644 --- a/node/node.go +++ b/node/node.go @@ -23,16 +23,16 @@ import ( "github.com/dymensionxyz/dymint/config" "github.com/dymensionxyz/dymint/da" daregistry "github.com/dymensionxyz/dymint/da/registry" + indexer "github.com/dymensionxyz/dymint/indexers/blockindexer" + blockidxkv "github.com/dymensionxyz/dymint/indexers/blockindexer/kv" + "github.com/dymensionxyz/dymint/indexers/txindex" + "github.com/dymensionxyz/dymint/indexers/txindex/kv" "github.com/dymensionxyz/dymint/mempool" mempoolv1 "github.com/dymensionxyz/dymint/mempool/v1" nodemempool 
"github.com/dymensionxyz/dymint/node/mempool" "github.com/dymensionxyz/dymint/p2p" "github.com/dymensionxyz/dymint/settlement" slregistry "github.com/dymensionxyz/dymint/settlement/registry" - "github.com/dymensionxyz/dymint/state/indexer" - blockidxkv "github.com/dymensionxyz/dymint/state/indexer/block/kv" - "github.com/dymensionxyz/dymint/state/txindex" - "github.com/dymensionxyz/dymint/state/txindex/kv" "github.com/dymensionxyz/dymint/store" ) From 9815da3c281b725ea7bc18516f4d2e5d33f700b4 Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Thu, 9 May 2024 11:40:26 +0300 Subject: [PATCH 18/35] renamed state and some struct fields --- block/block.go | 26 +++++++++++++------------- block/initchain.go | 6 +++--- block/manager.go | 10 +++++----- block/manager_test.go | 18 +++++++++--------- block/produce.go | 10 +++++----- block/pruning.go | 6 +++--- block/state.go | 2 +- testutil/types.go | 4 ++-- types/serialization.go | 8 ++++---- types/state.go | 32 +++++++++++++++++--------------- 10 files changed, 62 insertions(+), 60 deletions(-) diff --git a/block/block.go b/block/block.go index 02c7ce3c2..b548d08c5 100644 --- a/block/block.go +++ b/block/block.go @@ -48,12 +48,12 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta return fmt.Errorf("save block: %w", err) } - responses, err := m.Executor.ExecuteBlock(m.LastState, block) + responses, err := m.Executor.ExecuteBlock(m.State, block) if err != nil { return fmt.Errorf("execute block: %w", err) } - newState, err := m.Executor.UpdateStateFromResponses(responses, m.LastState, block) + newState, err := m.Executor.UpdateStateFromResponses(responses, m.State, block) if err != nil { return fmt.Errorf("update state from responses: %w", err) } @@ -66,13 +66,13 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta return fmt.Errorf("save block responses: %w", err) } - m.LastState = newState - batch, err = m.Store.UpdateState(m.LastState, batch) + m.State = newState + batch, err = m.Store.UpdateState(m.State, batch) if err != nil { batch.Discard() return fmt.Errorf("update state: %w", err) } - batch, err = m.Store.SaveValidators(block.Header.Height, m.LastState.Validators, batch) + batch, err = m.Store.SaveValidators(block.Header.Height, m.State.Validators, batch) if err != nil { batch.Discard() return fmt.Errorf("save validators: %w", err) @@ -91,7 +91,7 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta // Prune old heights, if requested by ABCI app. if retainHeight > 0 { - pruned, err := m.pruneBlocks(retainHeight) + pruned, err := m.pruneBlocks(uint64(retainHeight)) if err != nil { m.logger.Error("prune blocks", "retain_height", retainHeight, "err", err) } else { @@ -101,7 +101,7 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta // Update the state with the new app hash, last validators and store height from the commit. // Every one of those, if happens before commit, prevents us from re-executing the block in case failed during commit. 
- newState.LastValidators = m.LastState.Validators.Copy() + newState.LastValidators = m.State.Validators.Copy() newState.LastStoreHeight = block.Header.Height newState.BaseHeight = m.Store.Base() @@ -109,7 +109,7 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta if err != nil { return fmt.Errorf("final update state: %w", err) } - m.LastState = newState + m.State = newState if ok := m.Store.SetHeight(block.Header.Height); !ok { return fmt.Errorf("store set height: %d", block.Header.Height) @@ -172,15 +172,15 @@ func (m *Manager) UpdateStateFromApp() error { appHeight := uint64(proxyAppInfo.LastBlockHeight) // update the state with the hash, last store height and last validators. - m.LastState.AppHash = *(*[32]byte)(proxyAppInfo.LastBlockAppHash) - m.LastState.LastStoreHeight = appHeight - m.LastState.LastValidators = m.LastState.Validators.Copy() + m.State.AppHash = *(*[32]byte)(proxyAppInfo.LastBlockAppHash) + m.State.LastStoreHeight = appHeight + m.State.LastValidators = m.State.Validators.Copy() resp, err := m.Store.LoadBlockResponses(appHeight) if err != nil { return errorsmod.Wrap(err, "load block responses") } - copy(m.LastState.LastResultsHash[:], tmtypes.NewResults(resp.DeliverTxs).Hash()) + copy(m.State.LastResultsHash[:], tmtypes.NewResults(resp.DeliverTxs).Hash()) _, err = m.Store.UpdateState(m.LastState, nil) if err != nil { @@ -197,7 +197,7 @@ func (m *Manager) validateBlock(block *types.Block, commit *types.Commit) error // dymint to start proposer := m.SLClient.GetProposer() - return types.ValidateProposedTransition(m.LastState, block, commit, proposer) + return types.ValidateProposedTransition(m.State, block, commit, proposer) } func (m *Manager) gossipBlock(ctx context.Context, block types.Block, commit types.Commit) error { diff --git a/block/initchain.go b/block/initchain.go index 467246155..abd011bfe 100644 --- a/block/initchain.go +++ b/block/initchain.go @@ -23,10 +23,10 @@ func (m *Manager) RunInitChain(ctx context.Context) error { } // update the state with only the consensus pubkey - m.Executor.UpdateStateAfterInitChain(&m.LastState, res, gensisValSet) - m.Executor.UpdateMempoolAfterInitChain(&m.LastState) + m.Executor.UpdateStateAfterInitChain(&m.State, res, gensisValSet) + m.Executor.UpdateMempoolAfterInitChain(&m.State) - if _, err := m.Store.UpdateState(m.LastState, nil); err != nil { + if _, err := m.Store.UpdateState(m.State, nil); err != nil { return err } diff --git a/block/manager.go b/block/manager.go index 4b2eed35d..f6a2a935f 100644 --- a/block/manager.go +++ b/block/manager.go @@ -46,9 +46,9 @@ type Manager struct { ProposerKey crypto.PrivKey // Store and execution - Store store.Store - LastState types.State - Executor *Executor + Store types.Store + State types.State + Executor *Executor // Clients and servers Pubsub *pubsub.Server @@ -116,7 +116,7 @@ func NewManager( ProposerKey: proposerKey, Conf: conf, Genesis: genesis, - LastState: s, + State: s, Store: store, Executor: exec, DAClient: dalc, @@ -147,7 +147,7 @@ func (m *Manager) Start(ctx context.Context, isAggregator bool) error { } // Check if InitChain flow is needed - if m.LastState.IsGenesis() { + if m.State.IsGenesis() { m.logger.Info("Running InitChain") err := m.RunInitChain(ctx) diff --git a/block/manager_test.go b/block/manager_test.go index 1c9cccc71..b40b5cc88 100644 --- a/block/manager_test.go +++ b/block/manager_test.go @@ -69,15 +69,15 @@ func TestInitialState(t *testing.T) { name string store store.Store genesis *tmtypes.GenesisDoc - 
expectedInitialHeight int64 - expectedLastBlockHeight int64 + expectedInitialHeight uint64 + expectedLastBlockHeight uint64 expectedChainID string }{ { name: "empty store", store: emptyStore, genesis: genesis, - expectedInitialHeight: genesis.InitialHeight, + expectedInitialHeight: uint64(genesis.InitialHeight), expectedLastBlockHeight: 0, expectedChainID: genesis.ChainID, }, @@ -98,9 +98,9 @@ func TestInitialState(t *testing.T) { nil, pubsubServer, p2pClient, logger) assert.NoError(err) assert.NotNil(agg) - assert.Equal(c.expectedChainID, agg.LastState.ChainID) - assert.Equal(c.expectedInitialHeight, agg.LastState.InitialHeight) - assert.Equal(c.expectedLastBlockHeight, agg.LastState.LastBlockHeight) + assert.Equal(c.expectedChainID, agg.State.ChainID) + assert.Equal(c.expectedInitialHeight, agg.State.InitialHeight) + assert.Equal(c.expectedLastBlockHeight, agg.State.LastBlockHeight) }) } } @@ -185,7 +185,7 @@ func TestProduceNewBlock(t *testing.T) { require.NoError(t, err) // Validate state is updated with the commit hash assert.Equal(t, uint64(1), manager.Store.Height()) - assert.Equal(t, commitHash, manager.LastState.AppHash) + assert.Equal(t, commitHash, manager.State.AppHash) } func TestProducePendingBlock(t *testing.T) { @@ -211,7 +211,7 @@ func TestProducePendingBlock(t *testing.T) { _, _, err = manager.ProduceAndGossipBlock(context.Background(), true) require.NoError(t, err) // Validate state is updated with the block that was saved in the store - assert.Equal(t, block.Header.Hash(), *(*[32]byte)(manager.LastState.LastBlockID.Hash)) + assert.Equal(t, block.Header.Hash(), *(*[32]byte)(manager.State.LastBlockID.Hash)) } // Test that in case we fail after the proxy app commit, next time we won't commit again to the proxy app @@ -308,7 +308,7 @@ func TestProduceBlockFailAfterCommit(t *testing.T) { mockStore.ShoudFailUpdateState = tc.shouldFailUpdateState _, _, _ = manager.ProduceAndGossipBlock(context.Background(), true) require.Equal(tc.expectedStoreHeight, manager.Store.Height(), tc.name) - require.Equal(tc.expectedStateAppHash, manager.LastState.AppHash, tc.name) + require.Equal(tc.expectedStateAppHash, manager.State.AppHash, tc.name) storeState, err := manager.Store.LoadState() require.NoError(err) require.Equal(tc.expectedStateAppHash, storeState.AppHash, tc.name) diff --git a/block/produce.go b/block/produce.go index 3613bee43..dfe9d5a06 100644 --- a/block/produce.go +++ b/block/produce.go @@ -114,8 +114,8 @@ func (m *Manager) produceBlock(allowEmpty bool) (*types.Block, *types.Commit, er err error ) - if m.LastState.IsGenesis() { - newHeight = uint64(m.LastState.InitialHeight) + if m.State.IsGenesis() { + newHeight = uint64(m.State.InitialHeight) lastCommit = &types.Commit{} m.LastState.BaseHeight = newHeight if ok := m.Store.SetBase(newHeight); !ok { @@ -151,7 +151,7 @@ func (m *Manager) produceBlock(allowEmpty bool) (*types.Block, *types.Commit, er } else if !errors.Is(err, store.ErrKeyNotFound) { return nil, nil, fmt.Errorf("load block: height: %d: %w: %w", newHeight, err, ErrNonRecoverable) } else { - block = m.Executor.CreateBlock(newHeight, lastCommit, lastHeaderHash, m.LastState, m.Conf.BlockBatchMaxSizeBytes) + block = m.Executor.CreateBlock(newHeight, lastCommit, lastHeaderHash, m.State, m.Conf.BlockBatchMaxSizeBytes) if !allowEmpty && len(block.Data.Txs) == 0 { return nil, nil, fmt.Errorf("%w: %w", types.ErrSkippedEmptyBlock, ErrRecoverable) } @@ -216,14 +216,14 @@ func (m *Manager) createTMSignature(block *types.Block, proposerAddress []byte, 
tmprivkey.PubKey().Bytes() // Create a mock validator to sign the vote tmvalidator := tmtypes.NewMockPVWithParams(tmprivkey, false, false) - err := tmvalidator.SignVote(m.LastState.ChainID, v) + err := tmvalidator.SignVote(m.State.ChainID, v) if err != nil { return nil, err } // Update the vote with the signature vote.Signature = v.Signature pubKey := tmprivkey.PubKey() - voteSignBytes := tmtypes.VoteSignBytes(m.LastState.ChainID, v) + voteSignBytes := tmtypes.VoteSignBytes(m.State.ChainID, v) if !pubKey.VerifySignature(voteSignBytes, vote.Signature) { return nil, fmt.Errorf("wrong signature") } diff --git a/block/pruning.go b/block/pruning.go index 525e89840..27bb60025 100644 --- a/block/pruning.go +++ b/block/pruning.go @@ -4,14 +4,14 @@ import ( "fmt" ) -func (m *Manager) pruneBlocks(retainHeight int64) (uint64, error) { +func (m *Manager) pruneBlocks(retainHeight uint64) (uint64, error) { syncTarget := m.SyncTarget.Load() - if retainHeight > int64(syncTarget) { + if retainHeight > syncTarget { return 0, fmt.Errorf("cannot prune uncommitted blocks") } - pruned, err := m.Store.PruneBlocks(retainHeight) + pruned, err := m.Store.PruneBlocks(m.State.BaseHeight, retainHeight) if err != nil { return 0, fmt.Errorf("prune block store: %w", err) } diff --git a/block/state.go b/block/state.go index 6c5770b65..e41093659 100644 --- a/block/state.go +++ b/block/state.go @@ -37,7 +37,7 @@ func (e *Executor) updateState(state types.State, block *types.Block, abciRespon Version: state.Version, ChainID: state.ChainID, InitialHeight: state.InitialHeight, - LastBlockHeight: int64(block.Header.Height), + LastBlockHeight: block.Header.Height, LastBlockTime: time.Unix(0, int64(block.Header.Time)), LastBlockID: tmtypes.BlockID{ Hash: hash[:], diff --git a/testutil/types.go b/testutil/types.go index 4a00e24d2..efe5a1ee1 100644 --- a/testutil/types.go +++ b/testutil/types.go @@ -196,7 +196,7 @@ func GenerateRandomValidatorSet() *tmtypes.ValidatorSet { func GenerateState(initialHeight int64, lastBlockHeight int64) types.State { return types.State{ ChainID: "test-chain", - InitialHeight: initialHeight, + InitialHeight: uint64(initialHeight), AppHash: [32]byte{}, LastResultsHash: getEmptyLastResultsHash(), Version: tmstate.Version{ @@ -205,7 +205,7 @@ func GenerateState(initialHeight int64, lastBlockHeight int64) types.State { App: AppVersion, }, }, - LastBlockHeight: lastBlockHeight, + LastBlockHeight: uint64(lastBlockHeight), LastValidators: GenerateRandomValidatorSet(), Validators: GenerateRandomValidatorSet(), NextValidators: GenerateRandomValidatorSet(), diff --git a/types/serialization.go b/types/serialization.go index 814dd3ea5..999f506cd 100644 --- a/types/serialization.go +++ b/types/serialization.go @@ -259,8 +259,8 @@ func (s *State) ToProto() (*pb.State, error) { return &pb.State{ Version: &s.Version, ChainId: s.ChainID, - InitialHeight: s.InitialHeight, - LastBlockHeight: s.LastBlockHeight, + InitialHeight: int64(s.InitialHeight), + LastBlockHeight: int64(s.LastBlockHeight), LastBlockID: s.LastBlockID.ToProto(), LastBlockTime: s.LastBlockTime, NextValidators: nextValidators, @@ -281,8 +281,8 @@ func (s *State) FromProto(other *pb.State) error { var err error s.Version = *other.Version s.ChainID = other.ChainId - s.InitialHeight = other.InitialHeight - s.LastBlockHeight = other.LastBlockHeight + s.InitialHeight = uint64(other.InitialHeight) + s.LastBlockHeight = uint64(other.LastBlockHeight) // TODO(omritoptix): remove this as this is only for backwards compatibility // with old state files that don't 
have this field. if other.LastStoreHeight == 0 && other.LastBlockHeight > 1 { diff --git a/types/state.go b/types/state.go index 4715bf034..316c7eaf3 100644 --- a/types/state.go +++ b/types/state.go @@ -2,6 +2,7 @@ package types import ( "fmt" + "sync/atomic" "time" // TODO(tzdybal): copy to local project? @@ -12,28 +13,16 @@ import ( "github.com/tendermint/tendermint/version" ) -// InitStateVersion sets the Consensus.Block and Software versions, -// but leaves the Consensus.App version blank. -// The Consensus.App version will be set during the Handshake, once -// we hear from the app what protocol version it is running. -var InitStateVersion = tmstate.Version{ - Consensus: tmversion.Consensus{ - Block: version.BlockProtocol, - App: 0, - }, - Software: version.TMCoreSemVer, -} - // State contains information about current state of the blockchain. type State struct { Version tmstate.Version // immutable ChainID string - InitialHeight int64 // should be 1, not 0, when starting from height 1 + InitialHeight uint64 // should be 1, not 0, when starting from height 1 // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) - LastBlockHeight int64 + LastBlockHeight uint64 LastBlockID types.BlockID LastBlockTime time.Time @@ -61,6 +50,7 @@ type State struct { AppHash [32]byte } +// FIXME: move from types package // NewFromGenesisDoc reads blockchain State from genesis. func NewFromGenesisDoc(genDoc *types.GenesisDoc) (State, error) { err := genDoc.ValidateAndComplete() @@ -72,10 +62,22 @@ func NewFromGenesisDoc(genDoc *types.GenesisDoc) (State, error) { validatorSet = types.NewValidatorSet(nil) nextValidatorSet = types.NewValidatorSet(nil) + // InitStateVersion sets the Consensus.Block and Software versions, + // but leaves the Consensus.App version blank. + // The Consensus.App version will be set during the Handshake, once + // we hear from the app what protocol version it is running. 
+ var InitStateVersion = tmstate.Version{ + Consensus: tmversion.Consensus{ + Block: version.BlockProtocol, + App: 0, + }, + Software: version.TMCoreSemVer, + } + s := State{ Version: InitStateVersion, ChainID: genDoc.ChainID, - InitialHeight: genDoc.InitialHeight, + InitialHeight: uint64(genDoc.InitialHeight), LastBlockHeight: 0, LastBlockID: types.BlockID{}, From 5c7c4548733aa75c7c9e15b2a26b5e97d10a7157 Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Thu, 9 May 2024 11:57:00 +0300 Subject: [PATCH 19/35] moved store back to store package --- block/manager.go | 4 +-- da/avail/avail.go | 2 +- da/grpc/grpc.go | 2 +- da/grpc/mockserv/mockserv.go | 3 +- indexers/blockindexer/kv/kv.go | 7 ++-- indexers/txindex/kv/kv.go | 7 ++-- store/kv.go | 29 ---------------- store/pruning.go | 22 +++++-------- store/{types.go => storeIface.go} | 55 ++++++++++++++++++++----------- store/store_test.go | 3 +- 10 files changed, 59 insertions(+), 75 deletions(-) rename store/{types.go => storeIface.go} (62%) diff --git a/block/manager.go b/block/manager.go index f6a2a935f..9bd7888dc 100644 --- a/block/manager.go +++ b/block/manager.go @@ -10,6 +10,7 @@ import ( "time" "github.com/dymensionxyz/dymint/gerr" + "github.com/dymensionxyz/dymint/store" uevent "github.com/dymensionxyz/dymint/utils/event" @@ -28,7 +29,6 @@ import ( "github.com/dymensionxyz/dymint/da" "github.com/dymensionxyz/dymint/mempool" "github.com/dymensionxyz/dymint/settlement" - "github.com/dymensionxyz/dymint/store" "github.com/dymensionxyz/dymint/types" ) @@ -46,7 +46,7 @@ type Manager struct { ProposerKey crypto.PrivKey // Store and execution - Store types.Store + Store store.Store State types.State Executor *Executor diff --git a/da/avail/avail.go b/da/avail/avail.go index e5b20eabd..e6fe41d70 100644 --- a/da/avail/avail.go +++ b/da/avail/avail.go @@ -10,6 +10,7 @@ import ( "github.com/avast/retry-go/v4" "github.com/gogo/protobuf/proto" + "github.com/dymensionxyz/dymint/store" "github.com/dymensionxyz/dymint/types" gsrpc "github.com/centrifuge/go-substrate-rpc-client/v4" @@ -19,7 +20,6 @@ import ( "github.com/centrifuge/go-substrate-rpc-client/v4/signature" availtypes "github.com/centrifuge/go-substrate-rpc-client/v4/types" "github.com/dymensionxyz/dymint/da" - "github.com/dymensionxyz/dymint/store" pb "github.com/dymensionxyz/dymint/types/pb/dymint" "github.com/tendermint/tendermint/libs/pubsub" ) diff --git a/da/grpc/grpc.go b/da/grpc/grpc.go index 85c87d6ff..aff6da5a4 100644 --- a/da/grpc/grpc.go +++ b/da/grpc/grpc.go @@ -5,11 +5,11 @@ import ( "encoding/json" "strconv" + "github.com/dymensionxyz/dymint/store" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "github.com/dymensionxyz/dymint/da" - "github.com/dymensionxyz/dymint/store" "github.com/dymensionxyz/dymint/types" "github.com/dymensionxyz/dymint/types/pb/dalc" "github.com/tendermint/tendermint/libs/pubsub" diff --git a/da/grpc/mockserv/mockserv.go b/da/grpc/mockserv/mockserv.go index a467dfe01..c2bd61d1a 100644 --- a/da/grpc/mockserv/mockserv.go +++ b/da/grpc/mockserv/mockserv.go @@ -3,14 +3,15 @@ package mockserv import ( "context" "os" + "time" + "github.com/dymensionxyz/dymint/store" tmlog "github.com/tendermint/tendermint/libs/log" "google.golang.org/grpc" "github.com/dymensionxyz/dymint/da" grpcda "github.com/dymensionxyz/dymint/da/grpc" "github.com/dymensionxyz/dymint/da/local" - "github.com/dymensionxyz/dymint/store" "github.com/dymensionxyz/dymint/types" "github.com/dymensionxyz/dymint/types/pb/dalc" "github.com/dymensionxyz/dymint/types/pb/dymint" 
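Since patches 17-19 shuffle these interfaces between the types and store packages, a quick usage sketch of the KV surface as it ends up may help: the constructor and method set below are the ones shown in store/kv.go and storeIface.go in this series, with error handling elided for brevity.

package main

import (
	"fmt"

	"github.com/dymensionxyz/dymint/store"
)

func main() {
	kv := store.NewDefaultInMemoryKVStore() // badger-backed, in-memory

	// Batch accumulates writes and commits them atomically.
	b := kv.NewBatch()
	_ = b.Set([]byte("block/1"), []byte("one"))
	_ = b.Set([]byte("block/2"), []byte("two"))
	if err := b.Commit(); err != nil {
		panic(err)
	}

	// PrefixIterator traverses all keys under a given prefix.
	it := kv.PrefixIterator([]byte("block/"))
	defer it.Discard()
	for ; it.Valid(); it.Next() {
		fmt.Printf("%s => %s\n", it.Key(), it.Value())
	}
}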
diff --git a/indexers/blockindexer/kv/kv.go b/indexers/blockindexer/kv/kv.go index f10b9cd33..dce0c572c 100644 --- a/indexers/blockindexer/kv/kv.go +++ b/indexers/blockindexer/kv/kv.go @@ -15,7 +15,6 @@ import ( indexer "github.com/dymensionxyz/dymint/indexers/blockindexer" "github.com/dymensionxyz/dymint/store" - "github.com/dymensionxyz/dymint/types" tmtypes "github.com/tendermint/tendermint/types" ) @@ -26,10 +25,10 @@ var _ indexer.BlockIndexer = (*BlockerIndexer)(nil) // events with an underlying KV store. Block events are indexed by their height, // such that matching search criteria returns the respective block height(s). type BlockerIndexer struct { - store types.KVStore + store store.KVStore } -func New(store types.KVStore) *BlockerIndexer { +func New(store store.KVStore) *BlockerIndexer { return &BlockerIndexer{ store: store, } @@ -481,7 +480,7 @@ func (idx *BlockerIndexer) match( return filteredHeights, nil } -func (idx *BlockerIndexer) indexEvents(batch types.StoreBatch, events []abci.Event, typ string, height int64) error { +func (idx *BlockerIndexer) indexEvents(batch store.Batch, events []abci.Event, typ string, height int64) error { heightBz := int64ToBytes(height) for _, event := range events { diff --git a/indexers/txindex/kv/kv.go b/indexers/txindex/kv/kv.go index 0c985154d..03c926ede 100644 --- a/indexers/txindex/kv/kv.go +++ b/indexers/txindex/kv/kv.go @@ -16,6 +16,7 @@ import ( indexer "github.com/dymensionxyz/dymint/indexers/blockindexer" "github.com/dymensionxyz/dymint/indexers/txindex" + "github.com/dymensionxyz/dymint/store" "github.com/dymensionxyz/dymint/types" ) @@ -27,11 +28,11 @@ var _ txindex.TxIndexer = (*TxIndex)(nil) // TxIndex is the simplest possible indexer, backed by key-value storage (levelDB). type TxIndex struct { - store types.KVStore + store store.KVStore } // NewTxIndex creates new KV indexer. -func NewTxIndex(store types.KVStore) *TxIndex { +func NewTxIndex(store store.KVStore) *TxIndex { return &TxIndex{ store: store, } @@ -133,7 +134,7 @@ func (txi *TxIndex) Index(result *abci.TxResult) error { return b.Commit() } -func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store types.StoreBatch) error { +func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store store.Batch) error { for _, event := range result.Result.Events { // only index events with a non-empty type if len(event.Type) == 0 { diff --git a/store/kv.go b/store/kv.go index e118d2c0c..7dcab1a4f 100644 --- a/store/kv.go +++ b/store/kv.go @@ -6,35 +6,6 @@ import ( "github.com/dgraph-io/badger/v3" ) -// KVStore encapsulates key-value store abstraction, in minimalistic interface. -// -// KVStore MUST be thread safe. -type KVStore interface { - Get(key []byte) ([]byte, error) // Get gets the value for a key. - Set(key []byte, value []byte) error // Set updates the value for a key. - Delete(key []byte) error // Delete deletes a key. - NewBatch() Batch // NewBatch creates a new batch. - PrefixIterator(prefix []byte) Iterator // PrefixIterator creates iterator to traverse given prefix. -} - -// Batch enables batching of transactions. -type Batch interface { - Set(key, value []byte) error // Accumulates KV entries in a transaction. - Delete(key []byte) error // Deletes the given key. - Commit() error // Commits the transaction. - Discard() // Discards the transaction. -} - -// Iterator enables traversal over a given prefix. 
-type Iterator interface { - Valid() bool - Next() - Key() []byte - Value() []byte - Error() error - Discard() -} - // NewDefaultInMemoryKVStore builds KVStore that works in-memory (without accessing disk). func NewDefaultInMemoryKVStore() KVStore { db, err := badger.Open(badger.DefaultOptions("").WithInMemory(true)) diff --git a/store/pruning.go b/store/pruning.go index 09908ec5a..a469b633b 100644 --- a/store/pruning.go +++ b/store/pruning.go @@ -1,18 +1,15 @@ package store -import "fmt" +import ( + "fmt" +) // PruneBlocks removes block up to (but not including) a height. It returns number of blocks pruned. -func (s *DefaultStore) PruneBlocks(heightInt int64) (uint64, error) { - if heightInt <= 0 { - return 0, fmt.Errorf("height must be greater than 0") +func (s *DefaultStore) PruneBlocks(base, height uint64) (uint64, error) { + if base <= 0 { + return 0, fmt.Errorf("from height must be greater than 0") } - height := uint64(heightInt) - if height > s.Height() { - return 0, fmt.Errorf("cannot prune beyond the latest height %v", s.height) - } - base := s.Base() if height < base { return 0, fmt.Errorf("cannot prune to height %v, it is lower than base height %v", height, base) @@ -22,13 +19,10 @@ func (s *DefaultStore) PruneBlocks(heightInt int64) (uint64, error) { batch := s.db.NewBatch() defer batch.Discard() - flush := func(batch Batch, base uint64) error { + flush := func(batch Batch, height uint64) error { err := batch.Commit() if err != nil { - return fmt.Errorf("prune up to height %v: %w", base, err) - } - if ok := s.SetBase(base); !ok { - return fmt.Errorf("set base height: %v", base) + return fmt.Errorf("flush batch to disk: height %d: %w", height, err) } return nil } diff --git a/store/types.go b/store/storeIface.go similarity index 62% rename from store/types.go rename to store/storeIface.go index 2a531eaaa..f9417dcb7 100644 --- a/store/types.go +++ b/store/storeIface.go @@ -1,31 +1,44 @@ package store import ( + "github.com/dymensionxyz/dymint/types" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmtypes "github.com/tendermint/tendermint/types" - - "github.com/dymensionxyz/dymint/types" ) -// Store is minimal interface for storing and retrieving blocks, commits and state. -type Store interface { - // NewBatch creates a new db batch. - NewBatch() Batch - - // Height returns height of the highest block in store. - Height() uint64 - - // NextHeight returns the next height that expected to be stored in store. - NextHeight() uint64 +// KVStore encapsulates key-value store abstraction, in minimalistic interface. +// +// KVStore MUST be thread safe. +type KVStore interface { + Get(key []byte) ([]byte, error) // Get gets the value for a key. + Set(key []byte, value []byte) error // Set updates the value for a key. + Delete(key []byte) error // Delete deletes a key. + NewBatch() Batch // NewBatch creates a new batch. + PrefixIterator(prefix []byte) Iterator // PrefixIterator creates iterator to traverse given prefix. +} - // SetHeight sets the height saved in the Store if it is higher than the existing height. - SetHeight(height uint64) bool +// Batch enables batching of transactions. +type Batch interface { + Set(key, value []byte) error // Accumulates KV entries in a transaction. + Delete(key []byte) error // Deletes the given key. + Commit() error // Commits the transaction. + Discard() // Discards the transaction. +} - // Base returns height of the lowest block in store. - Base() uint64 +// Iterator enables traversal over a given prefix. 
+type Iterator interface {
+	Valid() bool
+	Next()
+	Key() []byte
+	Value() []byte
+	Error() error
+	Discard()
+}
 
-	// SetBase sets the height saved in the Store for the lowest block
-	SetBase(height uint64) bool
+// Store is minimal interface for storing and retrieving blocks, commits and state.
+type Store interface {
+	// NewBatch creates a new db batch.
+	NewBatch() Batch
 
 	// SaveBlock saves block along with its seen commit (which will be included in the next block).
 	SaveBlock(block *types.Block, commit *types.Commit, batch Batch) (Batch, error)
@@ -58,5 +71,9 @@ type Store interface {
 	LoadValidators(height uint64) (*tmtypes.ValidatorSet, error)
 
 	// Pruning functions
-	PruneBlocks(height int64) (uint64, error)
+	PruneBlocks(from, to uint64) (uint64, error)
+
+	// Left those for backward comptability. Should be removed in the future.
+	SetHeight(height uint64)
+	Height() uint64
 }
diff --git a/store/store_test.go b/store/store_test.go
index 3241f47f2..d9771742c 100644
--- a/store/store_test.go
+++ b/store/store_test.go
@@ -8,6 +8,7 @@ import (
 	tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
 
 	"github.com/dymensionxyz/dymint/store"
+	"github.com/dymensionxyz/dymint/testutil"
 	"github.com/dymensionxyz/dymint/types"
 
 	"github.com/stretchr/testify/assert"
@@ -134,7 +135,7 @@ func TestRestart(t *testing.T) {
 	s1 := store.New(kv)
 	expectedHeight := uint64(10)
 	_, err := s1.UpdateState(types.State{
-		LastBlockHeight: int64(expectedHeight),
+		LastBlockHeight: expectedHeight,
 		LastStoreHeight: uint64(expectedHeight),
 		NextValidators:  validatorSet,
 		Validators:      validatorSet,

From f53ddc49ca3cd1ae13066569590dd27a24d85e34 Mon Sep 17 00:00:00 2001
From: Michael Tsitrin
Date: Thu, 9 May 2024 12:32:44 +0300
Subject: [PATCH 20/35] moving height-related management to be based on State

---
 block/block.go                |  16 ++---
 block/manager.go              |  20 +++---
 block/produce.go              |   8 +--
 block/retriever.go            |   2 +-
 go.mod                        |   2 +-
 indexers/txindex/null/null.go |   2 +-
 rpc/client/client.go          |  14 ++---
 rpc/client/client_test.go     | 115 ++++++++++++++++++----------------
 store/store.go                |  40 ++----------
 store/storeIface.go           |   3 +-
 store/store_test.go           |   2 +-
 types/state.go                |  32 ++++++++++
 12 files changed, 136 insertions(+), 120 deletions(-)

diff --git a/block/block.go b/block/block.go
index b548d08c5..a926f7479 100644
--- a/block/block.go
+++ b/block/block.go
@@ -21,7 +21,7 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta
 	// TODO (#330): allow genesis block with height > 0 to be applied.
 	// TODO: add switch case to have defined behavior for each case.
 	// validate block height
-	if block.Header.Height != m.Store.NextHeight() {
+	if block.Header.Height != m.State.NextHeight() {
 		return types.ErrInvalidBlockHeight
 	}
@@ -103,7 +103,7 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta
 	// Every one of those, if happens before commit, prevents us from re-executing the block in case failed during commit.
newState.LastValidators = m.State.Validators.Copy() newState.LastStoreHeight = block.Header.Height - newState.BaseHeight = m.Store.Base() + newState.BaseHeight = m.State.Base() _, err = m.Store.UpdateState(newState, nil) if err != nil { @@ -111,7 +111,7 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta } m.State = newState - if ok := m.Store.SetHeight(block.Header.Height); !ok { + if ok := m.State.SetHeight(block.Header.Height); !ok { return fmt.Errorf("store set height: %d", block.Header.Height) } @@ -124,7 +124,7 @@ func (m *Manager) attemptApplyCachedBlocks() error { defer m.retrieverMutex.Unlock() for { - expectedHeight := m.Store.NextHeight() + expectedHeight := m.State.NextHeight() cachedBlock, blockExists := m.blockCache[expectedHeight] if !blockExists { @@ -182,13 +182,13 @@ func (m *Manager) UpdateStateFromApp() error { } copy(m.State.LastResultsHash[:], tmtypes.NewResults(resp.DeliverTxs).Hash()) - _, err = m.Store.UpdateState(m.LastState, nil) + if ok := m.State.SetHeight(appHeight); !ok { + return fmt.Errorf("state set height: %d", appHeight) + } + _, err = m.Store.UpdateState(m.State, nil) if err != nil { return errorsmod.Wrap(err, "update state") } - if ok := m.Store.SetHeight(appHeight); !ok { - return fmt.Errorf("store set height: %d", appHeight) - } return nil } diff --git a/block/manager.go b/block/manager.go index 9bd7888dc..537ae4eff 100644 --- a/block/manager.go +++ b/block/manager.go @@ -197,7 +197,7 @@ func (m *Manager) syncBlockManager() error { return err } - m.logger.Info("Synced.", "current height", m.Store.Height(), "syncTarget", m.SyncTarget.Load()) + m.logger.Info("Synced.", "current height", m.State.Height(), "syncTarget", m.SyncTarget.Load()) return nil } @@ -226,13 +226,13 @@ func (m *Manager) onNewGossipedBlock(event pubsub.Message) { block := eventData.Block commit := eventData.Commit - nextHeight := m.Store.NextHeight() + nextHeight := m.State.NextHeight() if block.Header.Height >= nextHeight { m.blockCache[block.Header.Height] = CachedBlock{ Block: &block, Commit: &commit, } - m.logger.Debug("caching block", "block height", block.Header.Height, "store height", m.Store.Height()) + m.logger.Debug("caching block", "block height", block.Header.Height, "store height", m.State.Height()) } m.retrieverMutex.Unlock() // have to give this up as it's locked again in attempt apply, and we're not re-entrant err := m.attemptApplyCachedBlocks() @@ -242,12 +242,18 @@ func (m *Manager) onNewGossipedBlock(event pubsub.Message) { } // getInitialState tries to load lastState from Store, and if it's not available it reads GenesisDoc. 
-func getInitialState(store store.Store, genesis *tmtypes.GenesisDoc, logger types.Logger) (types.State, error) { - s, err := store.LoadState() +func getInitialState(store store.Store, genesis *tmtypes.GenesisDoc, logger types.Logger) (s types.State, err error) { + s, err = store.LoadState() if errors.Is(err, types.ErrNoStateFound) { logger.Info("failed to find state in the store, creating new state from genesis") - return types.NewFromGenesisDoc(genesis) + s, err = types.NewFromGenesisDoc(genesis) } - return s, err + if err != nil { + return types.State{}, fmt.Errorf("get initial state: %w", err) + } + + // init store according to state + store.SetHeight(s.Height()) + return s, nil } diff --git a/block/produce.go b/block/produce.go index dfe9d5a06..0b5abbce1 100644 --- a/block/produce.go +++ b/block/produce.go @@ -117,12 +117,10 @@ func (m *Manager) produceBlock(allowEmpty bool) (*types.Block, *types.Commit, er if m.State.IsGenesis() { newHeight = uint64(m.State.InitialHeight) lastCommit = &types.Commit{} - m.LastState.BaseHeight = newHeight - if ok := m.Store.SetBase(newHeight); !ok { - return nil, nil, fmt.Errorf("store set base: %d", newHeight) - } + m.State.BaseHeight = newHeight + m.State.SetBase(newHeight) } else { - height := m.Store.Height() + height := m.State.Height() newHeight = height + 1 lastCommit, err = m.Store.LoadCommit(height) if err != nil { diff --git a/block/retriever.go b/block/retriever.go index 61a757204..c41485e6b 100644 --- a/block/retriever.go +++ b/block/retriever.go @@ -106,7 +106,7 @@ func (m *Manager) ProcessNextDABatch(daMetaData *da.DASubmitMetaData) error { for _, batch := range batchResp.Batches { for i, block := range batch.Blocks { - if block.Header.Height != m.Store.NextHeight() { + if block.Header.Height != m.State.NextHeight() { continue } if err := m.validateBlock(block, batch.Commits[i]); err != nil { diff --git a/go.mod b/go.mod index 6aa73f21f..7fc804197 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,6 @@ go 1.22.2 require ( code.cloudfoundry.org/go-diodes v0.0.0-20220725190411-383eb6634c40 cosmossdk.io/errors v1.0.1 - cosmossdk.io/math v1.3.0 github.com/avast/retry-go/v4 v4.5.0 github.com/celestiaorg/celestia-openrpc v0.4.0-rc.1 github.com/celestiaorg/go-cnc v0.4.2 @@ -248,6 +247,7 @@ require ( ) require ( + cosmossdk.io/math v1.3.0 // indirect github.com/DataDog/zstd v1.5.2 // indirect github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect github.com/blang/semver/v4 v4.0.0 // indirect diff --git a/indexers/txindex/null/null.go b/indexers/txindex/null/null.go index f8f8cba3e..4cf66cbbf 100644 --- a/indexers/txindex/null/null.go +++ b/indexers/txindex/null/null.go @@ -4,7 +4,7 @@ import ( "context" "errors" - "github.com/dymensionxyz/dymint/state/txindex" + "github.com/dymensionxyz/dymint/indexers/txindex" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/pubsub/query" ) diff --git a/rpc/client/client.go b/rpc/client/client.go index 17d3c42d5..c66e6209f 100644 --- a/rpc/client/client.go +++ b/rpc/client/client.go @@ -65,7 +65,7 @@ func NewClient(node *node.Node) *Client { // ABCIInfo returns basic information about application state. 
func (c *Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { - resInfo, err := c.query().InfoSync(proxy.RequestInfo) + resInfo, err := c.Query().InfoSync(proxy.RequestInfo) if err != nil { return nil, err } @@ -79,7 +79,7 @@ func (c *Client) ABCIQuery(ctx context.Context, path string, data tmbytes.HexByt // ABCIQueryWithOptions queries for data from application. func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmbytes.HexBytes, opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - resQuery, err := c.query().QuerySync(abci.RequestQuery{ + resQuery, err := c.Query().QuerySync(abci.RequestQuery{ Path: path, Data: data, Height: opts.Height, @@ -802,7 +802,7 @@ func (c *Client) UnconfirmedTxs(ctx context.Context, limitPtr *int) (*ctypes.Res // // If valid, the tx is automatically added to the mempool. func (c *Client) CheckTx(ctx context.Context, tx tmtypes.Tx) (*ctypes.ResultCheckTx, error) { - res, err := c.mempool().CheckTxSync(abci.RequestCheckTx{Tx: tx}) + res, err := c.Mempool().CheckTxSync(abci.RequestCheckTx{Tx: tx}) if err != nil { return nil, err } @@ -858,19 +858,19 @@ func (c *Client) resubscribe(subscriber string, q tmpubsub.Query) tmtypes.Subscr } } -func (c *Client) consensus() proxy.AppConnConsensus { +func (c *Client) Consensus() proxy.AppConnConsensus { return c.node.ProxyApp().Consensus() } -func (c *Client) mempool() proxy.AppConnMempool { +func (c *Client) Mempool() proxy.AppConnMempool { return c.node.ProxyApp().Mempool() } -func (c *Client) query() proxy.AppConnQuery { +func (c *Client) Query() proxy.AppConnQuery { return c.node.ProxyApp().Query() } -func (c *Client) snapshot() proxy.AppConnSnapshot { +func (c *Client) Snapshot() proxy.AppConnSnapshot { return c.node.ProxyApp().Snapshot() } diff --git a/rpc/client/client_test.go b/rpc/client/client_test.go index 06abb7c24..931c54b15 100644 --- a/rpc/client/client_test.go +++ b/rpc/client/client_test.go @@ -1,4 +1,4 @@ -package client +package client_test import ( "context" @@ -29,7 +29,9 @@ import ( "github.com/dymensionxyz/dymint/mempool" tmmocks "github.com/dymensionxyz/dymint/mocks/github.com/tendermint/tendermint/abci/types" "github.com/dymensionxyz/dymint/node" + "github.com/dymensionxyz/dymint/rpc/client" "github.com/dymensionxyz/dymint/settlement" + "github.com/dymensionxyz/dymint/testutil" "github.com/dymensionxyz/dymint/types" ) @@ -45,10 +47,10 @@ func TestConnectionGetters(t *testing.T) { assert := assert.New(t) _, rpc := getRPC(t) - assert.NotNil(rpc.consensus()) - assert.NotNil(rpc.mempool()) - assert.NotNil(rpc.snapshot()) - assert.NotNil(rpc.query()) + assert.NotNil(rpc.Consensus()) + assert.NotNil(rpc.Mempool()) + assert.NotNil(rpc.Snapshot()) + assert.NotNil(rpc.Query()) } func TestInfo(t *testing.T) { @@ -127,14 +129,14 @@ func TestGenesisChunked(t *testing.T) { ) require.NoError(t, err) - rpc := NewClient(n) + rpc := client.NewClient(n) var expectedID uint = 2 gc, err := rpc.GenesisChunked(context.Background(), expectedID) assert.Error(err) assert.Nil(gc) - err = rpc.node.Start() + err = n.Start() require.NoError(t, err) expectedID = 0 @@ -154,11 +156,11 @@ func TestBroadcastTxAsync(t *testing.T) { expectedTx := []byte("tx data") - mockApp, rpc := getRPC(t) + mockApp, rpc, node := getRPCAndNode(t) mockApp.On("CheckTx", abci.RequestCheckTx{Tx: expectedTx}).Return(abci.ResponseCheckTx{}) mockApp.On("InitChain", mock.Anything).Return(abci.ResponseInitChain{}) - err := rpc.node.Start() + err := node.Start() require.NoError(t, err) res, err := 
rpc.BroadcastTxAsync(context.Background(), expectedTx) @@ -171,7 +173,7 @@ func TestBroadcastTxAsync(t *testing.T) { assert.NotEmpty(res.Hash) mockApp.AssertExpectations(t) - err = rpc.node.Stop() + err = node.Stop() require.NoError(t, err) } @@ -190,10 +192,10 @@ func TestBroadcastTxSync(t *testing.T) { Codespace: "space", } - mockApp, rpc := getRPC(t) + mockApp, rpc, node := getRPCAndNode(t) mockApp.On("InitChain", mock.Anything).Return(abci.ResponseInitChain{}) - err := rpc.node.Start() + err := node.Start() require.NoError(t, err) mockApp.On("CheckTx", abci.RequestCheckTx{Tx: expectedTx}).Return(expectedResponse) @@ -208,7 +210,7 @@ func TestBroadcastTxSync(t *testing.T) { assert.NotEmpty(res.Hash) mockApp.AssertExpectations(t) - err = rpc.node.Stop() + err = node.Stop() require.NoError(t, err) } @@ -238,18 +240,18 @@ func TestBroadcastTxCommit(t *testing.T) { Codespace: "space", } - mockApp, rpc := getRPC(t) + mockApp, rpc, node := getRPCAndNode(t) mockApp.On("BeginBlock", mock.Anything).Return(abci.ResponseBeginBlock{}) mockApp.BeginBlock(abci.RequestBeginBlock{}) mockApp.On("CheckTx", abci.RequestCheckTx{Tx: expectedTx}).Return(expectedCheckResp) mockApp.On("InitChain", mock.Anything).Return(abci.ResponseInitChain{}) // in order to broadcast, the node must be started - err := rpc.node.Start() + err := node.Start() require.NoError(err) go func() { time.Sleep(mockTxProcessingTime) - err := rpc.node.EventBus().PublishEventTx(tmtypes.EventDataTx{TxResult: abci.TxResult{ + err := node.EventBus().PublishEventTx(tmtypes.EventDataTx{TxResult: abci.TxResult{ Height: 1, Index: 0, Tx: expectedTx, @@ -265,7 +267,7 @@ func TestBroadcastTxCommit(t *testing.T) { assert.Equal(expectedDeliverResp, res.DeliverTx) mockApp.AssertExpectations(t) - err = rpc.node.Stop() + err = node.Stop() require.NoError(err) } @@ -273,14 +275,14 @@ func TestGetBlock(t *testing.T) { assert := assert.New(t) require := require.New(t) - mockApp, rpc := getRPC(t) + mockApp, rpc, node := getRPCAndNode(t) mockApp.On("BeginBlock", mock.Anything).Return(abci.ResponseBeginBlock{}) mockApp.On("CheckTx", mock.Anything).Return(abci.ResponseCheckTx{}) mockApp.On("EndBlock", mock.Anything).Return(abci.ResponseEndBlock{}) mockApp.On("Commit", mock.Anything).Return(abci.ResponseCommit{}) mockApp.On("InitChain", mock.Anything).Return(abci.ResponseInitChain{}) - err := rpc.node.Start() + err := node.Start() require.NoError(err) block := getRandomBlock(1, 10) @@ -294,21 +296,22 @@ func TestGetBlock(t *testing.T) { assert.NotNil(blockResp.Block) - err = rpc.node.Stop() + err = node.Stop() require.NoError(err) } func TestGetCommit(t *testing.T) { require := require.New(t) assert := assert.New(t) - mockApp, rpc := getRPC(t) + mockApp, rpc, node := getRPCAndNode(t) + mockApp.On("BeginBlock", mock.Anything).Return(abci.ResponseBeginBlock{}) mockApp.On("Commit", mock.Anything).Return(abci.ResponseCommit{}) mockApp.On("InitChain", mock.Anything).Return(abci.ResponseInitChain{}) blocks := []*types.Block{getRandomBlock(1, 5), getRandomBlock(2, 6), getRandomBlock(3, 8), getRandomBlock(4, 10)} - err := rpc.node.Start() + err := node.Start() require.NoError(err) for _, b := range blocks { @@ -333,27 +336,28 @@ func TestGetCommit(t *testing.T) { assert.Equal(blocks[3].Header.Height, uint64(commit.Height)) }) - err = rpc.node.Stop() + err = node.Stop() require.NoError(err) } func TestBlockSearch(t *testing.T) { require := require.New(t) assert := assert.New(t) - mockApp, rpc := getRPC(t) + mockApp, rpc, node := getRPCAndNode(t) + 
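	// Note: the test file moved to an external package (package client_test),
	// so the tests can no longer reach the client's unexported node field. The
	// getRPCAndNode helper (defined near the end of this file) therefore
	// returns the node explicitly, and the accessors used throughout
	// (Consensus, Mempool, Query, Snapshot) were exported for the same reason.
	// A minimal sketch of the resulting pattern, using only helpers shown in
	// this patch:
	//
	//	mockApp, rpc, node := getRPCAndNode(t)
	//	require.NoError(t, node.Start())
	//	res, err := rpc.ABCIInfo(context.Background())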
mockApp.On("BeginBlock", mock.Anything).Return(abci.ResponseBeginBlock{}) mockApp.On("Commit", mock.Anything).Return(abci.ResponseCommit{}) heights := []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} for _, h := range heights { block := getRandomBlock(uint64(h), 5) - _, err := rpc.node.Store.SaveBlock(block, &types.Commit{ + _, err := node.Store.SaveBlock(block, &types.Commit{ Height: uint64(h), HeaderHash: block.Header.Hash(), }, nil) require.NoError(err) } - indexBlocks(t, rpc, heights) + indexBlocks(t, node, heights) tests := []struct { query string @@ -401,7 +405,7 @@ func TestGetBlockByHash(t *testing.T) { assert := assert.New(t) require := require.New(t) - mockApp, rpc := getRPC(t) + mockApp, rpc, node := getRPCAndNode(t) mockApp.On("BeginBlock", mock.Anything).Return(abci.ResponseBeginBlock{}) mockApp.On("CheckTx", mock.Anything).Return(abci.ResponseCheckTx{}) mockApp.On("EndBlock", mock.Anything).Return(abci.ResponseEndBlock{}) @@ -409,11 +413,11 @@ func TestGetBlockByHash(t *testing.T) { mockApp.On("Info", mock.Anything).Return(abci.ResponseInfo{LastBlockHeight: 0, LastBlockAppHash: []byte{0}}) mockApp.On("InitChain", mock.Anything).Return(abci.ResponseInitChain{}) - err := rpc.node.Start() + err := node.Start() require.NoError(err) block := getRandomBlock(1, 10) - _, err = rpc.node.Store.SaveBlock(block, &types.Commit{}, nil) + _, err = node.Store.SaveBlock(block, &types.Commit{}, nil) require.NoError(err) abciBlock, err := types.ToABCIBlock(block) require.NoError(err) @@ -432,7 +436,7 @@ func TestGetBlockByHash(t *testing.T) { assert.NotNil(blockResp.Block) - err = rpc.node.Stop() + err = node.Stop() require.NoError(err) } @@ -473,7 +477,7 @@ func TestTx(t *testing.T) { require.NoError(err) require.NotNil(node) - rpc := NewClient(node) + rpc := client.NewClient(node) require.NotNil(rpc) mockApp.On("BeginBlock", mock.Anything).Return(abci.ResponseBeginBlock{}) mockApp.On("EndBlock", mock.Anything).Return(abci.ResponseEndBlock{}) @@ -482,7 +486,7 @@ func TestTx(t *testing.T) { mockApp.On("CheckTx", mock.Anything).Return(abci.ResponseCheckTx{}) mockApp.On("Info", mock.Anything).Return(abci.ResponseInfo{LastBlockHeight: 0, LastBlockAppHash: []byte{0}}) - err = rpc.node.Start() + err = node.Start() require.NoError(err) tx1 := tmtypes.Tx("tx1") @@ -525,12 +529,12 @@ func TestUnconfirmedTxs(t *testing.T) { assert := assert.New(t) require := require.New(t) - mockApp, rpc := getRPC(t) + mockApp, rpc, node := getRPCAndNode(t) mockApp.On("BeginBlock", mock.Anything).Return(abci.ResponseBeginBlock{}) mockApp.On("CheckTx", mock.Anything).Return(abci.ResponseCheckTx{}) mockApp.On("InitChain", mock.Anything).Return(abci.ResponseInitChain{}) - err := rpc.node.Start() + err := node.Start() require.NoError(err) for _, tx := range c.txs { @@ -566,11 +570,11 @@ func TestUnconfirmedTxsLimit(t *testing.T) { assert := assert.New(t) require := require.New(t) - mockApp, rpc := getRPC(t) + mockApp, rpc, node := getRPCAndNode(t) mockApp.On("BeginBlock", mock.Anything).Return(abci.ResponseBeginBlock{}) mockApp.On("CheckTx", mock.Anything).Return(abci.ResponseCheckTx{}) - err := rpc.node.Start() + err := node.Start() require.NoError(err) tx1 := tmtypes.Tx("tx1") @@ -605,24 +609,24 @@ func TestConsensusState(t *testing.T) { resp1, err := rpc.ConsensusState(context.Background()) assert.Nil(resp1) - assert.ErrorIs(err, ErrConsensusStateNotAvailable) + assert.ErrorIs(err, client.ErrConsensusStateNotAvailable) resp2, err := rpc.DumpConsensusState(context.Background()) assert.Nil(resp2) - assert.ErrorIs(err, 
ErrConsensusStateNotAvailable) + assert.ErrorIs(err, client.ErrConsensusStateNotAvailable) } func TestBlockchainInfo(t *testing.T) { require := require.New(t) assert := assert.New(t) - mockApp, rpc := getRPC(t) + mockApp, rpc, node := getRPCAndNode(t) mockApp.On("BeginBlock", mock.Anything).Return(abci.ResponseBeginBlock{}) mockApp.On("Commit", mock.Anything).Return(abci.ResponseCommit{}) heights := []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} for _, h := range heights { block := getRandomBlock(uint64(h), 5) - _, err := rpc.node.Store.SaveBlock(block, &types.Commit{ + _, err := node.Store.SaveBlock(block, &types.Commit{ Height: uint64(h), HeaderHash: block.Header.Hash(), }, nil) @@ -641,19 +645,19 @@ func TestBlockchainInfo(t *testing.T) { desc: "min = 1 and max = 5", min: 1, max: 5, - exp: []*tmtypes.BlockMeta{getBlockMeta(rpc, 1), getBlockMeta(rpc, 5)}, + exp: []*tmtypes.BlockMeta{getBlockMeta(node, 1), getBlockMeta(node, 5)}, err: false, }, { desc: "min height is 0", min: 0, max: 10, - exp: []*tmtypes.BlockMeta{getBlockMeta(rpc, 1), getBlockMeta(rpc, 10)}, + exp: []*tmtypes.BlockMeta{getBlockMeta(node, 1), getBlockMeta(node, 10)}, err: false, }, { desc: "max height is out of range", min: 0, max: 15, - exp: []*tmtypes.BlockMeta{getBlockMeta(rpc, 1), getBlockMeta(rpc, 10)}, + exp: []*tmtypes.BlockMeta{getBlockMeta(node, 1), getBlockMeta(node, 10)}, err: false, }, { desc: "negative min height", @@ -753,7 +757,7 @@ func TestValidatorSetHandling(t *testing.T) { require.NoError(err) require.NotNil(node) - rpc := NewClient(node) + rpc := client.NewClient(node) require.NotNil(rpc) err = node.Start() @@ -785,7 +789,7 @@ func getRandomBlock(height uint64, nTxs int) *types.Block { block := &types.Block{ Header: types.Header{ Height: height, - Version: types.Version{Block: types.InitStateVersion.Consensus.Block}, + Version: types.Version{Block: testutil.BlockVersion}, ProposerAddress: getRandomBytes(20), }, Data: types.Data{ @@ -828,8 +832,8 @@ func getRandomBytes(n int) []byte { return data } -func getBlockMeta(rpc *Client, n int64) *tmtypes.BlockMeta { - b, err := rpc.node.Store.LoadBlock(uint64(n)) +func getBlockMeta(node *node.Node, n int64) *tmtypes.BlockMeta { + b, err := node.Store.LoadBlock(uint64(n)) if err != nil { return nil } @@ -841,7 +845,12 @@ func getBlockMeta(rpc *Client, n int64) *tmtypes.BlockMeta { return bmeta } -func getRPC(t *testing.T) (*tmmocks.MockApplication, *Client) { +func getRPC(t *testing.T) (*tmmocks.MockApplication, *client.Client) { + app, rpc, _ := getRPCAndNode(t) + return app, rpc +} + +func getRPCAndNode(t *testing.T) (*tmmocks.MockApplication, *client.Client, *node.Node) { t.Helper() require := require.New(t) app := &tmmocks.MockApplication{} @@ -888,18 +897,18 @@ func getRPC(t *testing.T) (*tmmocks.MockApplication, *Client) { require.NoError(err) require.NotNil(node) - rpc := NewClient(node) + rpc := client.NewClient(node) require.NotNil(rpc) - return app, rpc + return app, rpc, node } // From state/indexer/block/kv/kv_test -func indexBlocks(t *testing.T, rpc *Client, heights []int64) { +func indexBlocks(t *testing.T, node *node.Node, heights []int64) { t.Helper() for _, h := range heights { - require.NoError(t, rpc.node.BlockIndexer.Index(tmtypes.EventDataNewBlockHeader{ + require.NoError(t, node.BlockIndexer.Index(tmtypes.EventDataNewBlockHeader{ Header: tmtypes.Header{Height: h}, ResultBeginBlock: abci.ResponseBeginBlock{ Events: []abci.Event{ @@ -1013,7 +1022,7 @@ func TestMempool2Nodes(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 
3*time.Second) defer cancel() - local := NewClient(node1) + local := client.NewClient(node1) require.NotNil(local) // broadcast the bad Tx, this should not be propogated or added to the local mempool diff --git a/store/store.go b/store/store.go index 154369bba..96cf1319a 100644 --- a/store/store.go +++ b/store/store.go @@ -24,12 +24,11 @@ var ( validatorsPrefix = [1]byte{6} ) -// DefaultStore is a default store implmementation. +// DefaultStore is a default store implementation. type DefaultStore struct { db KVStore - height uint64 // the highest block saved - baseHeight uint64 // the lowest block saved + height uint64 // the highest block saved } var _ Store = &DefaultStore{} @@ -46,15 +45,9 @@ func (s *DefaultStore) NewBatch() Batch { return s.db.NewBatch() } -// SetHeight sets the height saved in the Store if it is higher than the existing height -// returns OK if the value was updated successfully or did not need to be updated -func (s *DefaultStore) SetHeight(height uint64) bool { - ok := true - storeHeight := s.Height() - if height > storeHeight { - ok = atomic.CompareAndSwapUint64(&s.height, storeHeight, height) - } - return ok +// SetHeight sets the height of the store +func (s *DefaultStore) SetHeight(height uint64) { + atomic.StoreUint64(&s.height, height) } // Height returns height of the highest block saved in the Store. @@ -62,27 +55,6 @@ func (s *DefaultStore) Height() uint64 { return atomic.LoadUint64(&s.height) } -// NextHeight returns the next height that expected to be stored in store. -func (s *DefaultStore) NextHeight() uint64 { - return s.Height() + 1 -} - -// SetBase sets the base height if it is higher than the existing base height -// returns OK if the value was updated successfully or did not need to be updated -func (s *DefaultStore) SetBase(height uint64) bool { - ok := true - baseHeight := s.Base() - if height > baseHeight { - ok = atomic.CompareAndSwapUint64(&s.baseHeight, baseHeight, height) - } - return ok -} - -// Base returns height of the earliest block saved in the Store. -func (s *DefaultStore) Base() uint64 { - return atomic.LoadUint64(&s.baseHeight) -} - // SaveBlock adds block to the store along with corresponding commit. // Stored height is updated if block height is greater than stored value. // In case a batch is provided, the block and commit are added to the batch and not saved. @@ -237,8 +209,6 @@ func (s *DefaultStore) LoadState() (types.State, error) { return types.State{}, fmt.Errorf("unmarshal state from proto: %w", err) } - atomic.StoreUint64(&s.height, state.LastStoreHeight) - atomic.StoreUint64(&s.baseHeight, state.BaseHeight) return state, nil } diff --git a/store/storeIface.go b/store/storeIface.go index f9417dcb7..b282138fa 100644 --- a/store/storeIface.go +++ b/store/storeIface.go @@ -73,7 +73,8 @@ type Store interface { // Pruning functions PruneBlocks(from, to uint64) (uint64, error) - // Left those for backward comptability. Should be removed in the future. + // TODO: Left those for backward compatibility all over the UT. + // Should be removed in the future. 
 	SetHeight(height uint64)
 	Height() uint64
 }
diff --git a/store/store_test.go b/store/store_test.go
index d9771742c..045406682 100644
--- a/store/store_test.go
+++ b/store/store_test.go
@@ -47,7 +47,7 @@ func TestStoreHeight(t *testing.T) {
 
 		for _, block := range c.blocks {
 			_, err := bstore.SaveBlock(block, &types.Commit{}, nil)
-			_ = bstore.SetHeight(block.Header.Height)
+			bstore.SetHeight(block.Header.Height)
 			assert.NoError(err)
 		}
 
diff --git a/types/state.go b/types/state.go
index 316c7eaf3..fbcb8cfff 100644
--- a/types/state.go
+++ b/types/state.go
@@ -101,3 +101,35 @@ func NewFromGenesisDoc(genDoc *types.GenesisDoc) (State, error) {
 func (s *State) IsGenesis() bool {
 	return s.LastBlockHeight == 0
 }
+
+// SetHeight sets the height saved in the State if it is higher than the existing height.
+// It returns true if the value was updated or did not need to be updated.
+func (s *State) SetHeight(height uint64) bool {
+	ok := true
+	storeHeight := s.Height()
+	if height > storeHeight {
+		ok = atomic.CompareAndSwapUint64(&s.LastBlockHeight, storeHeight, height)
+	}
+	return ok
+}
+
+// Height returns the height of the highest block saved in the State.
+func (s *State) Height() uint64 {
+	return uint64(s.LastBlockHeight)
+}
+
+// NextHeight returns the next height that is expected to be stored.
+func (s *State) NextHeight() uint64 {
+	return s.Height() + 1
+}
+
+// SetBase sets the base height (the lowest retained block height) unconditionally.
+func (s *State) SetBase(height uint64) {
+	s.BaseHeight = height
+}
+
+// Base returns the height of the earliest block saved in the State.
+func (s *State) Base() uint64 {
+	return s.BaseHeight
+}

From 840207690a8915da224ebaaffd7cc4acdfe8c50e Mon Sep 17 00:00:00 2001
From: Michael Tsitrin
Date: Thu, 9 May 2024 14:13:12 +0300
Subject: [PATCH 21/35] fixed store pruning

---
 block/block.go               |  8 ++---
 da/grpc/mockserv/mockserv.go |  1 -
 store/pruning.go             | 12 +++----
 store/pruning_test.go        | 70 ++++++++++++++++++++----------------
 4 files changed, 48 insertions(+), 43 deletions(-)

diff --git a/block/block.go b/block/block.go
index a926f7479..d99d55a13 100644
--- a/block/block.go
+++ b/block/block.go
@@ -104,17 +104,15 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta
 	newState.LastValidators = m.State.Validators.Copy()
 	newState.LastStoreHeight = block.Header.Height
 	newState.BaseHeight = m.State.Base()
-
+	if ok := m.State.SetHeight(block.Header.Height); !ok {
+		return fmt.Errorf("store set height: %d", block.Header.Height)
+	}
 	_, err = m.Store.UpdateState(newState, nil)
 	if err != nil {
 		return fmt.Errorf("final update state: %w", err)
 	}
 	m.State = newState
 
-	if ok := m.State.SetHeight(block.Header.Height); !ok {
-		return fmt.Errorf("store set height: %d", block.Header.Height)
-	}
-
 	return nil
 }
 
diff --git a/da/grpc/mockserv/mockserv.go b/da/grpc/mockserv/mockserv.go
index c2bd61d1a..3fa7ee0b8 100644
--- a/da/grpc/mockserv/mockserv.go
+++ b/da/grpc/mockserv/mockserv.go
@@ -3,7 +3,6 @@ package mockserv
 import (
 	"context"
 	"os"
-	"time"
 
 	"github.com/dymensionxyz/dymint/store"
 	tmlog "github.com/tendermint/tendermint/libs/log"
diff --git a/store/pruning.go b/store/pruning.go
index a469b633b..78d2ce0b4 100644
--- a/store/pruning.go
+++ b/store/pruning.go
@@ -5,14 +5,14 @@ import (
 )
 
 // PruneBlocks removes block up to (but not including) a height. It returns number of blocks pruned.
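// Note: with the (from, to) signature introduced below, the pruned range is
// half-open — heights [from, to) are deleted and `to` itself is kept, matching
// the `for h := from; h < to; h++` loop in the body. A minimal usage sketch,
// assuming the in-memory store from this repo:
//
//	s := store.New(store.NewDefaultInMemoryKVStore())
//	// after saving blocks at heights 1..5:
//	pruned, err := s.PruneBlocks(3, 5) // prunes heights 3 and 4; 5 survives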
-func (s *DefaultStore) PruneBlocks(base, height uint64) (uint64, error) { - if base <= 0 { +func (s *DefaultStore) PruneBlocks(from, to uint64) (uint64, error) { + if from <= 0 { return 0, fmt.Errorf("from height must be greater than 0") } - if height < base { + if to < from { return 0, fmt.Errorf("cannot prune to height %v, it is lower than base height %v", - height, base) + to, from) } pruned := uint64(0) @@ -27,7 +27,7 @@ func (s *DefaultStore) PruneBlocks(base, height uint64) (uint64, error) { return nil } - for h := base; h < height; h++ { + for h := from; h < to; h++ { hash, err := s.loadHashFromIndex(h) if err != nil { continue @@ -61,7 +61,7 @@ func (s *DefaultStore) PruneBlocks(base, height uint64) (uint64, error) { } } - err := flush(batch, height) + err := flush(batch, to) if err != nil { return 0, err } diff --git a/store/pruning_test.go b/store/pruning_test.go index dc719efd7..8772cf152 100644 --- a/store/pruning_test.go +++ b/store/pruning_test.go @@ -12,42 +12,38 @@ import ( func TestStorePruning(t *testing.T) { t.Parallel() - pruningHeight := uint64(3) - cases := []struct { - name string - blocks []*types.Block - pruningHeight uint64 - expectedBase uint64 - expectedHeight uint64 - shouldError bool + name string + blocks []*types.Block + from uint64 + to uint64 + shouldError bool }{ + //todo :check exclusion of pruning height + {"blocks with pruning", []*types.Block{ testutil.GetRandomBlock(1, 0), testutil.GetRandomBlock(2, 0), testutil.GetRandomBlock(3, 0), testutil.GetRandomBlock(4, 0), testutil.GetRandomBlock(5, 0), - }, pruningHeight, pruningHeight, 5, false}, + }, 3, 5, false}, {"blocks out of order", []*types.Block{ testutil.GetRandomBlock(2, 0), testutil.GetRandomBlock(3, 0), testutil.GetRandomBlock(1, 0), - }, pruningHeight, pruningHeight, 3, false}, + testutil.GetRandomBlock(5, 0), + }, 3, 5, false}, {"with a gap", []*types.Block{ testutil.GetRandomBlock(1, 0), testutil.GetRandomBlock(9, 0), testutil.GetRandomBlock(10, 0), - }, pruningHeight, pruningHeight, 10, false}, - {"pruning beyond latest height", []*types.Block{ - testutil.GetRandomBlock(1, 0), - testutil.GetRandomBlock(2, 0), - }, pruningHeight, 1, 2, true}, // should error because pruning height > latest height + }, 3, 5, false}, {"pruning height 0", []*types.Block{ testutil.GetRandomBlock(1, 0), testutil.GetRandomBlock(2, 0), testutil.GetRandomBlock(3, 0), - }, 0, 1, 3, true}, + }, 0, 1, true}, } for _, c := range cases { @@ -56,31 +52,43 @@ func TestStorePruning(t *testing.T) { bstore := store.New(store.NewDefaultInMemoryKVStore()) assert.Equal(uint64(0), bstore.Height()) + savedHeights := make(map[uint64]bool) for _, block := range c.blocks { _, err := bstore.SaveBlock(block, &types.Commit{}, nil) - _ = bstore.SetHeight(block.Header.Height) + assert.NoError(err) + savedHeights[block.Header.Height] = true + + //TODO: add block responses and commits + } + + // TODO: assert blocks exists + for k, _ := range savedHeights { + _, err := bstore.LoadBlock(k) assert.NoError(err) } - _, err := bstore.PruneBlocks(int64(c.pruningHeight)) + _, err := bstore.PruneBlocks(c.from, c.to) if c.shouldError { assert.Error(err) - } else { - assert.NoError(err) - assert.Equal(pruningHeight, bstore.Base()) - assert.Equal(c.expectedHeight, bstore.Height()) - assert.Equal(c.expectedBase, bstore.Base()) + return + } + + assert.NoError(err) - // Check if pruned blocks are really removed from the store - for h := uint64(1); h < pruningHeight; h++ { - _, err := bstore.LoadBlock(h) - assert.Error(err, "Block at height %d should be 
pruned", h) + // Validate only blocks in the range are pruned + for k, _ := range savedHeights { + if k >= c.from && k < c.to { + _, err := bstore.LoadBlock(k) + assert.Error(err, "Block at height %d should be pruned", k) - _, err = bstore.LoadBlockResponses(h) - assert.Error(err, "BlockResponse at height %d should be pruned", h) + _, err = bstore.LoadBlockResponses(k) + assert.Error(err, "BlockResponse at height %d should be pruned", k) - _, err = bstore.LoadCommit(h) - assert.Error(err, "Commit at height %d should be pruned", h) + _, err = bstore.LoadCommit(k) + assert.Error(err, "Commit at height %d should be pruned", k) + } else { + _, err := bstore.LoadBlock(k) + assert.NoError(err) } } }) From 90031e7c93cede108ea47733624e7be3196d49b3 Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Thu, 9 May 2024 14:58:58 +0300 Subject: [PATCH 22/35] updated block manager to use state. rpc needs fix --- block/executor_test.go | 4 ++-- block/manager.go | 2 -- block/manager_test.go | 8 +++---- block/production_test.go | 32 +++++++++++++------------- block/retriever.go | 6 ++--- block/submit.go | 4 ++-- block/submit_test.go | 18 +++++++-------- block/synctarget.go | 4 ++-- node/node.go | 5 +++++ rpc/client/client.go | 13 +++++------ rpc/client/client_test.go | 9 +++----- store/pruning_test.go | 3 +-- store/storeIface.go | 5 ----- store/store_test.go | 47 +++------------------------------------ testutil/mocks.go | 6 +---- 15 files changed, 57 insertions(+), 109 deletions(-) diff --git a/block/executor_test.go b/block/executor_test.go index d09a18be3..05aeb936a 100644 --- a/block/executor_test.go +++ b/block/executor_test.go @@ -185,7 +185,7 @@ func TestApplyBlock(t *testing.T) { newState, err := executor.UpdateStateFromResponses(resp, state, block) require.NoError(err) require.NotNil(newState) - assert.Equal(int64(1), newState.LastBlockHeight) + assert.Equal(1, newState.LastBlockHeight) _, err = executor.Commit(&newState, block, resp) require.NoError(err) assert.Equal(mockAppHash, newState.AppHash) @@ -239,7 +239,7 @@ func TestApplyBlock(t *testing.T) { newState, err = executor.UpdateStateFromResponses(resp, state, block) require.NoError(err) require.NotNil(newState) - assert.Equal(int64(2), newState.LastBlockHeight) + assert.Equal(uint64(2), newState.LastBlockHeight) _, err = executor.Commit(&newState, block, resp) require.NoError(err) diff --git a/block/manager.go b/block/manager.go index 537ae4eff..c2d1d401c 100644 --- a/block/manager.go +++ b/block/manager.go @@ -253,7 +253,5 @@ func getInitialState(store store.Store, genesis *tmtypes.GenesisDoc, logger type return types.State{}, fmt.Errorf("get initial state: %w", err) } - // init store according to state - store.SetHeight(s.Height()) return s, nil } diff --git a/block/manager_test.go b/block/manager_test.go index b40b5cc88..b9c55991c 100644 --- a/block/manager_test.go +++ b/block/manager_test.go @@ -135,7 +135,7 @@ func TestProduceOnlyAfterSynced(t *testing.T) { // Initially sync target is 0 assert.Zero(t, manager.SyncTarget.Load()) - assert.True(t, manager.Store.Height() == 0) + assert.True(t, manager.State.Height() == 0) // enough time to sync and produce blocks ctx, cancel := context.WithTimeout(context.Background(), time.Second*4) @@ -150,7 +150,7 @@ func TestProduceOnlyAfterSynced(t *testing.T) { <-ctx.Done() assert.Equal(t, batch.EndHeight, manager.SyncTarget.Load()) // validate that we produced blocks - assert.Greater(t, manager.Store.Height(), batch.EndHeight) + assert.Greater(t, manager.State.Height(), batch.EndHeight) } func 
TestRetrieveDaBatchesFailed(t *testing.T) { @@ -184,7 +184,7 @@ func TestProduceNewBlock(t *testing.T) { _, _, err = manager.ProduceAndGossipBlock(context.Background(), true) require.NoError(t, err) // Validate state is updated with the commit hash - assert.Equal(t, uint64(1), manager.Store.Height()) + assert.Equal(t, uint64(1), manager.State.Height()) assert.Equal(t, commitHash, manager.State.AppHash) } @@ -307,7 +307,7 @@ func TestProduceBlockFailAfterCommit(t *testing.T) { mockStore.ShouldFailSetHeight = tc.shouldFailSetSetHeight mockStore.ShoudFailUpdateState = tc.shouldFailUpdateState _, _, _ = manager.ProduceAndGossipBlock(context.Background(), true) - require.Equal(tc.expectedStoreHeight, manager.Store.Height(), tc.name) + require.Equal(tc.expectedStoreHeight, manager.State.Height(), tc.name) require.Equal(tc.expectedStateAppHash, manager.State.AppHash, tc.name) storeState, err := manager.Store.LoadState() require.NoError(err) diff --git a/block/production_test.go b/block/production_test.go index a48ba8372..7b04ea349 100644 --- a/block/production_test.go +++ b/block/production_test.go @@ -53,7 +53,7 @@ func TestCreateEmptyBlocksEnableDisable(t *testing.T) { // Check initial height initialHeight := uint64(0) - require.Equal(initialHeight, manager.Store.Height()) + require.Equal(initialHeight, manager.State.Height()) mCtx, cancel := context.WithTimeout(context.Background(), runTime) defer cancel() @@ -66,15 +66,15 @@ func TestCreateEmptyBlocksEnableDisable(t *testing.T) { go managerWithEmptyBlocks.AccumulatedDataLoop(mCtx, buf2) <-mCtx.Done() - require.Greater(manager.Store.Height(), initialHeight) - require.Greater(managerWithEmptyBlocks.Store.Height(), initialHeight) - assert.Greater(managerWithEmptyBlocks.Store.Height(), manager.Store.Height()) + require.Greater(manager.State.Height(), initialHeight) + require.Greater(managerWithEmptyBlocks.State.Height(), initialHeight) + assert.Greater(managerWithEmptyBlocks.State.Height(), manager.State.Height()) // Check that blocks are created with empty blocks feature disabled - assert.LessOrEqual(manager.Store.Height(), uint64(runTime/EmptyBlocksMaxTime)) - assert.LessOrEqual(managerWithEmptyBlocks.Store.Height(), uint64(runTime/blockTime)) + assert.LessOrEqual(manager.State.Height(), uint64(runTime/EmptyBlocksMaxTime)) + assert.LessOrEqual(managerWithEmptyBlocks.State.Height(), uint64(runTime/blockTime)) - for i := uint64(2); i < managerWithEmptyBlocks.Store.Height(); i++ { + for i := uint64(2); i < managerWithEmptyBlocks.State.Height(); i++ { prevBlock, err := managerWithEmptyBlocks.Store.LoadBlock(i - 1) assert.NoError(err) @@ -87,7 +87,7 @@ func TestCreateEmptyBlocksEnableDisable(t *testing.T) { assert.Less(diff, blockTime+blockTime/10) } - for i := uint64(2); i < manager.Store.Height(); i++ { + for i := uint64(2); i < manager.State.Height(); i++ { prevBlock, err := manager.Store.LoadBlock(i - 1) assert.NoError(err) @@ -139,7 +139,7 @@ func TestCreateEmptyBlocksNew(t *testing.T) { // Check initial height expectedHeight := uint64(0) - assert.Equal(expectedHeight, manager.Store.Height()) + assert.Equal(expectedHeight, manager.State.Height()) mCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() @@ -151,8 +151,8 @@ func TestCreateEmptyBlocksNew(t *testing.T) { <-mCtx.Done() foundTx := false - assert.LessOrEqual(manager.Store.Height(), uint64(10)) - for i := uint64(2); i < manager.Store.Height(); i++ { + assert.LessOrEqual(manager.State.Height(), uint64(10)) + for i := uint64(2); i < manager.State.Height(); 
i++ { prevBlock, err := manager.Store.LoadBlock(i - 1) assert.NoError(err) @@ -226,7 +226,7 @@ func TestStopBlockProduction(t *testing.T) { // validate initial accumulated is zero require.Equal(manager.AccumulatedBatchSize.Load(), uint64(0)) - assert.Equal(manager.Store.Height(), uint64(0)) + assert.Equal(manager.State.Height(), uint64(0)) // subscribe to health status event eventReceivedCh := make(chan error) @@ -254,7 +254,7 @@ func TestStopBlockProduction(t *testing.T) { // validate block production works time.Sleep(400 * time.Millisecond) - assert.Greater(manager.Store.Height(), uint64(0)) + assert.Greater(manager.State.Height(), uint64(0)) assert.Greater(manager.AccumulatedBatchSize.Load(), uint64(0)) // we don't read from the submit channel, so we assume it get full @@ -266,11 +266,11 @@ func TestStopBlockProduction(t *testing.T) { assert.Error(err) } - stoppedHeight := manager.Store.Height() + stoppedHeight := manager.State.Height() // make sure block production is stopped time.Sleep(400 * time.Millisecond) - assert.Equal(stoppedHeight, manager.Store.Height()) + assert.Equal(stoppedHeight, manager.State.Height()) // consume the signal <-toSubmit @@ -285,5 +285,5 @@ func TestStopBlockProduction(t *testing.T) { // make sure block production is resumed time.Sleep(400 * time.Millisecond) - assert.Greater(manager.Store.Height(), stoppedHeight) + assert.Greater(manager.State.Height(), stoppedHeight) } diff --git a/block/retriever.go b/block/retriever.go index c41485e6b..d1e98d82f 100644 --- a/block/retriever.go +++ b/block/retriever.go @@ -37,7 +37,7 @@ func (m *Manager) RetrieveLoop(ctx context.Context) { // It fetches the batches from the settlement, gets the DA height and gets // the actual blocks from the DA. func (m *Manager) syncUntilTarget(targetHeight uint64) error { - for currH := m.Store.Height(); currH < targetHeight; currH = m.Store.Height() { + for currH := m.State.Height(); currH < targetHeight; currH = m.State.Height() { // It's important that we query the state index before fetching the batch, rather // than e.g. keep it and increment it, because we might be concurrently applying blocks @@ -61,7 +61,7 @@ func (m *Manager) syncUntilTarget(targetHeight uint64) error { } - m.logger.Info("Synced", "store height", m.Store.Height(), "target height", targetHeight) + m.logger.Info("Synced", "store height", m.State.Height(), "target height", targetHeight) err := m.attemptApplyCachedBlocks() if err != nil { @@ -76,7 +76,7 @@ func (m *Manager) queryStateIndex() (uint64, error) { var stateIndex uint64 return stateIndex, retry.Do( func() error { - res, err := m.SLClient.GetHeightState(m.Store.Height() + 1) + res, err := m.SLClient.GetHeightState(m.State.Height() + 1) if err != nil { m.logger.Debug("sl client get height state", "error", err) return err diff --git a/block/submit.go b/block/submit.go index 09d19e1f7..0c1c00cbf 100644 --- a/block/submit.go +++ b/block/submit.go @@ -102,7 +102,7 @@ func (m *Manager) AccumulatedDataLoop(ctx context.Context, toSubmit chan bool) { // Finally, it submits the next batch of blocks and updates the sync target to the height of the last block in the submitted batch. func (m *Manager) HandleSubmissionTrigger(ctx context.Context) error { // Load current sync target and height to determine if new blocks are available for submission. 
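// Note: from this commit the check below compares m.State.Height() (the
// produced height tracked in state) against the last submitted height in
// SyncTarget; the batch assembled afterwards covers the half-open window
// (SyncTarget, State.Height], as createNextBatch further down makes explicit:
//
//	startHeight := m.SyncTarget.Load() + 1
//	endHeight := m.State.Height()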
- if m.Store.Height() <= m.SyncTarget.Load() { + if m.State.Height() <= m.SyncTarget.Load() { return nil // No new blocks have been produced } @@ -129,7 +129,7 @@ func (m *Manager) HandleSubmissionTrigger(ctx context.Context) error { func (m *Manager) createNextBatch() (*types.Batch, error) { // Create the batch startHeight := m.SyncTarget.Load() + 1 - endHeight := m.Store.Height() + endHeight := m.State.Height() nextBatch, err := m.CreateNextBatchToSubmit(startHeight, endHeight) if err != nil { m.logger.Error("create next batch", "startHeight", startHeight, "endHeight", endHeight, "error", err) diff --git a/block/submit_test.go b/block/submit_test.go index 8e4534336..725a8364d 100644 --- a/block/submit_test.go +++ b/block/submit_test.go @@ -38,18 +38,18 @@ func TestBatchSubmissionHappyFlow(t *testing.T) { // Check initial assertions initialHeight := uint64(0) - require.Zero(manager.Store.Height()) + require.Zero(manager.State.Height()) require.Zero(manager.SyncTarget.Load()) // Produce block and validate that we produced blocks _, _, err = manager.ProduceAndGossipBlock(ctx, true) require.NoError(err) - assert.Greater(t, manager.Store.Height(), initialHeight) + assert.Greater(t, manager.State.Height(), initialHeight) assert.Zero(t, manager.SyncTarget.Load()) // submit and validate sync target manager.HandleSubmissionTrigger(ctx) - assert.EqualValues(t, manager.Store.Height(), manager.SyncTarget.Load()) + assert.EqualValues(t, manager.State.Height(), manager.SyncTarget.Load()) } func TestBatchSubmissionFailedSubmission(t *testing.T) { @@ -85,13 +85,13 @@ func TestBatchSubmissionFailedSubmission(t *testing.T) { // Check initial assertions initialHeight := uint64(0) - require.Zero(manager.Store.Height()) + require.Zero(manager.State.Height()) require.Zero(manager.SyncTarget.Load()) // Produce block and validate that we produced blocks _, _, err = manager.ProduceAndGossipBlock(ctx, true) require.NoError(err) - assert.Greater(t, manager.Store.Height(), initialHeight) + assert.Greater(t, manager.State.Height(), initialHeight) assert.Zero(t, manager.SyncTarget.Load()) // try to submit, we expect failure @@ -101,7 +101,7 @@ func TestBatchSubmissionFailedSubmission(t *testing.T) { // try to submit again, we expect success mockLayerI.On("SubmitBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() manager.HandleSubmissionTrigger(ctx) - assert.EqualValues(t, manager.Store.Height(), manager.SyncTarget.Load()) + assert.EqualValues(t, manager.State.Height(), manager.SyncTarget.Load()) } // TestSubmissionByTime tests the submission trigger by time @@ -134,7 +134,7 @@ func TestSubmissionByTime(t *testing.T) { // Check initial height initialHeight := uint64(0) - require.Equal(initialHeight, manager.Store.Height()) + require.Equal(initialHeight, manager.State.Height()) require.Zero(manager.SyncTarget.Load()) var wg sync.WaitGroup @@ -187,7 +187,7 @@ func TestSubmissionByBatchSize(t *testing.T) { // validate initial accumulated is zero require.Equal(manager.AccumulatedBatchSize.Load(), uint64(0)) - assert.Equal(manager.Store.Height(), uint64(0)) + assert.Equal(manager.State.Height(), uint64(0)) var wg sync.WaitGroup wg.Add(2) // Add 2 because we have 2 goroutines @@ -208,7 +208,7 @@ func TestSubmissionByBatchSize(t *testing.T) { // wait for block to be produced but not for submission threshold time.Sleep(200 * time.Millisecond) // assert block produced but nothing submitted yet - assert.Greater(manager.Store.Height(), uint64(0)) + assert.Greater(manager.State.Height(), uint64(0)) 
assert.Greater(manager.AccumulatedBatchSize.Load(), uint64(0)) assert.Zero(manager.SyncTarget.Load()) diff --git a/block/synctarget.go b/block/synctarget.go index 471a0334a..9a33bb050 100644 --- a/block/synctarget.go +++ b/block/synctarget.go @@ -25,13 +25,13 @@ func (m *Manager) SyncTargetLoop(ctx context.Context) { case event := <-subscription.Out(): eventData := event.Data().(*settlement.EventDataNewBatchAccepted) - if eventData.EndHeight <= m.Store.Height() { + if eventData.EndHeight <= m.State.Height() { m.logger.Debug( "syncTargetLoop: received new settlement batch accepted with batch end height <= current store height, skipping.", "height", eventData.EndHeight, "currentHeight", - m.Store.Height(), + m.State.Height(), ) continue } diff --git a/node/node.go b/node/node.go index fe367d0b3..5a9628bd6 100644 --- a/node/node.go +++ b/node/node.go @@ -382,3 +382,8 @@ func (n *Node) startPrometheusServer() error { } return nil } + +// FIXME: read from block manager +func (n *Node) GetBlockManagerHeight() uint64 { + return 0 +} diff --git a/rpc/client/client.go b/rpc/client/client.go index c66e6209f..40246ab77 100644 --- a/rpc/client/client.go +++ b/rpc/client/client.go @@ -313,10 +313,9 @@ func (c *Client) GenesisChunked(context context.Context, id uint) (*ctypes.Resul func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { const limit int64 = 20 - // Currently blocks are not pruned and are synced linearly so the base height is 0 minHeight, maxHeight, err := filterMinMax( - 0, - int64(c.node.Store.Height()), + 0, //FIXME: we might be pruned + int64(c.node.GetBlockManagerHeight()), minHeight, maxHeight, limit) @@ -341,7 +340,7 @@ func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) } return &ctypes.ResultBlockchainInfo{ - LastHeight: int64(c.node.Store.Height()), + LastHeight: int64(c.node.GetBlockManagerHeight()), BlockMetas: blocks, }, nil } @@ -468,7 +467,7 @@ func (c *Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBl func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) { var h uint64 if height == nil { - h = c.node.Store.Height() + h = c.node.GetBlockManagerHeight() } else { h = uint64(*height) } @@ -698,7 +697,7 @@ func (c *Client) BlockSearch(ctx context.Context, query string, page, perPage *i // Status returns detailed information about current status of the node. 
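// Note: the RPC layer now reads heights through node.GetBlockManagerHeight(),
// which this commit stubs out to return 0 (see the FIXME in node/node.go
// above) — that is also why TestBlockchainInfo gets skipped further down. One
// possible final wiring, an assumption rather than part of this patch (the
// BlockManager field name on Node is hypothetical):
//
//	func (n *Node) GetBlockManagerHeight() uint64 {
//		return n.BlockManager.State.Height()
//	}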
func (c *Client) Status(ctx context.Context) (*ctypes.ResultStatus, error) { - latest, err := c.node.Store.LoadBlock(c.node.Store.Height()) + latest, err := c.node.Store.LoadBlock(c.node.GetBlockManagerHeight()) if err != nil { // TODO(tzdybal): extract error return nil, fmt.Errorf("find latest block: %w", err) @@ -877,7 +876,7 @@ func (c *Client) Snapshot() proxy.AppConnSnapshot { func (c *Client) normalizeHeight(height *int64) uint64 { var heightValue uint64 if height == nil || *height == 0 { - heightValue = c.node.Store.Height() + heightValue = c.node.GetBlockManagerHeight() } else { heightValue = uint64(*height) } diff --git a/rpc/client/client_test.go b/rpc/client/client_test.go index 931c54b15..5d780172b 100644 --- a/rpc/client/client_test.go +++ b/rpc/client/client_test.go @@ -286,14 +286,12 @@ func TestGetBlock(t *testing.T) { require.NoError(err) block := getRandomBlock(1, 10) - _, err = rpc.node.Store.SaveBlock(block, &types.Commit{}, nil) - rpc.node.Store.SetHeight(block.Header.Height) + _, err = node.Store.SaveBlock(block, &types.Commit{}, nil) require.NoError(err) blockResp, err := rpc.Block(context.Background(), nil) require.NoError(err) require.NotNil(blockResp) - assert.NotNil(blockResp.Block) err = node.Stop() @@ -315,8 +313,7 @@ func TestGetCommit(t *testing.T) { require.NoError(err) for _, b := range blocks { - _, err = rpc.node.Store.SaveBlock(b, &types.Commit{Height: b.Header.Height}, nil) - rpc.node.Store.SetHeight(b.Header.Height) + _, err = node.Store.SaveBlock(b, &types.Commit{Height: b.Header.Height}, nil) require.NoError(err) } t.Run("Fetch all commits", func(t *testing.T) { @@ -617,6 +614,7 @@ func TestConsensusState(t *testing.T) { } func TestBlockchainInfo(t *testing.T) { + t.Skip("Test disabled as we need to increase the height of the block manager") //FIXME require := require.New(t) assert := assert.New(t) mockApp, rpc, node := getRPCAndNode(t) @@ -630,7 +628,6 @@ func TestBlockchainInfo(t *testing.T) { Height: uint64(h), HeaderHash: block.Header.Hash(), }, nil) - rpc.node.Store.SetHeight(block.Header.Height) require.NoError(err) } diff --git a/store/pruning_test.go b/store/pruning_test.go index 8772cf152..295c95daa 100644 --- a/store/pruning_test.go +++ b/store/pruning_test.go @@ -50,7 +50,6 @@ func TestStorePruning(t *testing.T) { t.Run(c.name, func(t *testing.T) { assert := assert.New(t) bstore := store.New(store.NewDefaultInMemoryKVStore()) - assert.Equal(uint64(0), bstore.Height()) savedHeights := make(map[uint64]bool) for _, block := range c.blocks { @@ -61,7 +60,7 @@ func TestStorePruning(t *testing.T) { //TODO: add block responses and commits } - // TODO: assert blocks exists + // Validate all blocks are saved for k, _ := range savedHeights { _, err := bstore.LoadBlock(k) assert.NoError(err) diff --git a/store/storeIface.go b/store/storeIface.go index b282138fa..051fc4316 100644 --- a/store/storeIface.go +++ b/store/storeIface.go @@ -72,9 +72,4 @@ type Store interface { // Pruning functions PruneBlocks(from, to uint64) (uint64, error) - - // TODO: Left those for backward compatibility all over the UT. - // Should be removed in the future. 
- SetHeight(height uint64) - Height() uint64 } diff --git a/store/store_test.go b/store/store_test.go index 045406682..ffd9d51ff 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -15,47 +15,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestStoreHeight(t *testing.T) { - t.Parallel() - cases := []struct { - name string - blocks []*types.Block - expected uint64 - }{ - {"single block", []*types.Block{testutil.GetRandomBlock(1, 0)}, 1}, - {"two consecutive blocks", []*types.Block{ - testutil.GetRandomBlock(1, 0), - testutil.GetRandomBlock(2, 0), - }, 2}, - {"blocks out of order", []*types.Block{ - testutil.GetRandomBlock(2, 0), - testutil.GetRandomBlock(3, 0), - testutil.GetRandomBlock(1, 0), - }, 3}, - {"with a gap", []*types.Block{ - testutil.GetRandomBlock(1, 0), - testutil.GetRandomBlock(9, 0), - testutil.GetRandomBlock(10, 0), - }, 10}, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - assert := assert.New(t) - bstore := store.New(store.NewDefaultInMemoryKVStore()) - assert.Equal(uint64(0), bstore.Height()) - - for _, block := range c.blocks { - _, err := bstore.SaveBlock(block, &types.Commit{}, nil) - bstore.SetHeight(block.Header.Height) - assert.NoError(err) - } - - assert.Equal(c.expected, bstore.Height()) - }) - } -} - func TestStoreLoad(t *testing.T) { t.Parallel() cases := []struct { @@ -124,7 +83,7 @@ func TestStoreLoad(t *testing.T) { } } -func TestRestart(t *testing.T) { +func TestLoadState(t *testing.T) { t.Parallel() assert := assert.New(t) @@ -144,10 +103,10 @@ func TestRestart(t *testing.T) { assert.NoError(err) s2 := store.New(kv) - _, err = s2.LoadState() + state, err := s2.LoadState() assert.NoError(err) - assert.Equal(expectedHeight, s2.Height()) + assert.Equal(expectedHeight, state.LastBlockHeight) } func TestBlockResponses(t *testing.T) { diff --git a/testutil/mocks.go b/testutil/mocks.go index 5f97ee0e3..3cb3a3388 100644 --- a/testutil/mocks.go +++ b/testutil/mocks.go @@ -99,12 +99,8 @@ type MockStore struct { // SetHeight sets the height of the mock store // Don't set the height to mock failure in setting the height -func (m *MockStore) SetHeight(height uint64) bool { - if m.ShouldFailSetHeight { - return false - } +func (m *MockStore) SetHeight(height uint64) { m.height = height - return true } func (m *MockStore) Height() uint64 { From d2be2ce0cf74776e079c0fa0653b103d20b86ec9 Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Thu, 9 May 2024 19:50:03 +0300 Subject: [PATCH 23/35] cleanup --- block/initchain.go | 1 - block/manager.go | 10 ---------- block/produce.go | 1 - block/types.go | 11 +++++++++++ 4 files changed, 11 insertions(+), 12 deletions(-) diff --git a/block/initchain.go b/block/initchain.go index abd011bfe..c0a0e0b40 100644 --- a/block/initchain.go +++ b/block/initchain.go @@ -25,7 +25,6 @@ func (m *Manager) RunInitChain(ctx context.Context) error { // update the state with only the consensus pubkey m.Executor.UpdateStateAfterInitChain(&m.State, res, gensisValSet) m.Executor.UpdateMempoolAfterInitChain(&m.State) - if _, err := m.Store.UpdateState(m.State, nil); err != nil { return err } diff --git a/block/manager.go b/block/manager.go index c2d1d401c..e6def7792 100644 --- a/block/manager.go +++ b/block/manager.go @@ -18,8 +18,6 @@ import ( "github.com/dymensionxyz/dymint/p2p" "github.com/libp2p/go-libp2p/core/crypto" - - tmcrypto "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/libs/pubsub" tmtypes "github.com/tendermint/tendermint/types" @@ -209,14 +207,6 @@ func (m *Manager) 
UpdateSyncParams(endHeight uint64) { m.lastSubmissionTime.Store(time.Now().UnixNano()) } -func getAddress(key crypto.PrivKey) ([]byte, error) { - rawKey, err := key.GetPublic().Raw() - if err != nil { - return nil, err - } - return tmcrypto.AddressHash(rawKey), nil -} - // TODO: move to gossip.go // onNewGossippedBlock will take a block and apply it func (m *Manager) onNewGossipedBlock(event pubsub.Message) { diff --git a/block/produce.go b/block/produce.go index 0b5abbce1..8932ee7a1 100644 --- a/block/produce.go +++ b/block/produce.go @@ -118,7 +118,6 @@ func (m *Manager) produceBlock(allowEmpty bool) (*types.Block, *types.Commit, er newHeight = uint64(m.State.InitialHeight) lastCommit = &types.Commit{} m.State.BaseHeight = newHeight - m.State.SetBase(newHeight) } else { height := m.State.Height() newHeight = height + 1 diff --git a/block/types.go b/block/types.go index 3f5744f8f..7dfb02981 100644 --- a/block/types.go +++ b/block/types.go @@ -2,6 +2,9 @@ package block import ( "github.com/dymensionxyz/dymint/types" + + "github.com/libp2p/go-libp2p/core/crypto" + tmcrypto "github.com/tendermint/tendermint/crypto" ) // TODO: move to types package @@ -22,3 +25,11 @@ type CachedBlock struct { Block *types.Block Commit *types.Commit } + +func getAddress(key crypto.PrivKey) ([]byte, error) { + rawKey, err := key.GetPublic().Raw() + if err != nil { + return nil, err + } + return tmcrypto.AddressHash(rawKey), nil +} From d9698f80faf4d4da3c9fed3b8be5dc5fb12fd2ac Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Sun, 12 May 2024 13:05:19 +0300 Subject: [PATCH 24/35] simplified commit --- block/block.go | 61 +++++++++-------------------------------------- block/executor.go | 20 +++++++++------- block/manager.go | 13 +--------- block/state.go | 34 ++++++++++++++++++++++++++ types/state.go | 12 ++++++++++ 5 files changed, 70 insertions(+), 70 deletions(-) diff --git a/block/block.go b/block/block.go index d99d55a13..76b3cd2a8 100644 --- a/block/block.go +++ b/block/block.go @@ -1,14 +1,11 @@ package block import ( - "context" "fmt" errorsmod "cosmossdk.io/errors" - "github.com/dymensionxyz/dymint/p2p" "github.com/dymensionxyz/dymint/types" - tmtypes "github.com/tendermint/tendermint/types" ) // applyBlock applies the block to the store and the abci app. @@ -18,7 +15,6 @@ import ( // - block height is the expected block height on the store (height + 1). // - block height is the expected block height on the app (last block height + 1). func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMetaData blockMetaData) error { - // TODO (#330): allow genesis block with height > 0 to be applied. // TODO: add switch case to have defined behavior for each case. 
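// Note: PATCH 23 above moved getAddress into block/types.go, and this commit
// has NewExecutor accept the proposer's libp2p private key directly and derive
// the address internally (see block/executor.go below). A sketch of the
// updated call site, assuming the wiring used elsewhere in this repo:
//
//	exec, err := block.NewExecutor(proposerKey, namespaceID, chainID,
//		mempool, proxyApp, eventBus, logger)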
// validate block height if block.Header.Height != m.State.NextHeight() { @@ -58,33 +54,32 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta return fmt.Errorf("update state from responses: %w", err) } - batch := m.Store.NewBatch() - - batch, err = m.Store.SaveBlockResponses(block.Header.Height, responses, batch) + dbBatch := m.Store.NewBatch() + dbBatch, err = m.Store.SaveBlockResponses(block.Header.Height, responses, dbBatch) if err != nil { - batch.Discard() + dbBatch.Discard() return fmt.Errorf("save block responses: %w", err) } m.State = newState - batch, err = m.Store.UpdateState(m.State, batch) + dbBatch, err = m.Store.UpdateState(m.State, dbBatch) if err != nil { - batch.Discard() + dbBatch.Discard() return fmt.Errorf("update state: %w", err) } - batch, err = m.Store.SaveValidators(block.Header.Height, m.State.Validators, batch) + dbBatch, err = m.Store.SaveValidators(block.Header.Height, m.State.Validators, dbBatch) if err != nil { - batch.Discard() + dbBatch.Discard() return fmt.Errorf("save validators: %w", err) } - err = batch.Commit() + err = dbBatch.Commit() if err != nil { return fmt.Errorf("commit batch to disk: %w", err) } // Commit block to app - retainHeight, err := m.Executor.Commit(&newState, block, responses) + appHash, retainHeight, err := m.Executor.Commit(&newState, block, responses) if err != nil { return fmt.Errorf("commit block: %w", err) } @@ -97,16 +92,12 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta } else { m.logger.Debug("pruned blocks", "pruned", pruned, "retain_height", retainHeight) } + newState.BaseHeight = m.State.Base() } // Update the state with the new app hash, last validators and store height from the commit. // Every one of those, if happens before commit, prevents us from re-executing the block in case failed during commit. - newState.LastValidators = m.State.Validators.Copy() - newState.LastStoreHeight = block.Header.Height - newState.BaseHeight = m.State.Base() - if ok := m.State.SetHeight(block.Header.Height); !ok { - return fmt.Errorf("store set height: %d", block.Header.Height) - } + newState.SetABCICommitResult(responses, appHash, block.Header.Height) _, err = m.Store.UpdateState(newState, nil) if err != nil { return fmt.Errorf("final update state: %w", err) @@ -160,36 +151,6 @@ func (m *Manager) isHeightAlreadyApplied(blockHeight uint64) (bool, error) { return isBlockAlreadyApplied, nil } -// UpdateStateFromApp is responsible for aligning the state of the store from the abci app -func (m *Manager) UpdateStateFromApp() error { - proxyAppInfo, err := m.Executor.GetAppInfo() - if err != nil { - return errorsmod.Wrap(err, "get app info") - } - - appHeight := uint64(proxyAppInfo.LastBlockHeight) - - // update the state with the hash, last store height and last validators. 
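Worth noting alongside the batch-to-dbBatch rename above: applyBlock's store writes follow a strict batch discipline, where each save threads the same batch forward, any failure discards everything staged, and only the final Commit touches disk. A hedged sketch of that idiom (the steps slice stands in for SaveBlockResponses, SaveValidators and the state save; this is not actual dymint code):

    // writeAtomically sketches the dbBatch pattern used in applyBlock.
    func writeAtomically(s store.Store, steps []func(store.Batch) (store.Batch, error)) error {
        b := s.NewBatch()
        for _, step := range steps {
            var err error
            if b, err = step(b); err != nil {
                b.Discard() // drop all staged writes
                return err
            }
        }
        return b.Commit() // single flush to disk
    }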
- m.State.AppHash = *(*[32]byte)(proxyAppInfo.LastBlockAppHash) - m.State.LastStoreHeight = appHeight - m.State.LastValidators = m.State.Validators.Copy() - - resp, err := m.Store.LoadBlockResponses(appHeight) - if err != nil { - return errorsmod.Wrap(err, "load block responses") - } - copy(m.State.LastResultsHash[:], tmtypes.NewResults(resp.DeliverTxs).Hash()) - - if ok := m.State.SetHeight(appHeight); !ok { - return fmt.Errorf("state set height: %d", appHeight) - } - _, err = m.Store.UpdateState(m.State, nil) - if err != nil { - return errorsmod.Wrap(err, "update state") - } - return nil -} - func (m *Manager) validateBlock(block *types.Block, commit *types.Commit) error { // Currently we're assuming proposer is never nil as it's a pre-condition for // dymint to start diff --git a/block/executor.go b/block/executor.go index 65f39ccc7..206a97ac3 100644 --- a/block/executor.go +++ b/block/executor.go @@ -13,6 +13,8 @@ import ( tmtypes "github.com/tendermint/tendermint/types" "go.uber.org/multierr" + libp2pcrypto "github.com/libp2p/go-libp2p-core/crypto" + "github.com/dymensionxyz/dymint/mempool" "github.com/dymensionxyz/dymint/types" ) @@ -33,7 +35,12 @@ type Executor struct { // NewExecutor creates new instance of BlockExecutor. // Proposer address and namespace ID will be used in all newly created blocks. -func NewExecutor(proposerAddress []byte, namespaceID string, chainID string, mempool mempool.Mempool, proxyApp proxy.AppConns, eventBus *tmtypes.EventBus, logger types.Logger) (*Executor, error) { +func NewExecutor(proposerKey libp2pcrypto.PrivKey, namespaceID string, chainID string, mempool mempool.Mempool, proxyApp proxy.AppConns, eventBus *tmtypes.EventBus, logger types.Logger) (*Executor, error) { + proposerAddress, err := getAddress(proposerKey) + if err != nil { + return nil, err + } + bytes, err := hex.DecodeString(namespaceID) if err != nil { return nil, err @@ -134,21 +141,18 @@ func (e *Executor) CreateBlock(height uint64, lastCommit *types.Commit, lastHead } // Commit commits the block -func (e *Executor) Commit(state *types.State, block *types.Block, resp *tmstate.ABCIResponses) (int64, error) { +func (e *Executor) Commit(state *types.State, block *types.Block, resp *tmstate.ABCIResponses) ([]byte, int64, error) { appHash, retainHeight, err := e.commit(state, block, resp.DeliverTxs) if err != nil { - return 0, err + return nil, 0, err } - copy(state.AppHash[:], appHash[:]) - copy(state.LastResultsHash[:], tmtypes.NewResults(resp.DeliverTxs).Hash()) - err = e.publishEvents(resp, block, *state) if err != nil { e.logger.Error("fire block events", "error", err) - return 0, err + return nil, 0, err } - return retainHeight, nil + return appHash, retainHeight, nil } // GetAppInfo returns the latest AppInfo from the proxyApp. diff --git a/block/manager.go b/block/manager.go index 59c7a091c..3cb89ed87 100644 --- a/block/manager.go +++ b/block/manager.go @@ -29,12 +29,6 @@ import ( "github.com/dymensionxyz/dymint/types" ) -const ( - // max amount of pending batches to be submitted. block production will be paused if this limit is reached. - // TODO: make this configurable - maxSupportedBatchSkew = 10 -) - // Manager is responsible for aggregating transactions into blocks. 
type Manager struct { // Configuration @@ -92,12 +86,7 @@ func NewManager( p2pClient *p2p.Client, logger types.Logger, ) (*Manager, error) { - proposerAddress, err := getAddress(proposerKey) - if err != nil { - return nil, err - } - - exec, err := NewExecutor(proposerAddress, conf.NamespaceID, genesis.ChainID, mempool, proxyApp, eventBus, logger) + exec, err := NewExecutor(proposerKey, conf.NamespaceID, genesis.ChainID, mempool, proxyApp, eventBus, logger) if err != nil { return nil, fmt.Errorf("create block executor: %w", err) } diff --git a/block/state.go b/block/state.go index e41093659..1aeea7913 100644 --- a/block/state.go +++ b/block/state.go @@ -1,8 +1,11 @@ package block import ( + "fmt" "time" + errorsmod "cosmossdk.io/errors" + "github.com/cometbft/cometbft/crypto/merkle" abci "github.com/tendermint/tendermint/abci/types" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" @@ -13,6 +16,37 @@ import ( ) // TODO: move all those methods from blockExecutor to manager + +// UpdateStateFromApp is responsible for aligning the state of the store from the abci app +func (m *Manager) UpdateStateFromApp() error { + proxyAppInfo, err := m.Executor.GetAppInfo() + if err != nil { + return errorsmod.Wrap(err, "get app info") + } + + appHeight := uint64(proxyAppInfo.LastBlockHeight) + + // update the state with the hash, last store height and last validators. + m.State.AppHash = *(*[32]byte)(proxyAppInfo.LastBlockAppHash) + m.State.LastStoreHeight = appHeight + m.State.LastValidators = m.State.Validators.Copy() + + resp, err := m.Store.LoadBlockResponses(appHeight) + if err != nil { + return errorsmod.Wrap(err, "load block responses") + } + copy(m.State.LastResultsHash[:], tmtypes.NewResults(resp.DeliverTxs).Hash()) + + if ok := m.State.SetHeight(appHeight); !ok { + return fmt.Errorf("state set height: %d", appHeight) + } + _, err = m.Store.UpdateState(m.State, nil) + if err != nil { + return errorsmod.Wrap(err, "update state") + } + return nil +} + func (e *Executor) updateState(state types.State, block *types.Block, abciResponses *tmstate.ABCIResponses, validatorUpdates []*tmtypes.Validator) (types.State, error) { nValSet := state.NextValidators.Copy() lastHeightValSetChanged := state.LastHeightValidatorsChanged diff --git a/types/state.go b/types/state.go index fbcb8cfff..056d22d31 100644 --- a/types/state.go +++ b/types/state.go @@ -5,6 +5,8 @@ import ( "sync/atomic" "time" + tmtypes "github.com/tendermint/tendermint/types" + // TODO(tzdybal): copy to local project? 
tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -133,3 +135,13 @@ func (s *State) SetBase(height uint64) { func (s *State) Base() uint64 { return s.BaseHeight } + +// SetABCICommitResult +func (s *State) SetABCICommitResult(resp *tmstate.ABCIResponses, appHash []byte, height uint64) { + copy(s.AppHash[:], appHash[:]) + copy(s.LastResultsHash[:], tmtypes.NewResults(resp.DeliverTxs).Hash()) + + s.LastValidators = s.Validators.Copy() + s.LastStoreHeight = height + s.SetHeight(height) +} From 051a4e7cee2a1e51d7d1ff63406f82b60d326301 Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Sun, 12 May 2024 13:08:24 +0300 Subject: [PATCH 25/35] moved gossip methods to gossip.go --- block/block.go | 60 +++++++---------------------------------------- block/executor.go | 2 +- block/manager.go | 25 +------------------- 3 files changed, 11 insertions(+), 76 deletions(-) diff --git a/block/block.go b/block/block.go index 76b3cd2a8..ae05f6fbb 100644 --- a/block/block.go +++ b/block/block.go @@ -84,6 +84,14 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta return fmt.Errorf("commit block: %w", err) } + // Update the state with the new app hash, last validators and store height from the commit. + // Every one of those, if happens before commit, prevents us from re-executing the block in case failed during commit. + newState.SetABCICommitResult(responses, appHash, block.Header.Height) + _, err = m.Store.UpdateState(newState, nil) + if err != nil { + return fmt.Errorf("final update state: %w", err) + } + // Prune old heights, if requested by ABCI app. if retainHeight > 0 { pruned, err := m.pruneBlocks(uint64(retainHeight)) @@ -93,50 +101,14 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta m.logger.Debug("pruned blocks", "pruned", pruned, "retain_height", retainHeight) } newState.BaseHeight = m.State.Base() + //TODO: update state } - // Update the state with the new app hash, last validators and store height from the commit. - // Every one of those, if happens before commit, prevents us from re-executing the block in case failed during commit. - newState.SetABCICommitResult(responses, appHash, block.Header.Height) - _, err = m.Store.UpdateState(newState, nil) - if err != nil { - return fmt.Errorf("final update state: %w", err) - } m.State = newState return nil } -// TODO: move to gossip.go -func (m *Manager) attemptApplyCachedBlocks() error { - m.retrieverMutex.Lock() - defer m.retrieverMutex.Unlock() - - for { - expectedHeight := m.State.NextHeight() - - cachedBlock, blockExists := m.blockCache[expectedHeight] - if !blockExists { - break - } - if err := m.validateBlock(cachedBlock.Block, cachedBlock.Commit); err != nil { - delete(m.blockCache, cachedBlock.Block.Header.Height) - /// TODO: can we take an action here such as dropping the peer / reducing their reputation? - return fmt.Errorf("block not valid at height %d, dropping it: err:%w", cachedBlock.Block.Header.Height, err) - } - - err := m.applyBlock(cachedBlock.Block, cachedBlock.Commit, blockMetaData{source: gossipedBlock}) - if err != nil { - return fmt.Errorf("apply cached block: expected height: %d: %w", expectedHeight, err) - } - m.logger.Debug("applied cached block", "height", expectedHeight) - - delete(m.blockCache, cachedBlock.Block.Header.Height) - } - - return nil -} - // isHeightAlreadyApplied checks if the block height is already applied to the app. 
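isHeightAlreadyApplied, whose body follows, is the recovery probe the reworked applyBlock opens with. Only its first statement is visible in these hunks; by its name and call site, the elided check presumably reduces to comparing the app's last committed height (from the ABCI Info query) with the incoming block height. An assumed reconstruction, not verified against the full source:

    proxyAppInfo, err := m.Executor.GetAppInfo()
    if err != nil {
        return false, err
    }
    // assumed: the app is authoritative about what has already been committed
    return uint64(proxyAppInfo.LastBlockHeight) >= blockHeight, nil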
func (m *Manager) isHeightAlreadyApplied(blockHeight uint64) (bool, error) { proxyAppInfo, err := m.Executor.GetAppInfo() @@ -158,17 +130,3 @@ func (m *Manager) validateBlock(block *types.Block, commit *types.Commit) error return types.ValidateProposedTransition(m.State, block, commit, proposer) } - -func (m *Manager) gossipBlock(ctx context.Context, block types.Block, commit types.Commit) error { - gossipedBlock := p2p.GossipedBlock{Block: block, Commit: commit} - gossipedBlockBytes, err := gossipedBlock.MarshalBinary() - if err != nil { - return fmt.Errorf("marshal binary: %w: %w", err, ErrNonRecoverable) - } - if err := m.p2pClient.GossipBlock(ctx, gossipedBlockBytes); err != nil { - // Although this boils down to publishing on a topic, we don't want to speculate too much on what - // could cause that to fail, so we assume recoverable. - return fmt.Errorf("p2p gossip block: %w: %w", err, ErrRecoverable) - } - return nil -} diff --git a/block/executor.go b/block/executor.go index 206a97ac3..59330322c 100644 --- a/block/executor.go +++ b/block/executor.go @@ -13,7 +13,7 @@ import ( tmtypes "github.com/tendermint/tendermint/types" "go.uber.org/multierr" - libp2pcrypto "github.com/libp2p/go-libp2p-core/crypto" + libp2pcrypto "github.com/libp2p/go-libp2p/core/crypto" "github.com/dymensionxyz/dymint/mempool" "github.com/dymensionxyz/dymint/types" diff --git a/block/manager.go b/block/manager.go index 3cb89ed87..1debbb680 100644 --- a/block/manager.go +++ b/block/manager.go @@ -171,6 +171,7 @@ func (m *Manager) syncBlockManager() error { if errors.Is(err, gerr.ErrNotFound) { // The SL hasn't got any batches for this chain yet. m.logger.Info("No batches for chain found in SL. Start writing first batch.") + //FIXME: set correct syncTarget m.SyncTarget.Store(uint64(m.Genesis.InitialHeight - 1)) return nil } @@ -196,30 +197,6 @@ func (m *Manager) UpdateSyncParams(endHeight uint64) { m.SyncTarget.Store(endHeight) } -// TODO: move to gossip.go -// onNewGossippedBlock will take a block and apply it -func (m *Manager) onNewGossipedBlock(event pubsub.Message) { - m.retrieverMutex.Lock() // needed to protect blockCache access - eventData := event.Data().(p2p.GossipedBlock) - block := eventData.Block - commit := eventData.Commit - m.logger.Debug("Received new block via gossip", "height", block.Header.Height, "n cachedBlocks", len(m.blockCache)) - - nextHeight := m.State.NextHeight() - if block.Header.Height >= nextHeight { - m.blockCache[block.Header.Height] = CachedBlock{ - Block: &block, - Commit: &commit, - } - m.logger.Debug("caching block", "block height", block.Header.Height, "store height", m.State.Height()) - } - m.retrieverMutex.Unlock() // have to give this up as it's locked again in attempt apply, and we're not re-entrant - err := m.attemptApplyCachedBlocks() - if err != nil { - m.logger.Error("applying cached blocks", "err", err) - } -} - // getInitialState tries to load lastState from Store, and if it's not available it reads GenesisDoc. 
func getInitialState(store store.Store, genesis *tmtypes.GenesisDoc, logger types.Logger) (s types.State, err error) {
 	s, err = store.LoadState()

From cecf10615d0f4e15110b26dce525749fc33d487a Mon Sep 17 00:00:00 2001
From: Michael Tsitrin
Date: Sun, 12 May 2024 14:55:41 +0300
Subject: [PATCH 26/35] reverted executor key

---
 block/block.go               |  17 +++--
 block/executor.go            |  16 ++--
 block/executor_test.go       |   8 +-
 block/gossip.go              |  76 +++++++++++++++++++
 block/initchain.go           |   4 +-
 block/manager.go             |  25 ++-----
 block/manager_test.go        |   2 +-
 block/produce.go             |  17 ++---
 block/state.go               | 137 +++++++++++++++++----------------
 da/avail/avail.go            |   2 +-
 da/grpc/grpc.go              |   2 +-
 da/grpc/mockserv/mockserv.go |   2 +-
 store/store.go               |   2 +-
 store/storeIface.go          |   2 +-
 store/store_test.go          |   2 +-
 testutil/block.go            |   6 +-
 testutil/mocks.go            |   4 +-
 types/state.go               |  53 ++++----------
 types/validation.go          |   9 +--
 19 files changed, 216 insertions(+), 170 deletions(-)
 create mode 100644 block/gossip.go

diff --git a/block/block.go b/block/block.go
index ae05f6fbb..f7f048760 100644
--- a/block/block.go
+++ b/block/block.go
@@ -62,7 +62,7 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta
 	}
 
 	m.State = newState
-	dbBatch, err = m.Store.UpdateState(m.State, dbBatch)
+	dbBatch, err = m.Store.SaveState(m.State, dbBatch)
 	if err != nil {
 		dbBatch.Discard()
 		return fmt.Errorf("update state: %w", err)
@@ -79,15 +79,15 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta
 	}
 
 	// Commit block to app
-	appHash, retainHeight, err := m.Executor.Commit(&newState, block, responses)
+	appHash, retainHeight, err := m.Executor.Commit(newState, block, responses)
 	if err != nil {
 		return fmt.Errorf("commit block: %w", err)
 	}
 
 	// Update the state with the new app hash, last validators and store height from the commit.
 	// Every one of those, if happens before commit, prevents us from re-executing the block in case failed during commit.
-	newState.SetABCICommitResult(responses, appHash, block.Header.Height)
-	_, err = m.Store.UpdateState(newState, nil)
+	m.Executor.UpdateStateFromCommitResponse(&newState, responses, appHash, block.Header.Height)
+	_, err = m.Store.SaveState(newState, nil)
 	if err != nil {
 		return fmt.Errorf("final update state: %w", err)
 	}
@@ -100,12 +100,13 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta
 	} else {
 		m.logger.Debug("pruned blocks", "pruned", pruned, "retain_height", retainHeight)
 	}
-	newState.BaseHeight = m.State.Base()
-	//TODO: update state
+	newState.BaseHeight = m.State.BaseHeight
+	_, err = m.Store.SaveState(newState, nil)
+	if err != nil {
+		return fmt.Errorf("final update state: %w", err)
+	}
 	}
 
 	m.State = newState
 	return nil
 }
diff --git a/block/executor.go b/block/executor.go
index 59330322c..3d01fe4c5 100644
--- a/block/executor.go
+++ b/block/executor.go
@@ -13,8 +13,6 @@ import (
 	tmtypes "github.com/tendermint/tendermint/types"
 	"go.uber.org/multierr"
 
-	libp2pcrypto "github.com/libp2p/go-libp2p/core/crypto"
-
 	"github.com/dymensionxyz/dymint/mempool"
 	"github.com/dymensionxyz/dymint/types"
 )
@@ -35,12 +33,7 @@ type Executor struct {
 
 // NewExecutor creates new instance of BlockExecutor.
 // Proposer address and namespace ID will be used in all newly created blocks.
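Patch 26 reverts NewExecutor to taking a plain proposer address, so the key handling happens once in NewManager via the getAddress helper that patch 23 relocated into block/types.go. For reference, that derivation is just Tendermint's address hash over the raw libp2p public key; a self-contained example (standalone program, not dymint code):

    package main

    import (
        "crypto/rand"
        "fmt"

        "github.com/libp2p/go-libp2p/core/crypto"
        tmcrypto "github.com/tendermint/tendermint/crypto"
    )

    func main() {
        // mirrors getAddress in block/types.go: libp2p key -> tendermint address
        key, _, err := crypto.GenerateEd25519Key(rand.Reader)
        if err != nil {
            panic(err)
        }
        raw, err := key.GetPublic().Raw()
        if err != nil {
            panic(err)
        }
        fmt.Printf("proposer address: %X\n", tmcrypto.AddressHash(raw))
    }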
-func NewExecutor(proposerKey libp2pcrypto.PrivKey, namespaceID string, chainID string, mempool mempool.Mempool, proxyApp proxy.AppConns, eventBus *tmtypes.EventBus, logger types.Logger) (*Executor, error) { - proposerAddress, err := getAddress(proposerKey) - if err != nil { - return nil, err - } - +func NewExecutor(proposerAddress []byte, namespaceID string, chainID string, mempool mempool.Mempool, proxyApp proxy.AppConns, eventBus *tmtypes.EventBus, logger types.Logger) (*Executor, error) { bytes, err := hex.DecodeString(namespaceID) if err != nil { return nil, err @@ -141,13 +134,14 @@ func (e *Executor) CreateBlock(height uint64, lastCommit *types.Commit, lastHead } // Commit commits the block -func (e *Executor) Commit(state *types.State, block *types.Block, resp *tmstate.ABCIResponses) ([]byte, int64, error) { +func (e *Executor) Commit(state types.State, block *types.Block, resp *tmstate.ABCIResponses) ([]byte, int64, error) { appHash, retainHeight, err := e.commit(state, block, resp.DeliverTxs) if err != nil { return nil, 0, err } - err = e.publishEvents(resp, block, *state) + //FIXME: state is wrong here + err = e.publishEvents(resp, block, state) if err != nil { e.logger.Error("fire block events", "error", err) return nil, 0, err @@ -160,7 +154,7 @@ func (e *Executor) GetAppInfo() (*abci.ResponseInfo, error) { return e.proxyAppQueryConn.InfoSync(abci.RequestInfo{}) } -func (e *Executor) commit(state *types.State, block *types.Block, deliverTxs []*abci.ResponseDeliverTx) ([]byte, int64, error) { +func (e *Executor) commit(state types.State, block *types.Block, deliverTxs []*abci.ResponseDeliverTx) ([]byte, int64, error) { e.mempool.Lock() defer e.mempool.Unlock() diff --git a/block/executor_test.go b/block/executor_test.go index 05aeb936a..f8199c82c 100644 --- a/block/executor_test.go +++ b/block/executor_test.go @@ -185,10 +185,10 @@ func TestApplyBlock(t *testing.T) { newState, err := executor.UpdateStateFromResponses(resp, state, block) require.NoError(err) require.NotNil(newState) - assert.Equal(1, newState.LastBlockHeight) - _, err = executor.Commit(&newState, block, resp) + assert.Equal(uint64(1), newState.LastBlockHeight) + appHash, _, err := executor.Commit(newState, block, resp) require.NoError(err) - assert.Equal(mockAppHash, newState.AppHash) + assert.Equal(mockAppHash, appHash) newState.LastStoreHeight = uint64(newState.LastBlockHeight) // Create another block with multiple Tx from mempool @@ -240,7 +240,7 @@ func TestApplyBlock(t *testing.T) { require.NoError(err) require.NotNil(newState) assert.Equal(uint64(2), newState.LastBlockHeight) - _, err = executor.Commit(&newState, block, resp) + _, _, err = executor.Commit(newState, block, resp) require.NoError(err) // wait for at least 4 Tx events, for up to 3 second. 
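The test changes above double as usage documentation for the new Commit contract: the executor no longer mutates the caller's state; it returns the app hash (plus the retain height for pruning) and the caller folds the result back in. Condensed from TestApplyBlock as it stands after this patch (test fixtures elided; require and assert are the testify instances the test already builds):

    resp, err := executor.ExecuteBlock(state, block)
    require.NoError(err)

    newState, err := executor.UpdateStateFromResponses(resp, state, block)
    require.NoError(err)

    // Commit now hands the app hash back instead of writing it into the state.
    appHash, retainHeight, err := executor.Commit(newState, block, resp)
    require.NoError(err)
    assert.Equal(mockAppHash, appHash)
    _ = retainHeight // the app's pruning hint, unused by this part of the test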
diff --git a/block/gossip.go b/block/gossip.go
new file mode 100644
index 000000000..b1a4b1f5f
--- /dev/null
+++ b/block/gossip.go
@@ -0,0 +1,76 @@
+package block
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/dymensionxyz/dymint/p2p"
+	"github.com/dymensionxyz/dymint/types"
+	"github.com/tendermint/tendermint/libs/pubsub"
+)
+
+// onNewGossipedBlock will take a block and apply it
+func (m *Manager) onNewGossipedBlock(event pubsub.Message) {
+	m.retrieverMutex.Lock() // needed to protect blockCache access
+	eventData := event.Data().(p2p.GossipedBlock)
+	block := eventData.Block
+	commit := eventData.Commit
+	m.logger.Debug("Received new block via gossip", "height", block.Header.Height, "n cachedBlocks", len(m.blockCache))
+
+	nextHeight := m.State.NextHeight()
+	if block.Header.Height >= nextHeight {
+		m.blockCache[block.Header.Height] = CachedBlock{
+			Block:  &block,
+			Commit: &commit,
+		}
+		m.logger.Debug("caching block", "block height", block.Header.Height, "store height", m.State.Height())
+	}
+	m.retrieverMutex.Unlock() // have to give this up as it's locked again in attempt apply, and we're not re-entrant
+	err := m.attemptApplyCachedBlocks()
+	if err != nil {
+		m.logger.Error("applying cached blocks", "err", err)
+	}
+}
+
+func (m *Manager) attemptApplyCachedBlocks() error {
+	m.retrieverMutex.Lock()
+	defer m.retrieverMutex.Unlock()
+
+	for {
+		expectedHeight := m.State.NextHeight()
+
+		cachedBlock, blockExists := m.blockCache[expectedHeight]
+		if !blockExists {
+			break
+		}
+		if err := m.validateBlock(cachedBlock.Block, cachedBlock.Commit); err != nil {
+			delete(m.blockCache, cachedBlock.Block.Header.Height)
+			/// TODO: can we take an action here such as dropping the peer / reducing their reputation?
+			return fmt.Errorf("block not valid at height %d, dropping it: err:%w", cachedBlock.Block.Header.Height, err)
+		}
+
+		err := m.applyBlock(cachedBlock.Block, cachedBlock.Commit, blockMetaData{source: gossipedBlock})
+		if err != nil {
+			return fmt.Errorf("apply cached block: expected height: %d: %w", expectedHeight, err)
+		}
+		m.logger.Debug("applied cached block", "height", expectedHeight)
+
+		delete(m.blockCache, cachedBlock.Block.Header.Height)
+	}
+
+	return nil
+}
+
+func (m *Manager) gossipBlock(ctx context.Context, block types.Block, commit types.Commit) error {
+	gossipedBlock := p2p.GossipedBlock{Block: block, Commit: commit}
+	gossipedBlockBytes, err := gossipedBlock.MarshalBinary()
+	if err != nil {
+		return fmt.Errorf("marshal binary: %w: %w", err, ErrNonRecoverable)
+	}
+	if err := m.p2pClient.GossipBlock(ctx, gossipedBlockBytes); err != nil {
+		// Although this boils down to publishing on a topic, we don't want to speculate too much on what
+		// could cause that to fail, so we assume recoverable.
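attemptApplyCachedBlocks above is the ordering buffer for gossip: blocks that arrive early are parked in blockCache, and the loop drains the cache from the next expected height upward. A self-contained toy of the drain behavior (a plain map and strings in place of the real block and commit types):

    package main

    import "fmt"

    func main() {
        next := uint64(3)
        cache := map[uint64]string{}
        for _, h := range []uint64{5, 4, 3} { // gossiped blocks arrive out of order
            if h >= next {
                cache[h] = fmt.Sprintf("block %d", h) // onNewGossipedBlock: park it
            }
            for { // attemptApplyCachedBlocks: apply strictly in order
                b, ok := cache[next]
                if !ok {
                    break
                }
                fmt.Println("applying", b)
                delete(cache, next)
                next++
            }
        }
    }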
+ return fmt.Errorf("p2p gossip block: %w: %w", err, ErrRecoverable) + } + return nil +} diff --git a/block/initchain.go b/block/initchain.go index c0a0e0b40..727505f38 100644 --- a/block/initchain.go +++ b/block/initchain.go @@ -24,8 +24,8 @@ func (m *Manager) RunInitChain(ctx context.Context) error { // update the state with only the consensus pubkey m.Executor.UpdateStateAfterInitChain(&m.State, res, gensisValSet) - m.Executor.UpdateMempoolAfterInitChain(&m.State) - if _, err := m.Store.UpdateState(m.State, nil); err != nil { + m.Executor.UpdateMempoolAfterInitChain(m.State) + if _, err := m.Store.SaveState(m.State, nil); err != nil { return err } diff --git a/block/manager.go b/block/manager.go index 1debbb680..72fbd300d 100644 --- a/block/manager.go +++ b/block/manager.go @@ -86,7 +86,12 @@ func NewManager( p2pClient *p2p.Client, logger types.Logger, ) (*Manager, error) { - exec, err := NewExecutor(proposerKey, conf.NamespaceID, genesis.ChainID, mempool, proxyApp, eventBus, logger) + proposerAddress, err := getAddress(proposerKey) + if err != nil { + return nil, err + } + + exec, err := NewExecutor(proposerAddress, conf.NamespaceID, genesis.ChainID, mempool, proxyApp, eventBus, logger) if err != nil { return nil, fmt.Errorf("create block executor: %w", err) } @@ -170,8 +175,7 @@ func (m *Manager) syncBlockManager() error { res, err := m.SLClient.RetrieveBatch() if errors.Is(err, gerr.ErrNotFound) { // The SL hasn't got any batches for this chain yet. - m.logger.Info("No batches for chain found in SL. Start writing first batch.") - //FIXME: set correct syncTarget + m.logger.Info("No batches for chain found in SL.") m.SyncTarget.Store(uint64(m.Genesis.InitialHeight - 1)) return nil } @@ -196,18 +200,3 @@ func (m *Manager) UpdateSyncParams(endHeight uint64) { m.logger.Info("Received new syncTarget", "syncTarget", endHeight) m.SyncTarget.Store(endHeight) } - -// getInitialState tries to load lastState from Store, and if it's not available it reads GenesisDoc. 
-func getInitialState(store store.Store, genesis *tmtypes.GenesisDoc, logger types.Logger) (s types.State, err error) { - s, err = store.LoadState() - if errors.Is(err, types.ErrNoStateFound) { - logger.Info("failed to find state in the store, creating new state from genesis") - s, err = types.NewFromGenesisDoc(genesis) - } - - if err != nil { - return types.State{}, fmt.Errorf("get initial state: %w", err) - } - - return s, nil -} diff --git a/block/manager_test.go b/block/manager_test.go index 1ee46a5f5..5e1a4b459 100644 --- a/block/manager_test.go +++ b/block/manager_test.go @@ -47,7 +47,7 @@ func TestInitialState(t *testing.T) { // Init empty store and full store emptyStore := store.New(store.NewDefaultInMemoryKVStore()) fullStore := store.New(store.NewDefaultInMemoryKVStore()) - _, err = fullStore.UpdateState(sampleState, nil) + _, err = fullStore.SaveState(sampleState, nil) require.NoError(t, err) // Init p2p client diff --git a/block/produce.go b/block/produce.go index 1aa1b630b..bb1f7f2dc 100644 --- a/block/produce.go +++ b/block/produce.go @@ -19,7 +19,6 @@ import ( func (m *Manager) ProduceBlockLoop(ctx context.Context) { m.logger.Debug("Started produce loop") - // Main ticker for block production ticker := time.NewTicker(m.Conf.BlockTime) defer ticker.Stop() @@ -90,19 +89,14 @@ func (m *Manager) ProduceAndGossipBlock(ctx context.Context, allowEmpty bool) (* func (m *Manager) produceBlock(allowEmpty bool) (*types.Block, *types.Commit, error) { var ( - lastCommit *types.Commit - lastHeaderHash [32]byte - newHeight uint64 err error + lastHeaderHash [32]byte + lastCommit = &types.Commit{} + newHeight = m.State.NextHeight() ) - if m.State.IsGenesis() { - newHeight = uint64(m.State.InitialHeight) - lastCommit = &types.Commit{} - m.State.BaseHeight = newHeight - } else { - height := m.State.Height() - newHeight = height + 1 + if !m.State.IsGenesis() { + height := newHeight - 1 lastCommit, err = m.Store.LoadCommit(height) if err != nil { return nil, nil, fmt.Errorf("load commit: height: %d: %w: %w", height, err, ErrNonRecoverable) @@ -190,6 +184,7 @@ func (m *Manager) createTMSignature(block *types.Block, proposerAddress []byte, } v := vote.ToProto() // convert libp2p key to tm key + //TODO: move to types raw_key, _ := m.ProposerKey.Raw() tmprivkey := tmed25519.PrivKey(raw_key) tmprivkey.PubKey().Bytes() diff --git a/block/state.go b/block/state.go index 1aeea7913..8fd45d063 100644 --- a/block/state.go +++ b/block/state.go @@ -1,6 +1,7 @@ package block import ( + "errors" "fmt" "time" @@ -12,10 +13,24 @@ import ( tmtypes "github.com/tendermint/tendermint/types" "github.com/dymensionxyz/dymint/mempool" + "github.com/dymensionxyz/dymint/store" "github.com/dymensionxyz/dymint/types" ) -// TODO: move all those methods from blockExecutor to manager +// getInitialState tries to load lastState from Store, and if it's not available it reads GenesisDoc. 
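getInitialState, whose body follows, is the boot-time fork: a persisted state wins, otherwise the node starts from the genesis document. A usage sketch against a fresh in-memory store (genesisDoc and logger are assumed fixtures, not names from this diff):

    // fresh store: LoadState fails with types.ErrNoStateFound, so the state
    // comes from types.NewStateFromGenesis(genesisDoc); on a restart the
    // persisted snapshot is returned instead.
    s, err := getInitialState(store.New(store.NewDefaultInMemoryKVStore()), genesisDoc, logger)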
+func getInitialState(store store.Store, genesis *tmtypes.GenesisDoc, logger types.Logger) (s types.State, err error) { + s, err = store.LoadState() + if errors.Is(err, types.ErrNoStateFound) { + logger.Info("failed to find state in the store, creating new state from genesis") + s, err = types.NewStateFromGenesis(genesis) + } + + if err != nil { + return types.State{}, fmt.Errorf("get initial state: %w", err) + } + + return s, nil +} // UpdateStateFromApp is responsible for aligning the state of the store from the abci app func (m *Manager) UpdateStateFromApp() error { @@ -25,75 +40,27 @@ func (m *Manager) UpdateStateFromApp() error { } appHeight := uint64(proxyAppInfo.LastBlockHeight) + resp, err := m.Store.LoadBlockResponses(appHeight) + if err != nil { + return errorsmod.Wrap(err, "load block responses") + } // update the state with the hash, last store height and last validators. + //TODO: DRY with the post commit update m.State.AppHash = *(*[32]byte)(proxyAppInfo.LastBlockAppHash) m.State.LastStoreHeight = appHeight m.State.LastValidators = m.State.Validators.Copy() - resp, err := m.Store.LoadBlockResponses(appHeight) - if err != nil { - return errorsmod.Wrap(err, "load block responses") - } copy(m.State.LastResultsHash[:], tmtypes.NewResults(resp.DeliverTxs).Hash()) + m.State.SetHeight(appHeight) - if ok := m.State.SetHeight(appHeight); !ok { - return fmt.Errorf("state set height: %d", appHeight) - } - _, err = m.Store.UpdateState(m.State, nil) + _, err = m.Store.SaveState(m.State, nil) if err != nil { return errorsmod.Wrap(err, "update state") } return nil } -func (e *Executor) updateState(state types.State, block *types.Block, abciResponses *tmstate.ABCIResponses, validatorUpdates []*tmtypes.Validator) (types.State, error) { - nValSet := state.NextValidators.Copy() - lastHeightValSetChanged := state.LastHeightValidatorsChanged - // Dymint can work without validators - if len(nValSet.Validators) > 0 { - if len(validatorUpdates) > 0 { - err := nValSet.UpdateWithChangeSet(validatorUpdates) - if err != nil { - return state, nil - } - // Change results from this height but only applies to the next next height. - lastHeightValSetChanged = int64(block.Header.Height + 1 + 1) - } - - // TODO(tzdybal): right now, it's for backward compatibility, may need to change this - nValSet.IncrementProposerPriority(1) - } - - hash := block.Header.Hash() - // TODO: we can probably pass the state as a pointer and update it directly - s := types.State{ - Version: state.Version, - ChainID: state.ChainID, - InitialHeight: state.InitialHeight, - LastBlockHeight: block.Header.Height, - LastBlockTime: time.Unix(0, int64(block.Header.Time)), - LastBlockID: tmtypes.BlockID{ - Hash: hash[:], - // for now, we don't care about part set headers - }, - NextValidators: nValSet, - Validators: state.NextValidators.Copy(), - LastHeightValidatorsChanged: lastHeightValSetChanged, - ConsensusParams: state.ConsensusParams, - LastHeightConsensusParamsChanged: state.LastHeightConsensusParamsChanged, - // We're gonna update those fields only after we commit the blocks - AppHash: state.AppHash, - LastValidators: state.LastValidators.Copy(), - LastStoreHeight: state.LastStoreHeight, - - LastResultsHash: state.LastResultsHash, - BaseHeight: state.BaseHeight, - } - - return s, nil -} - func (e *Executor) UpdateStateAfterInitChain(s *types.State, res *abci.ResponseInitChain, validators []*tmtypes.Validator) { // If the app did not return an app hash, we keep the one set from the genesis doc in // the state. 
We don't set appHash since we don't want the genesis doc app hash @@ -137,7 +104,7 @@ func (e *Executor) UpdateStateAfterInitChain(s *types.State, res *abci.ResponseI s.LastValidators = s.Validators.Copy() } -func (e *Executor) UpdateMempoolAfterInitChain(s *types.State) { +func (e *Executor) UpdateMempoolAfterInitChain(s types.State) { e.mempool.SetPreCheckFn(mempool.PreCheckMaxBytes(s.ConsensusParams.Block.MaxBytes)) e.mempool.SetPostCheckFn(mempool.PostCheckMaxGas(s.ConsensusParams.Block.MaxGas)) } @@ -152,10 +119,58 @@ func (e *Executor) UpdateStateFromResponses(resp *tmstate.ABCIResponses, state t e.logger.Error("maxBytes=0", "state.ConsensusParams.Block", state.ConsensusParams.Block) } - state, err := e.updateState(state, block, resp, validatorUpdates) - if err != nil { - return types.State{}, err + nValSet := state.NextValidators.Copy() + lastHeightValSetChanged := state.LastHeightValidatorsChanged + // Dymint can work without validators + if len(nValSet.Validators) > 0 { + if len(validatorUpdates) > 0 { + err := nValSet.UpdateWithChangeSet(validatorUpdates) + if err != nil { + return state, nil + } + // Change results from this height but only applies to the next next height. + lastHeightValSetChanged = int64(block.Header.Height + 1 + 1) + } + + // TODO(tzdybal): right now, it's for backward compatibility, may need to change this + nValSet.IncrementProposerPriority(1) + } + + hash := block.Header.Hash() + // TODO: we can probably pass the state as a pointer and update it directly + s := types.State{ + Version: state.Version, + ChainID: state.ChainID, + InitialHeight: state.InitialHeight, + LastBlockHeight: block.Header.Height, + LastBlockTime: time.Unix(0, int64(block.Header.Time)), + LastBlockID: tmtypes.BlockID{ + Hash: hash[:], + // for now, we don't care about part set headers + }, + NextValidators: nValSet, + Validators: state.NextValidators.Copy(), + LastHeightValidatorsChanged: lastHeightValSetChanged, + ConsensusParams: state.ConsensusParams, + LastHeightConsensusParamsChanged: state.LastHeightConsensusParamsChanged, + // We're gonna update those fields only after we commit the blocks + AppHash: state.AppHash, + LastValidators: state.LastValidators.Copy(), + LastStoreHeight: state.LastStoreHeight, + + LastResultsHash: state.LastResultsHash, + BaseHeight: state.BaseHeight, } - return state, nil + return s, nil +} + +// Update state from Commit response +func (e *Executor) UpdateStateFromCommitResponse(s *types.State, resp *tmstate.ABCIResponses, appHash []byte, height uint64) { + copy(s.AppHash[:], appHash[:]) + copy(s.LastResultsHash[:], tmtypes.NewResults(resp.DeliverTxs).Hash()) + + s.LastValidators = s.Validators.Copy() + s.LastStoreHeight = height + s.SetHeight(height) } diff --git a/da/avail/avail.go b/da/avail/avail.go index e6fe41d70..e5b20eabd 100644 --- a/da/avail/avail.go +++ b/da/avail/avail.go @@ -10,7 +10,6 @@ import ( "github.com/avast/retry-go/v4" "github.com/gogo/protobuf/proto" - "github.com/dymensionxyz/dymint/store" "github.com/dymensionxyz/dymint/types" gsrpc "github.com/centrifuge/go-substrate-rpc-client/v4" @@ -20,6 +19,7 @@ import ( "github.com/centrifuge/go-substrate-rpc-client/v4/signature" availtypes "github.com/centrifuge/go-substrate-rpc-client/v4/types" "github.com/dymensionxyz/dymint/da" + "github.com/dymensionxyz/dymint/store" pb "github.com/dymensionxyz/dymint/types/pb/dymint" "github.com/tendermint/tendermint/libs/pubsub" ) diff --git a/da/grpc/grpc.go b/da/grpc/grpc.go index aff6da5a4..85c87d6ff 100644 --- a/da/grpc/grpc.go +++ 
b/da/grpc/grpc.go @@ -5,11 +5,11 @@ import ( "encoding/json" "strconv" - "github.com/dymensionxyz/dymint/store" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "github.com/dymensionxyz/dymint/da" + "github.com/dymensionxyz/dymint/store" "github.com/dymensionxyz/dymint/types" "github.com/dymensionxyz/dymint/types/pb/dalc" "github.com/tendermint/tendermint/libs/pubsub" diff --git a/da/grpc/mockserv/mockserv.go b/da/grpc/mockserv/mockserv.go index 3fa7ee0b8..a467dfe01 100644 --- a/da/grpc/mockserv/mockserv.go +++ b/da/grpc/mockserv/mockserv.go @@ -4,13 +4,13 @@ import ( "context" "os" - "github.com/dymensionxyz/dymint/store" tmlog "github.com/tendermint/tendermint/libs/log" "google.golang.org/grpc" "github.com/dymensionxyz/dymint/da" grpcda "github.com/dymensionxyz/dymint/da/grpc" "github.com/dymensionxyz/dymint/da/local" + "github.com/dymensionxyz/dymint/store" "github.com/dymensionxyz/dymint/types" "github.com/dymensionxyz/dymint/types/pb/dalc" "github.com/dymensionxyz/dymint/types/pb/dymint" diff --git a/store/store.go b/store/store.go index 96cf1319a..0c38b5cbb 100644 --- a/store/store.go +++ b/store/store.go @@ -174,7 +174,7 @@ func (s *DefaultStore) LoadCommitByHash(hash [32]byte) (*types.Commit, error) { // UpdateState updates state saved in Store. Only one State is stored. // If there is no State in Store, state will be saved. -func (s *DefaultStore) UpdateState(state types.State, batch Batch) (Batch, error) { +func (s *DefaultStore) SaveState(state types.State, batch Batch) (Batch, error) { pbState, err := state.ToProto() if err != nil { return batch, fmt.Errorf("marshal state to JSON: %w", err) diff --git a/store/storeIface.go b/store/storeIface.go index 051fc4316..1be8ec1f5 100644 --- a/store/storeIface.go +++ b/store/storeIface.go @@ -61,7 +61,7 @@ type Store interface { // UpdateState updates state saved in Store. Only one State is stored. // If there is no State in Store, state will be saved. - UpdateState(state types.State, batch Batch) (Batch, error) + SaveState(state types.State, batch Batch) (Batch, error) // LoadState returns last state saved with UpdateState. 
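The UpdateState to SaveState rename above is mechanical, but the semantics the interface keeps are worth restating: only one State is ever stored, so SaveState overwrites the previous snapshot and LoadState returns the latest one. Condensed from the updated round-trip test in store_test.go (the real test also populates the validator sets, which serialization requires):

    kv := store.NewDefaultInMemoryKVStore()
    s1 := store.New(kv)
    _, err := s1.SaveState(types.State{LastBlockHeight: 10 /* plus validator sets */}, nil) // nil batch: write through
    require.NoError(err)

    s2 := store.New(kv) // a second store over the same KV sees the snapshot
    state, err := s2.LoadState()
    require.NoError(err)
    assert.Equal(uint64(10), state.LastBlockHeight)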
LoadState() (types.State, error) diff --git a/store/store_test.go b/store/store_test.go index ffd9d51ff..22a6d7510 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -93,7 +93,7 @@ func TestLoadState(t *testing.T) { kv := store.NewDefaultInMemoryKVStore() s1 := store.New(kv) expectedHeight := uint64(10) - _, err := s1.UpdateState(types.State{ + _, err := s1.SaveState(types.State{ LastBlockHeight: expectedHeight, LastStoreHeight: uint64(expectedHeight), NextValidators: validatorSet, diff --git a/testutil/block.go b/testutil/block.go index b809b0181..195faa6c0 100644 --- a/testutil/block.go +++ b/testutil/block.go @@ -32,7 +32,7 @@ const ( /* -------------------------------------------------------------------------- */ /* utils */ /* -------------------------------------------------------------------------- */ -func GetManagerWithProposerKey(conf config.BlockManagerConfig, proposerKey crypto.PrivKey, settlementlc settlement.LayerI, dalc da.DataAvailabilityLayerClient, genesisHeight int64, storeInitialHeight int64, storeLastBlockHeight int64, proxyAppConns proxy.AppConns, mockStore store.Store) (*block.Manager, error) { +func GetManagerWithProposerKey(conf config.BlockManagerConfig, proposerKey crypto.PrivKey, settlementlc settlement.LayerI, dalc da.DataAvailabilityLayerClient, genesisHeight, storeInitialHeight, storeLastBlockHeight int64, proxyAppConns proxy.AppConns, mockStore store.Store) (*block.Manager, error) { genesis := GenerateGenesis(genesisHeight) // Change the LastBlockHeight to avoid calling InitChainSync within the manager // And updating the state according to the genesis. @@ -43,7 +43,7 @@ func GetManagerWithProposerKey(conf config.BlockManagerConfig, proposerKey crypt } else { managerStore = mockStore } - if _, err := managerStore.UpdateState(state, nil); err != nil { + if _, err := managerStore.SaveState(state, nil); err != nil { return nil, err } @@ -113,7 +113,7 @@ func GetManagerWithProposerKey(conf config.BlockManagerConfig, proposerKey crypt return manager, nil } -func GetManager(conf config.BlockManagerConfig, settlementlc settlement.LayerI, dalc da.DataAvailabilityLayerClient, genesisHeight int64, storeInitialHeight int64, storeLastBlockHeight int64, proxyAppConns proxy.AppConns, mockStore store.Store) (*block.Manager, error) { +func GetManager(conf config.BlockManagerConfig, settlementlc settlement.LayerI, dalc da.DataAvailabilityLayerClient, genesisHeight, storeInitialHeight, storeLastBlockHeight int64, proxyAppConns proxy.AppConns, mockStore store.Store) (*block.Manager, error) { proposerKey, _, err := crypto.GenerateEd25519Key(rand.Reader) if err != nil { return nil, err diff --git a/testutil/mocks.go b/testutil/mocks.go index 3cb3a3388..1c633a3e2 100644 --- a/testutil/mocks.go +++ b/testutil/mocks.go @@ -112,11 +112,11 @@ func (m *MockStore) NextHeight() uint64 { } // UpdateState updates the state of the mock store -func (m *MockStore) UpdateState(state types.State, batch store.Batch) (store.Batch, error) { +func (m *MockStore) SaveState(state types.State, batch store.Batch) (store.Batch, error) { if batch != nil && m.ShouldFailUpdateStateWithBatch || m.ShoudFailUpdateState && batch == nil { return nil, errors.New("failed to update state") } - return m.DefaultStore.UpdateState(state, batch) + return m.DefaultStore.SaveState(state, batch) } // NewMockStore returns a new mock store diff --git a/types/state.go b/types/state.go index 056d22d31..02666fc56 100644 --- a/types/state.go +++ b/types/state.go @@ -2,11 +2,8 @@ package types import ( "fmt" - 
"sync/atomic" "time" - tmtypes "github.com/tendermint/tendermint/types" - // TODO(tzdybal): copy to local project? tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -27,8 +24,9 @@ type State struct { LastBlockHeight uint64 LastBlockID types.BlockID LastBlockTime time.Time + // BaseHeight is the height of the first block we have in store after pruning. + BaseHeight uint64 - // In the MVP implementation, there will be only one Validator NextValidators *types.ValidatorSet Validators *types.ValidatorSet LastValidators *types.ValidatorSet @@ -45,25 +43,17 @@ type State struct { // LastStore height is the last height we've saved to the store. LastStoreHeight uint64 - // BaseHeight is the height of the first block we have in store after pruning. - BaseHeight uint64 - // the latest AppHash we've received from calling abci.Commit() AppHash [32]byte } -// FIXME: move from types package -// NewFromGenesisDoc reads blockchain State from genesis. -func NewFromGenesisDoc(genDoc *types.GenesisDoc) (State, error) { +// NewStateFromGenesis reads blockchain State from genesis. +func NewStateFromGenesis(genDoc *types.GenesisDoc) (State, error) { err := genDoc.ValidateAndComplete() if err != nil { return State{}, fmt.Errorf("in genesis doc: %w", err) } - var validatorSet, nextValidatorSet *types.ValidatorSet - validatorSet = types.NewValidatorSet(nil) - nextValidatorSet = types.NewValidatorSet(nil) - // InitStateVersion sets the Consensus.Block and Software versions, // but leaves the Consensus.App version blank. // The Consensus.App version will be set during the Handshake, once @@ -83,17 +73,16 @@ func NewFromGenesisDoc(genDoc *types.GenesisDoc) (State, error) { LastBlockHeight: 0, LastBlockID: types.BlockID{}, - LastBlockTime: genDoc.GenesisTime, + LastBlockTime: time.Time{}, + BaseHeight: uint64(genDoc.InitialHeight), - NextValidators: nextValidatorSet, - Validators: validatorSet, + NextValidators: types.NewValidatorSet(nil), + Validators: types.NewValidatorSet(nil), LastValidators: types.NewValidatorSet(nil), LastHeightValidatorsChanged: genDoc.InitialHeight, ConsensusParams: *genDoc.ConsensusParams, LastHeightConsensusParamsChanged: genDoc.InitialHeight, - - BaseHeight: 0, } copy(s.AppHash[:], genDoc.AppHash) @@ -101,27 +90,25 @@ func NewFromGenesisDoc(genDoc *types.GenesisDoc) (State, error) { } func (s *State) IsGenesis() bool { - return s.LastBlockHeight == 0 + return s.Height() == 0 } // SetHeight sets the height saved in the Store if it is higher than the existing height // returns OK if the value was updated successfully or did not need to be updated -func (s *State) SetHeight(height uint64) bool { - ok := true - storeHeight := s.Height() - if height > storeHeight { - ok = atomic.CompareAndSwapUint64(&s.LastBlockHeight, storeHeight, height) - } - return ok +func (s *State) SetHeight(height uint64) { + s.LastBlockHeight = height } // Height returns height of the highest block saved in the Store. func (s *State) Height() uint64 { - return uint64(s.LastBlockHeight) + return s.LastBlockHeight } // NextHeight returns the next height that expected to be stored in store. 
func (s *State) NextHeight() uint64 { + if s.IsGenesis() { + return s.InitialHeight + } return s.Height() + 1 } @@ -135,13 +122,3 @@ func (s *State) SetBase(height uint64) { func (s *State) Base() uint64 { return s.BaseHeight } - -// SetABCICommitResult -func (s *State) SetABCICommitResult(resp *tmstate.ABCIResponses, appHash []byte, height uint64) { - copy(s.AppHash[:], appHash[:]) - copy(s.LastResultsHash[:], tmtypes.NewResults(resp.DeliverTxs).Hash()) - - s.LastValidators = s.Validators.Copy() - s.LastStoreHeight = height - s.SetHeight(height) -} diff --git a/types/validation.go b/types/validation.go index 9533d7e7b..b1e2faf6e 100644 --- a/types/validation.go +++ b/types/validation.go @@ -48,12 +48,11 @@ func (b *Block) ValidateWithState(state State) error { b.Header.Version.Block != state.Version.Consensus.Block { return errors.New("b version mismatch") } - if state.LastBlockHeight <= 0 && b.Header.Height != uint64(state.InitialHeight) { - return errors.New("initial b height mismatch") - } - if state.LastBlockHeight > 0 && b.Header.Height != uint64(state.LastStoreHeight)+1 { - return errors.New("b height mismatch") + + if b.Header.Height != state.NextHeight() { + return errors.New("height mismatch") } + if !bytes.Equal(b.Header.AppHash[:], state.AppHash[:]) { return errors.New("AppHash mismatch") } From b896eaa1bac76f34f588671adb8e3b70e6ddc481 Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Sun, 12 May 2024 15:21:31 +0300 Subject: [PATCH 27/35] fixed publishEvents --- block/executor.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/block/executor.go b/block/executor.go index 3d01fe4c5..e40542b5e 100644 --- a/block/executor.go +++ b/block/executor.go @@ -140,8 +140,7 @@ func (e *Executor) Commit(state types.State, block *types.Block, resp *tmstate.A return nil, 0, err } - //FIXME: state is wrong here - err = e.publishEvents(resp, block, state) + err = e.publishEvents(resp, block) if err != nil { e.logger.Error("fire block events", "error", err) return nil, 0, err @@ -250,13 +249,12 @@ func (e *Executor) getDataHash(block *types.Block) []byte { return abciData.Hash() } -func (e *Executor) publishEvents(resp *tmstate.ABCIResponses, block *types.Block, state types.State) error { +func (e *Executor) publishEvents(resp *tmstate.ABCIResponses, block *types.Block) error { if e.eventBus == nil { return nil } abciBlock, err := types.ToABCIBlock(block) - abciBlock.Header.ValidatorsHash = state.Validators.Hash() if err != nil { return err } From bb66c2c8464dfa2f302c5a8262507feaf2567e42 Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Sun, 12 May 2024 20:49:16 +0300 Subject: [PATCH 28/35] removed unused fields. saving state post commit --- .gitignore | 3 ++- block/block.go | 22 +++++++----------- block/executor_test.go | 26 ++++++++++----------- block/state.go | 52 +++++++++++++----------------------------- types/serialization.go | 17 +------------- types/state.go | 9 +------- 6 files changed, 41 insertions(+), 88 deletions(-) diff --git a/.gitignore b/.gitignore index c046ac465..3004a5b74 100644 --- a/.gitignore +++ b/.gitignore @@ -5,4 +5,5 @@ proto/pb .go-version build -vendor/ \ No newline at end of file +vendor/ +da/grpc/mockserv/db/ diff --git a/block/block.go b/block/block.go index f7f048760..a75b20209 100644 --- a/block/block.go +++ b/block/block.go @@ -10,7 +10,7 @@ import ( // applyBlock applies the block to the store and the abci app. // Contract: block and commit must be validated before calling this function! 
-// steps: save block -> execute block with app -> update state -> commit block to app -> update store height and state hash. +// steps: save block -> execute block with app -> update state -> commit block to app -> update state's height and commit result. // As the entire process can't be atomic we need to make sure the following condition apply before // - block height is the expected block height on the store (height + 1). // - block height is the expected block height on the app (last block height + 1). @@ -49,7 +49,8 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta return fmt.Errorf("execute block: %w", err) } - newState, err := m.Executor.UpdateStateFromResponses(responses, m.State, block) + // Updates the state with validator changes and consensus params changes from the app + err = m.Executor.UpdateStateFromResponses(&m.State, responses, block) if err != nil { return fmt.Errorf("update state from responses: %w", err) } @@ -61,12 +62,6 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta return fmt.Errorf("save block responses: %w", err) } - m.State = newState - dbBatch, err = m.Store.SaveState(m.State, dbBatch) - if err != nil { - dbBatch.Discard() - return fmt.Errorf("update state: %w", err) - } dbBatch, err = m.Store.SaveValidators(block.Header.Height, m.State.Validators, dbBatch) if err != nil { dbBatch.Discard() @@ -79,15 +74,15 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta } // Commit block to app - appHash, retainHeight, err := m.Executor.Commit(newState, block, responses) + appHash, retainHeight, err := m.Executor.Commit(m.State, block, responses) if err != nil { return fmt.Errorf("commit block: %w", err) } // Update the state with the new app hash, last validators and store height from the commit. // Every one of those, if happens before commit, prevents us from re-executing the block in case failed during commit. 
-	m.Executor.UpdateStateFromCommitResponse(&newState, responses, appHash, block.Header.Height)
-	_, err = m.Store.SaveState(newState, nil)
+	m.Executor.UpdateStateFromCommitResponse(&m.State, responses, appHash, block.Header.Height)
+	_, err = m.Store.SaveState(m.State, nil)
 	if err != nil {
 		return fmt.Errorf("final update state: %w", err)
 	}
@@ -100,13 +95,11 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta
 	} else {
 		m.logger.Debug("pruned blocks", "pruned", pruned, "retain_height", retainHeight)
 	}
-	newState.BaseHeight = m.State.BaseHeight
-	_, err = m.Store.SaveState(newState, nil)
+	_, err = m.Store.SaveState(m.State, nil)
 	if err != nil {
 		return fmt.Errorf("final update state: %w", err)
 	}
 	}
-
-	m.State = newState
 	return nil
 }
diff --git a/block/executor_test.go b/block/executor_test.go
index f8199c82c..7d689a6a2 100644
--- a/block/executor_test.go
+++ b/block/executor_test.go
@@ -182,21 +182,21 @@ func TestApplyBlock(t *testing.T) {
 	resp, err := executor.ExecuteBlock(state, block)
 	require.NoError(err)
 	require.NotNil(resp)
-	newState, err := executor.UpdateStateFromResponses(resp, state, block)
+	err = executor.UpdateStateFromResponses(&state, resp, block)
 	require.NoError(err)
-	require.NotNil(newState)
-	assert.Equal(uint64(1), newState.LastBlockHeight)
-	appHash, _, err := executor.Commit(newState, block, resp)
+	require.NotNil(state)
+	assert.Equal(uint64(1), state.LastBlockHeight)
+	appHash, _, err := executor.Commit(state, block, resp)
 	require.NoError(err)
-	assert.Equal(mockAppHash, appHash)
-	newState.LastStoreHeight = uint64(newState.LastBlockHeight)
+	executor.UpdateStateFromCommitResponse(&state, resp, appHash, block.Header.Height)
+	assert.Equal(mockAppHash, state.AppHash)
 
 	// Create another block with multiple Tx from mempool
 	require.NoError(mpool.CheckTx([]byte{0, 1, 2, 3, 4}, func(r *abci.Response) {}, mempool.TxInfo{}))
 	require.NoError(mpool.CheckTx([]byte{5, 6, 7, 8, 9}, func(r *abci.Response) {}, mempool.TxInfo{}))
 	require.NoError(mpool.CheckTx([]byte{1, 2, 3, 4, 5}, func(r *abci.Response) {}, mempool.TxInfo{}))
 	require.NoError(mpool.CheckTx(make([]byte, 90), func(r *abci.Response) {}, mempool.TxInfo{}))
-	block = executor.CreateBlock(2, commit, [32]byte{}, newState, maxBytes)
+	block = executor.CreateBlock(2, commit, [32]byte{}, state, maxBytes)
 	require.NotNil(block)
 	assert.Equal(uint64(2), block.Header.Height)
 	assert.Len(block.Data.Txs, 3)
@@ -217,7 +217,7 @@ func TestApplyBlock(t *testing.T) {
 	}
 
 	// Apply the block with an invalid commit
-	err = types.ValidateProposedTransition(newState, block, invalidCommit, proposer)
+	err = types.ValidateProposedTransition(state, block, invalidCommit, proposer)
 
 	require.ErrorIs(err, types.ErrInvalidSignature)
@@ -231,16 +231,16 @@ func TestApplyBlock(t *testing.T) {
 	}
 
 	// Apply the block
-	err = types.ValidateProposedTransition(newState, block, commit, proposer)
+	err = types.ValidateProposedTransition(state, block, commit, proposer)
 	require.NoError(err)
 	resp, err = executor.ExecuteBlock(state, block)
 	require.NoError(err)
 	require.NotNil(resp)
-	newState, err = executor.UpdateStateFromResponses(resp, state, block)
+	err = executor.UpdateStateFromResponses(&state, resp, block)
 	require.NoError(err)
-	require.NotNil(newState)
-	assert.Equal(uint64(2), newState.LastBlockHeight)
-	_, _, err = executor.Commit(newState, block, resp)
+	require.NotNil(state)
+	assert.Equal(uint64(2), state.LastBlockHeight)
+	_, _, err = executor.Commit(state, block, resp)
require.NoError(err) // wait for at least 4 Tx events, for up to 3 second. diff --git a/block/state.go b/block/state.go index 8fd45d063..d580d7f1e 100644 --- a/block/state.go +++ b/block/state.go @@ -3,7 +3,6 @@ package block import ( "errors" "fmt" - "time" errorsmod "cosmossdk.io/errors" @@ -45,15 +44,21 @@ func (m *Manager) UpdateStateFromApp() error { return errorsmod.Wrap(err, "load block responses") } + vals, err := m.Store.LoadValidators(appHeight) + if err != nil { + return errorsmod.Wrap(err, "load block responses") + } + // update the state with the hash, last store height and last validators. //TODO: DRY with the post commit update m.State.AppHash = *(*[32]byte)(proxyAppInfo.LastBlockAppHash) - m.State.LastStoreHeight = appHeight - m.State.LastValidators = m.State.Validators.Copy() - + m.State.Validators = m.State.NextValidators.Copy() + m.State.NextValidators = vals copy(m.State.LastResultsHash[:], tmtypes.NewResults(resp.DeliverTxs).Hash()) m.State.SetHeight(appHeight) + //FIXME: load consensus params + _, err = m.Store.SaveState(m.State, nil) if err != nil { return errorsmod.Wrap(err, "update state") @@ -101,7 +106,6 @@ func (e *Executor) UpdateStateAfterInitChain(s *types.State, res *abci.ResponseI // Set the validators in the state s.Validators = tmtypes.NewValidatorSet(validators).CopyIncrementProposerPriority(1) s.NextValidators = s.Validators.Copy() - s.LastValidators = s.Validators.Copy() } func (e *Executor) UpdateMempoolAfterInitChain(s types.State) { @@ -110,7 +114,7 @@ func (e *Executor) UpdateMempoolAfterInitChain(s types.State) { } // UpdateStateFromResponses updates state based on the ABCIResponses. -func (e *Executor) UpdateStateFromResponses(resp *tmstate.ABCIResponses, state types.State, block *types.Block) (types.State, error) { +func (e *Executor) UpdateStateFromResponses(state *types.State, resp *tmstate.ABCIResponses, block *types.Block) error { // Dymint ignores any setValidator responses from the app, as it is manages the validator set based on the settlement consensus // TODO: this will be changed when supporting multiple sequencers from the hub validatorUpdates := []*tmtypes.Validator{} @@ -126,7 +130,7 @@ func (e *Executor) UpdateStateFromResponses(resp *tmstate.ABCIResponses, state t if len(validatorUpdates) > 0 { err := nValSet.UpdateWithChangeSet(validatorUpdates) if err != nil { - return state, nil + return err } // Change results from this height but only applies to the next next height. 
lastHeightValSetChanged = int64(block.Header.Height + 1 + 1) @@ -136,41 +140,17 @@ func (e *Executor) UpdateStateFromResponses(resp *tmstate.ABCIResponses, state t nValSet.IncrementProposerPriority(1) } - hash := block.Header.Hash() - // TODO: we can probably pass the state as a pointer and update it directly - s := types.State{ - Version: state.Version, - ChainID: state.ChainID, - InitialHeight: state.InitialHeight, - LastBlockHeight: block.Header.Height, - LastBlockTime: time.Unix(0, int64(block.Header.Time)), - LastBlockID: tmtypes.BlockID{ - Hash: hash[:], - // for now, we don't care about part set headers - }, - NextValidators: nValSet, - Validators: state.NextValidators.Copy(), - LastHeightValidatorsChanged: lastHeightValSetChanged, - ConsensusParams: state.ConsensusParams, - LastHeightConsensusParamsChanged: state.LastHeightConsensusParamsChanged, - // We're gonna update those fields only after we commit the blocks - AppHash: state.AppHash, - LastValidators: state.LastValidators.Copy(), - LastStoreHeight: state.LastStoreHeight, - - LastResultsHash: state.LastResultsHash, - BaseHeight: state.BaseHeight, - } + state.Validators = state.NextValidators.Copy() + state.NextValidators = nValSet + state.LastHeightValidatorsChanged = lastHeightValSetChanged - return s, nil + return nil } // Update state from Commit response func (e *Executor) UpdateStateFromCommitResponse(s *types.State, resp *tmstate.ABCIResponses, appHash []byte, height uint64) { + // validators already set on UpdateStateFromResponses copy(s.AppHash[:], appHash[:]) copy(s.LastResultsHash[:], tmtypes.NewResults(resp.DeliverTxs).Hash()) - - s.LastValidators = s.Validators.Copy() - s.LastStoreHeight = height s.SetHeight(height) } diff --git a/types/serialization.go b/types/serialization.go index 999f506cd..f4627657e 100644 --- a/types/serialization.go +++ b/types/serialization.go @@ -261,12 +261,9 @@ func (s *State) ToProto() (*pb.State, error) { ChainId: s.ChainID, InitialHeight: int64(s.InitialHeight), LastBlockHeight: int64(s.LastBlockHeight), - LastBlockID: s.LastBlockID.ToProto(), - LastBlockTime: s.LastBlockTime, NextValidators: nextValidators, Validators: validators, LastValidators: lastValidators, - LastStoreHeight: s.LastStoreHeight, BaseHeight: s.BaseHeight, LastHeightValidatorsChanged: s.LastHeightValidatorsChanged, ConsensusParams: s.ConsensusParams, @@ -283,20 +280,8 @@ func (s *State) FromProto(other *pb.State) error { s.ChainID = other.ChainId s.InitialHeight = uint64(other.InitialHeight) s.LastBlockHeight = uint64(other.LastBlockHeight) - // TODO(omritoptix): remove this as this is only for backwards compatibility - // with old state files that don't have this field. - if other.LastStoreHeight == 0 && other.LastBlockHeight > 1 { - s.LastStoreHeight = uint64(other.LastBlockHeight) - } else { - s.LastStoreHeight = other.LastStoreHeight - } s.BaseHeight = other.BaseHeight - lastBlockID, err := types.BlockIDFromProto(&other.LastBlockID) - if err != nil { - return err - } - s.LastBlockID = *lastBlockID - s.LastBlockTime = other.LastBlockTime + s.NextValidators, err = types.ValidatorSetFromProto(other.NextValidators) if err != nil { return err diff --git a/types/state.go b/types/state.go index 02666fc56..8f302a604 100644 --- a/types/state.go +++ b/types/state.go @@ -2,7 +2,6 @@ package types import ( "fmt" - "time" // TODO(tzdybal): copy to local project? tmstate "github.com/tendermint/tendermint/proto/tendermint/state" @@ -22,8 +21,7 @@ type State struct { // LastBlockHeight=0 at genesis (ie. 
block(H=0) does not exist) LastBlockHeight uint64 - LastBlockID types.BlockID - LastBlockTime time.Time + // BaseHeight is the height of the first block we have in store after pruning. BaseHeight uint64 @@ -40,9 +38,6 @@ type State struct { // Merkle root of the results from executing prev block LastResultsHash [32]byte - // LastStore height is the last height we've saved to the store. - LastStoreHeight uint64 - // the latest AppHash we've received from calling abci.Commit() AppHash [32]byte } @@ -72,8 +67,6 @@ func NewStateFromGenesis(genDoc *types.GenesisDoc) (State, error) { InitialHeight: uint64(genDoc.InitialHeight), LastBlockHeight: 0, - LastBlockID: types.BlockID{}, - LastBlockTime: time.Time{}, BaseHeight: uint64(genDoc.InitialHeight), NextValidators: types.NewValidatorSet(nil), From efcf1931a72e2a0a2a61402b1b97707e8ae0bbfb Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Sun, 12 May 2024 22:34:26 +0300 Subject: [PATCH 29/35] removed unused params. cleaned the state flow --- block/block.go | 35 ++++++----- block/executor_test.go | 15 ++--- block/manager_test.go | 112 +++++++++++++++++------------------- block/pruning.go | 9 +++ block/state.go | 72 ++++++++++------------- node/node.go | 8 +-- rpc/client/client_test.go | 4 +- store/store_test.go | 2 - testutil/mocks.go | 12 ++-- testutil/types.go | 7 +-- types/serialization.go | 9 --- types/serialization_test.go | 18 +----- types/state.go | 3 +- 13 files changed, 136 insertions(+), 170 deletions(-) diff --git a/block/block.go b/block/block.go index a75b20209..25a4e18ca 100644 --- a/block/block.go +++ b/block/block.go @@ -31,6 +31,8 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta // In case the following true, it means we crashed after the commit and before updating the store height. // In that case we'll want to align the store with the app state and continue to the next block. if isBlockAlreadyApplied { + // In this case, where the app was committed, but the state wasn't updated + // it will update the state from appInfo, saved responses and validators. 
err := m.UpdateStateFromApp() if err != nil { return fmt.Errorf("update state from app: %w", err) @@ -49,12 +51,6 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta return fmt.Errorf("execute block: %w", err) } - // Updates the state with validator changes and consensus params changes from the app - err = m.Executor.UpdateStateFromResponses(&m.State, responses, block) - if err != nil { - return fmt.Errorf("update state from responses: %w", err) - } - dbBatch := m.Store.NewBatch() dbBatch, err = m.Store.SaveBlockResponses(block.Header.Height, responses, dbBatch) if err != nil { @@ -62,7 +58,13 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta return fmt.Errorf("save block responses: %w", err) } - dbBatch, err = m.Store.SaveValidators(block.Header.Height, m.State.Validators, dbBatch) + // Updates the state with validator changes and consensus params changes from the app + validators, err := m.Executor.NextValSetFromResponses(m.State, responses, block) + if err != nil { + return fmt.Errorf("update state from responses: %w", err) + } + + dbBatch, err = m.Store.SaveValidators(block.Header.Height, validators, dbBatch) if err != nil { dbBatch.Discard() return fmt.Errorf("save validators: %w", err) @@ -79,26 +81,23 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta return fmt.Errorf("commit block: %w", err) } + // If failed here, after the app committed, but before the state is updated, we'll update the state on + // UpdateStateFromApp using the saved responses and validators. + // Update the state with the new app hash, last validators and store height from the commit. // Every one of those, if happens before commit, prevents us from re-executing the block in case failed during commit. - m.Executor.UpdateStateFromCommitResponse(&m.State, responses, appHash, block.Header.Height) - _, err = m.Store.SaveState(m.State, nil) + newState := m.Executor.UpdateStateAfterCommit(m.State, responses, appHash, block.Header.Height, validators) + _, err = m.Store.SaveState(newState, nil) if err != nil { - return fmt.Errorf("final update state: %w", err) + return fmt.Errorf("update state: %w", err) } + m.State = newState // Prune old heights, if requested by ABCI app. 
if retainHeight > 0 { - pruned, err := m.pruneBlocks(uint64(retainHeight)) + _, err := m.pruneBlocks(uint64(retainHeight)) if err != nil { m.logger.Error("prune blocks", "retain_height", retainHeight, "err", err) - } else { - m.logger.Debug("pruned blocks", "pruned", pruned, "retain_height", retainHeight) - } - m.State.BaseHeight = m.State.BaseHeight - _, err = m.Store.SaveState(m.State, nil) - if err != nil { - return fmt.Errorf("final update state: %w", err) } } return nil diff --git a/block/executor_test.go b/block/executor_test.go index 7d689a6a2..dccbb1b99 100644 --- a/block/executor_test.go +++ b/block/executor_test.go @@ -143,7 +143,6 @@ func TestApplyBlock(t *testing.T) { state := types.State{ NextValidators: tmtypes.NewValidatorSet(nil), Validators: tmtypes.NewValidatorSet(nil), - LastValidators: tmtypes.NewValidatorSet(nil), } state.InitialHeight = 1 state.LastBlockHeight = 0 @@ -182,13 +181,10 @@ func TestApplyBlock(t *testing.T) { resp, err := executor.ExecuteBlock(state, block) require.NoError(err) require.NotNil(resp) - err = executor.UpdateStateFromResponses(&state, resp, block) - require.NoError(err) - require.NotNil(state) - assert.Equal(uint64(1), state.LastBlockHeight) appHash, _, err := executor.Commit(state, block, resp) require.NoError(err) - executor.UpdateStateFromCommitResponse(&state, resp, appHash, block.Header.Height) + state = executor.UpdateStateAfterCommit(state, resp, appHash, block.Header.Height, state.Validators) + assert.Equal(uint64(1), state.Height()) assert.Equal(mockAppHash, state.AppHash) // Create another block with multiple Tx from mempool @@ -236,12 +232,13 @@ func TestApplyBlock(t *testing.T) { resp, err = executor.ExecuteBlock(state, block) require.NoError(err) require.NotNil(resp) - err = executor.UpdateStateFromResponses(&state, resp, block) + vals, err := executor.NextValSetFromResponses(state, resp, block) require.NoError(err) - require.NotNil(state) - assert.Equal(uint64(2), state.LastBlockHeight) _, _, err = executor.Commit(state, block, resp) require.NoError(err) + state = executor.UpdateStateAfterCommit(state, resp, appHash, block.Header.Height, vals) + + assert.Equal(uint64(2), state.Height()) // wait for at least 4 Tx events, for up to 3 second. // 3 seconds is a fail-scenario only diff --git a/block/manager_test.go b/block/manager_test.go index 5e1a4b459..cf048b0be 100644 --- a/block/manager_test.go +++ b/block/manager_test.go @@ -202,16 +202,17 @@ func TestProducePendingBlock(t *testing.T) { manager, err := testutil.GetManager(testutil.GetManagerConfig(), nil, nil, 1, 1, 0, proxyApp, nil) require.NoError(t, err) // Generate block and commit and save it to the store - blocks, err := testutil.GenerateBlocks(1, 1, manager.ProposerKey) - require.NoError(t, err) - block := blocks[0] + block := testutil.GetRandomBlock(1, 3) _, err = manager.Store.SaveBlock(block, &block.LastCommit, nil) require.NoError(t, err) // Produce block _, _, err = manager.ProduceAndGossipBlock(context.Background(), true) require.NoError(t, err) // Validate state is updated with the block that was saved in the store - assert.Equal(t, block.Header.Hash(), *(*[32]byte)(manager.State.LastBlockID.Hash)) + + //TODO: fix this test + //hacky way to validate the block was indeed contain txs + assert.NotEqual(t, manager.State.LastResultsHash, testutil.GetEmptyLastResultsHash()) } // Test that in case we fail after the proxy app commit, next time we won't commit again to the proxy app @@ -223,6 +224,7 @@ func TestProducePendingBlock(t *testing.T) { // 5. 
Produce third block successfully func TestProduceBlockFailAfterCommit(t *testing.T) { require := require.New(t) + assert := assert.New(t) // Setup app app := testutil.GetAppMock(testutil.Info, testutil.Commit) // Create proxy app @@ -237,64 +239,60 @@ func TestProduceBlockFailAfterCommit(t *testing.T) { require.NoError(err) cases := []struct { - name string - shouldFailSetSetHeight bool - shouldFailUpdateState bool - LastAppBlockHeight int64 - AppCommitHash [32]byte - LastAppCommitHash [32]byte - expectedStoreHeight uint64 - expectedStateAppHash [32]byte + name string + shoudFailSaveState bool + LastAppBlockHeight int64 + AppCommitHash [32]byte + LastAppCommitHash [32]byte + expectedStoreHeight uint64 + expectedStateAppHash [32]byte }{ { - name: "ProduceFirstBlockSuccessfully", - shouldFailSetSetHeight: false, - shouldFailUpdateState: false, - AppCommitHash: [32]byte{1}, - LastAppCommitHash: [32]byte{0}, - LastAppBlockHeight: 0, - expectedStoreHeight: 1, - expectedStateAppHash: [32]byte{1}, + name: "ProduceFirstBlockSuccessfully", + shoudFailSaveState: false, + AppCommitHash: [32]byte{1}, + // LastAppCommitHash: [32]byte{0}, + // LastAppBlockHeight: 0, + expectedStoreHeight: 1, + expectedStateAppHash: [32]byte{1}, }, { - name: "ProduceSecondBlockFailOnUpdateState", - shouldFailSetSetHeight: false, - shouldFailUpdateState: true, - AppCommitHash: [32]byte{2}, - LastAppCommitHash: [32]byte{}, - LastAppBlockHeight: 0, - expectedStoreHeight: 1, - expectedStateAppHash: [32]byte{1}, + name: "ProduceSecondBlockFailOnUpdateState", + shoudFailSaveState: true, + AppCommitHash: [32]byte{2}, + // LastAppCommitHash: [32]byte{}, + // LastAppBlockHeight: 0, + // state not changed on failed save state + expectedStoreHeight: 1, + expectedStateAppHash: [32]byte{1}, }, { - name: "ProduceSecondBlockSuccessfully", - shouldFailSetSetHeight: false, - shouldFailUpdateState: false, - AppCommitHash: [32]byte{}, - LastAppCommitHash: [32]byte{2}, - LastAppBlockHeight: 2, - expectedStoreHeight: 2, - expectedStateAppHash: [32]byte{2}, + name: "ProduceSecondBlockSuccessfullyFromApp", + shoudFailSaveState: false, + // AppCommitHash: [32]byte{}, + // expected return from app + LastAppCommitHash: [32]byte{2}, + LastAppBlockHeight: 2, + expectedStoreHeight: 2, + expectedStateAppHash: [32]byte{2}, }, { - name: "ProduceThirdBlockFailOnUpdateStoreHeight", - shouldFailSetSetHeight: true, - shouldFailUpdateState: false, - AppCommitHash: [32]byte{3}, - LastAppCommitHash: [32]byte{2}, - LastAppBlockHeight: 2, - expectedStoreHeight: 2, - expectedStateAppHash: [32]byte{3}, + name: "ProduceThirdBlockFailOnUpdateStoreHeight", + shoudFailSaveState: true, + AppCommitHash: [32]byte{3}, + // LastAppCommitHash: [32]byte{2}, + // LastAppBlockHeight: 2, + expectedStoreHeight: 2, + expectedStateAppHash: [32]byte{2}, }, { - name: "ProduceThirdBlockSuccessfully", - shouldFailSetSetHeight: false, - shouldFailUpdateState: false, - AppCommitHash: [32]byte{}, - LastAppCommitHash: [32]byte{3}, - LastAppBlockHeight: 3, - expectedStoreHeight: 3, - expectedStateAppHash: [32]byte{3}, + name: "ProduceThirdBlockSuccessfully", + shoudFailSaveState: false, + AppCommitHash: [32]byte{}, + LastAppCommitHash: [32]byte{3}, + LastAppBlockHeight: 3, + expectedStoreHeight: 3, + expectedStateAppHash: [32]byte{3}, }, } for _, tc := range cases { @@ -304,14 +302,12 @@ func TestProduceBlockFailAfterCommit(t *testing.T) { LastBlockHeight: tc.LastAppBlockHeight, LastBlockAppHash: tc.LastAppCommitHash[:], }) - mockStore.ShouldFailSetHeight = tc.shouldFailSetSetHeight - 
mockStore.ShoudFailUpdateState = tc.shouldFailUpdateState + mockStore.ShoudFailSaveState = tc.shoudFailSaveState _, _, _ = manager.ProduceAndGossipBlock(context.Background(), true) - require.Equal(tc.expectedStoreHeight, manager.State.Height(), tc.name) - require.Equal(tc.expectedStateAppHash, manager.State.AppHash, tc.name) storeState, err := manager.Store.LoadState() - require.NoError(err) - require.Equal(tc.expectedStateAppHash, storeState.AppHash, tc.name) + assert.NoError(err) + assert.Equal(tc.expectedStoreHeight, storeState.Height(), tc.name) + assert.Equal(tc.expectedStateAppHash, storeState.AppHash, tc.name) app.On("Commit", mock.Anything).Unset() app.On("Info", mock.Anything).Unset() diff --git a/block/pruning.go b/block/pruning.go index 27bb60025..cdd50ed0f 100644 --- a/block/pruning.go +++ b/block/pruning.go @@ -18,5 +18,14 @@ func (m *Manager) pruneBlocks(retainHeight uint64) (uint64, error) { // TODO: prune state/indexer and state/txindexer?? + newState := m.State + newState.BaseHeight = retainHeight + _, err = m.Store.SaveState(newState, nil) + if err != nil { + return 0, fmt.Errorf("final update state: %w", err) + } + m.State = newState + + m.logger.Info("pruned blocks", "pruned", pruned, "retain_height", retainHeight) return pruned, nil } diff --git a/block/state.go b/block/state.go index d580d7f1e..0ca4e4c54 100644 --- a/block/state.go +++ b/block/state.go @@ -43,26 +43,18 @@ func (m *Manager) UpdateStateFromApp() error { if err != nil { return errorsmod.Wrap(err, "load block responses") } - vals, err := m.Store.LoadValidators(appHeight) if err != nil { return errorsmod.Wrap(err, "load block responses") } // update the state with the hash, last store height and last validators. - //TODO: DRY with the post commit update - m.State.AppHash = *(*[32]byte)(proxyAppInfo.LastBlockAppHash) - m.State.Validators = m.State.NextValidators.Copy() - m.State.NextValidators = vals - copy(m.State.LastResultsHash[:], tmtypes.NewResults(resp.DeliverTxs).Hash()) - m.State.SetHeight(appHeight) - - //FIXME: load consensus params - - _, err = m.Store.SaveState(m.State, nil) + state := m.Executor.UpdateStateAfterCommit(m.State, resp, proxyAppInfo.LastBlockAppHash, appHeight, vals) + _, err = m.Store.SaveState(state, nil) if err != nil { return errorsmod.Wrap(err, "update state") } + m.State = state return nil } @@ -113,44 +105,42 @@ func (e *Executor) UpdateMempoolAfterInitChain(s types.State) { e.mempool.SetPostCheckFn(mempool.PostCheckMaxGas(s.ConsensusParams.Block.MaxGas)) } -// UpdateStateFromResponses updates state based on the ABCIResponses. -func (e *Executor) UpdateStateFromResponses(state *types.State, resp *tmstate.ABCIResponses, block *types.Block) error { +// NextValSetFromResponses updates state based on the ABCIResponses. 
+func (e *Executor) NextValSetFromResponses(state types.State, resp *tmstate.ABCIResponses, block *types.Block) (*tmtypes.ValidatorSet, error) { // Dymint ignores any setValidator responses from the app, as it is manages the validator set based on the settlement consensus // TODO: this will be changed when supporting multiple sequencers from the hub - validatorUpdates := []*tmtypes.Validator{} - - if state.ConsensusParams.Block.MaxBytes == 0 { - e.logger.Error("maxBytes=0", "state.ConsensusParams.Block", state.ConsensusParams.Block) - } - - nValSet := state.NextValidators.Copy() - lastHeightValSetChanged := state.LastHeightValidatorsChanged - // Dymint can work without validators - if len(nValSet.Validators) > 0 { - if len(validatorUpdates) > 0 { - err := nValSet.UpdateWithChangeSet(validatorUpdates) - if err != nil { - return err + return state.NextValidators.Copy(), nil + + /* + nValSet := state.NextValidators.Copy() + lastHeightValSetChanged := state.LastHeightValidatorsChanged + // Dymint can work without validators + if len(nValSet.Validators) > 0 { + if len(validatorUpdates) > 0 { + err := nValSet.UpdateWithChangeSet(validatorUpdates) + if err != nil { + return err + } + // Change results from this height but only applies to the next next height. + lastHeightValSetChanged = int64(block.Header.Height + 1 + 1) } - // Change results from this height but only applies to the next next height. - lastHeightValSetChanged = int64(block.Header.Height + 1 + 1) - } - // TODO(tzdybal): right now, it's for backward compatibility, may need to change this - nValSet.IncrementProposerPriority(1) - } - - state.Validators = state.NextValidators.Copy() - state.NextValidators = nValSet - state.LastHeightValidatorsChanged = lastHeightValSetChanged - - return nil + // TODO(tzdybal): right now, it's for backward compatibility, may need to change this + nValSet.IncrementProposerPriority(1) + } + */ } // Update state from Commit response -func (e *Executor) UpdateStateFromCommitResponse(s *types.State, resp *tmstate.ABCIResponses, appHash []byte, height uint64) { - // validators already set on UpdateStateFromResponses +func (e *Executor) UpdateStateAfterCommit(s types.State, resp *tmstate.ABCIResponses, appHash []byte, height uint64, valSet *tmtypes.ValidatorSet) types.State { copy(s.AppHash[:], appHash[:]) copy(s.LastResultsHash[:], tmtypes.NewResults(resp.DeliverTxs).Hash()) + + //TODO: load consensus params from endblock? 
+ + s.Validators = s.NextValidators.Copy() + s.NextValidators = valSet.Copy() + s.SetHeight(height) + return s } diff --git a/node/node.go b/node/node.go index 6710ce7d2..79cf944e6 100644 --- a/node/node.go +++ b/node/node.go @@ -69,7 +69,7 @@ type Node struct { incomingTxCh chan *p2p.GossipMessage Store store.Store - blockManager *block.Manager + BlockManager *block.Manager dalc da.DataAvailabilityLayerClient settlementlc settlement.LayerI @@ -198,7 +198,7 @@ func NewNode( genesis: genesis, conf: conf, P2P: p2pClient, - blockManager: blockManager, + BlockManager: blockManager, dalc: dalc, settlementlc: settlementlc, Mempool: mp, @@ -271,7 +271,7 @@ func (n *Node) OnStart() error { }() // start the block manager - err = n.blockManager.Start(n.ctx) + err = n.BlockManager.Start(n.ctx) if err != nil { return fmt.Errorf("while starting block manager: %w", err) } @@ -383,5 +383,5 @@ func (n *Node) startPrometheusServer() error { // FIXME: read from block manager func (n *Node) GetBlockManagerHeight() uint64 { - return 0 + return n.BlockManager.State.Height() } diff --git a/rpc/client/client_test.go b/rpc/client/client_test.go index 3b6f37f8f..5f03f1139 100644 --- a/rpc/client/client_test.go +++ b/rpc/client/client_test.go @@ -289,6 +289,7 @@ func TestGetBlock(t *testing.T) { block := getRandomBlock(1, 10) _, err = node.Store.SaveBlock(block, &types.Commit{}, nil) + node.BlockManager.State.SetHeight(block.Header.Height) require.NoError(err) blockResp, err := rpc.Block(context.Background(), nil) @@ -316,6 +317,7 @@ func TestGetCommit(t *testing.T) { for _, b := range blocks { _, err = node.Store.SaveBlock(b, &types.Commit{Height: b.Header.Height}, nil) + node.BlockManager.State.SetHeight(b.Header.Height) require.NoError(err) } t.Run("Fetch all commits", func(t *testing.T) { @@ -585,7 +587,6 @@ func TestConsensusState(t *testing.T) { } func TestBlockchainInfo(t *testing.T) { - t.Skip("Test disabled as we need to increase the height of the block manager") //FIXME require := require.New(t) assert := assert.New(t) mockApp, rpc, node := getRPCAndNode(t) @@ -600,6 +601,7 @@ func TestBlockchainInfo(t *testing.T) { HeaderHash: block.Header.Hash(), }, nil) require.NoError(err) + node.BlockManager.State.SetHeight(block.Header.Height) } tests := []struct { diff --git a/store/store_test.go b/store/store_test.go index 22a6d7510..4a09775a1 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -95,10 +95,8 @@ func TestLoadState(t *testing.T) { expectedHeight := uint64(10) _, err := s1.SaveState(types.State{ LastBlockHeight: expectedHeight, - LastStoreHeight: uint64(expectedHeight), NextValidators: validatorSet, Validators: validatorSet, - LastValidators: validatorSet, }, nil) assert.NoError(err) diff --git a/testutil/mocks.go b/testutil/mocks.go index 1c633a3e2..045d4934f 100644 --- a/testutil/mocks.go +++ b/testutil/mocks.go @@ -90,8 +90,7 @@ func CountMockCalls(totalCalls []mock.Call, methodName string) int { // MockStore is a mock store for testing type MockStore struct { - ShouldFailSetHeight bool - ShoudFailUpdateState bool + ShoudFailSaveState bool ShouldFailUpdateStateWithBatch bool *store.DefaultStore height uint64 @@ -113,7 +112,7 @@ func (m *MockStore) NextHeight() uint64 { // UpdateState updates the state of the mock store func (m *MockStore) SaveState(state types.State, batch store.Batch) (store.Batch, error) { - if batch != nil && m.ShouldFailUpdateStateWithBatch || m.ShoudFailUpdateState && batch == nil { + if batch != nil && m.ShouldFailUpdateStateWithBatch || m.ShoudFailSaveState && 
batch == nil { return nil, errors.New("failed to update state") } return m.DefaultStore.SaveState(state, batch) @@ -123,10 +122,9 @@ func (m *MockStore) SaveState(state types.State, batch store.Batch) (store.Batch func NewMockStore() *MockStore { defaultStore := store.New(store.NewDefaultInMemoryKVStore()) return &MockStore{ - DefaultStore: defaultStore.(*store.DefaultStore), - height: 0, - ShouldFailSetHeight: false, - ShoudFailUpdateState: false, + DefaultStore: defaultStore.(*store.DefaultStore), + height: 0, + ShoudFailSaveState: false, } } diff --git a/testutil/types.go b/testutil/types.go index efe5a1ee1..b542f06e4 100644 --- a/testutil/types.go +++ b/testutil/types.go @@ -66,7 +66,7 @@ func generateBlock(height uint64) *types.Block { ConsensusHash: h[3], // AppHash: h[4], AppHash: [32]byte{}, - LastResultsHash: getEmptyLastResultsHash(), + LastResultsHash: GetEmptyLastResultsHash(), ProposerAddress: []byte{4, 3, 2, 1}, AggregatorsHash: h[6], }, @@ -198,7 +198,7 @@ func GenerateState(initialHeight int64, lastBlockHeight int64) types.State { ChainID: "test-chain", InitialHeight: uint64(initialHeight), AppHash: [32]byte{}, - LastResultsHash: getEmptyLastResultsHash(), + LastResultsHash: GetEmptyLastResultsHash(), Version: tmstate.Version{ Consensus: version.Consensus{ Block: BlockVersion, @@ -206,7 +206,6 @@ func GenerateState(initialHeight int64, lastBlockHeight int64) types.State { }, }, LastBlockHeight: uint64(lastBlockHeight), - LastValidators: GenerateRandomValidatorSet(), Validators: GenerateRandomValidatorSet(), NextValidators: GenerateRandomValidatorSet(), } @@ -237,7 +236,7 @@ func GenerateGenesis(initialHeight int64) *tmtypes.GenesisDoc { } } -func getEmptyLastResultsHash() [32]byte { +func GetEmptyLastResultsHash() [32]byte { lastResults := []*abci.ResponseDeliverTx{} return *(*[32]byte)(tmtypes.NewResults(lastResults).Hash()) } diff --git a/types/serialization.go b/types/serialization.go index f4627657e..5f86b964a 100644 --- a/types/serialization.go +++ b/types/serialization.go @@ -251,10 +251,6 @@ func (s *State) ToProto() (*pb.State, error) { if err != nil { return nil, err } - lastValidators, err := s.LastValidators.ToProto() - if err != nil { - return nil, err - } return &pb.State{ Version: &s.Version, @@ -263,7 +259,6 @@ func (s *State) ToProto() (*pb.State, error) { LastBlockHeight: int64(s.LastBlockHeight), NextValidators: nextValidators, Validators: validators, - LastValidators: lastValidators, BaseHeight: s.BaseHeight, LastHeightValidatorsChanged: s.LastHeightValidatorsChanged, ConsensusParams: s.ConsensusParams, @@ -290,10 +285,6 @@ func (s *State) FromProto(other *pb.State) error { if err != nil { return err } - s.LastValidators, err = types.ValidatorSetFromProto(other.LastValidators) - if err != nil { - return err - } s.LastHeightValidatorsChanged = other.LastHeightValidatorsChanged s.ConsensusParams = other.ConsensusParams s.LastHeightConsensusParamsChanged = other.LastHeightConsensusParamsChanged diff --git a/types/serialization_test.go b/types/serialization_test.go index 3fff1b0ba..21a2f40c3 100644 --- a/types/serialization_test.go +++ b/types/serialization_test.go @@ -3,7 +3,6 @@ package types_test import ( "crypto/rand" "testing" - "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -98,7 +97,6 @@ func TestStateRoundTrip(t *testing.T) { { "with max bytes", types.State{ - LastValidators: valSet, Validators: valSet, NextValidators: valSet, ConsensusParams: tmproto.ConsensusParams{ @@ -120,21 +118,11 @@ func 
TestStateRoundTrip(t *testing.T) { }, Software: "dymint", }, - ChainID: "testchain", - InitialHeight: 987, - LastBlockHeight: 987654321, - LastStoreHeight: 987654321, - LastBlockID: tmtypes.BlockID{ - Hash: nil, - PartSetHeader: tmtypes.PartSetHeader{ - Total: 0, - Hash: nil, - }, - }, - LastBlockTime: time.Date(2022, 6, 6, 12, 12, 33, 44, time.UTC), + ChainID: "testchain", + InitialHeight: 987, + LastBlockHeight: 987654321, NextValidators: valSet, Validators: valSet, - LastValidators: valSet, LastHeightValidatorsChanged: 8272, ConsensusParams: tmproto.ConsensusParams{ Block: tmproto.BlockParams{ diff --git a/types/state.go b/types/state.go index 8f302a604..751f8c298 100644 --- a/types/state.go +++ b/types/state.go @@ -20,6 +20,7 @@ type State struct { InitialHeight uint64 // should be 1, not 0, when starting from height 1 // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) + //TODO: should be atomic as can be queried by the RPC LastBlockHeight uint64 // BaseHeight is the height of the first block we have in store after pruning. @@ -27,7 +28,6 @@ type State struct { NextValidators *types.ValidatorSet Validators *types.ValidatorSet - LastValidators *types.ValidatorSet LastHeightValidatorsChanged int64 // Consensus parameters used for validating blocks. @@ -71,7 +71,6 @@ func NewStateFromGenesis(genDoc *types.GenesisDoc) (State, error) { NextValidators: types.NewValidatorSet(nil), Validators: types.NewValidatorSet(nil), - LastValidators: types.NewValidatorSet(nil), LastHeightValidatorsChanged: genDoc.InitialHeight, ConsensusParams: *genDoc.ConsensusParams, From 3992f2ac8fb9403c472c40d31a24fa6b1d20bdd1 Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Mon, 13 May 2024 15:29:16 +0300 Subject: [PATCH 30/35] fixed LastBlockHeight to be atomic --- block/block.go | 7 +++---- block/executor_test.go | 7 +++---- block/manager_test.go | 5 +++-- block/pruning.go | 6 ++---- block/state.go | 7 +++---- node/node.go | 1 - store/store.go | 11 ----------- store/store_test.go | 13 +++++++------ test/loadtime/cmd/report/main.go | 13 ++++++++++--- testutil/types.go | 9 +++++---- types/serialization.go | 4 ++-- types/serialization_test.go | 6 +++++- types/state.go | 12 ++++++------ 13 files changed, 49 insertions(+), 52 deletions(-) diff --git a/block/block.go b/block/block.go index 25a4e18ca..8406e628b 100644 --- a/block/block.go +++ b/block/block.go @@ -58,7 +58,7 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta return fmt.Errorf("save block responses: %w", err) } - // Updates the state with validator changes and consensus params changes from the app + // Get the validator changes from the app validators, err := m.Executor.NextValSetFromResponses(m.State, responses, block) if err != nil { return fmt.Errorf("update state from responses: %w", err) @@ -86,12 +86,11 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta // Update the state with the new app hash, last validators and store height from the commit. // Every one of those, if happens before commit, prevents us from re-executing the block in case failed during commit. 
- newState := m.Executor.UpdateStateAfterCommit(m.State, responses, appHash, block.Header.Height, validators) - _, err = m.Store.SaveState(newState, nil) + _ = m.Executor.UpdateStateAfterCommit(&m.State, responses, appHash, block.Header.Height, validators) + _, err = m.Store.SaveState(m.State, nil) if err != nil { return fmt.Errorf("update state: %w", err) } - m.State = newState // Prune old heights, if requested by ABCI app. if retainHeight > 0 { diff --git a/block/executor_test.go b/block/executor_test.go index dccbb1b99..66f464e0f 100644 --- a/block/executor_test.go +++ b/block/executor_test.go @@ -145,7 +145,7 @@ func TestApplyBlock(t *testing.T) { Validators: tmtypes.NewValidatorSet(nil), } state.InitialHeight = 1 - state.LastBlockHeight = 0 + state.LastBlockHeight.Store(0) maxBytes := uint64(100) state.ConsensusParams.Block.MaxBytes = int64(maxBytes) state.ConsensusParams.Block.MaxGas = 100000 @@ -183,7 +183,7 @@ func TestApplyBlock(t *testing.T) { require.NotNil(resp) appHash, _, err := executor.Commit(state, block, resp) require.NoError(err) - state = executor.UpdateStateAfterCommit(state, resp, appHash, block.Header.Height, state.Validators) + _ = executor.UpdateStateAfterCommit(&state, resp, appHash, block.Header.Height, state.Validators) assert.Equal(uint64(1), state.Height()) assert.Equal(mockAppHash, state.AppHash) @@ -236,8 +236,7 @@ func TestApplyBlock(t *testing.T) { require.NoError(err) _, _, err = executor.Commit(state, block, resp) require.NoError(err) - state = executor.UpdateStateAfterCommit(state, resp, appHash, block.Header.Height, vals) - + _ = executor.UpdateStateAfterCommit(&state, resp, appHash, block.Header.Height, vals) assert.Equal(uint64(2), state.Height()) // wait for at least 4 Tx events, for up to 3 second. diff --git a/block/manager_test.go b/block/manager_test.go index cf048b0be..6bc541d10 100644 --- a/block/manager_test.go +++ b/block/manager_test.go @@ -86,7 +86,7 @@ func TestInitialState(t *testing.T) { store: fullStore, genesis: genesis, expectedInitialHeight: sampleState.InitialHeight, - expectedLastBlockHeight: sampleState.LastBlockHeight, + expectedLastBlockHeight: sampleState.LastBlockHeight.Load(), expectedChainID: sampleState.ChainID, }, } @@ -100,7 +100,7 @@ func TestInitialState(t *testing.T) { assert.NotNil(agg) assert.Equal(c.expectedChainID, agg.State.ChainID) assert.Equal(c.expectedInitialHeight, agg.State.InitialHeight) - assert.Equal(c.expectedLastBlockHeight, agg.State.LastBlockHeight) + assert.Equal(c.expectedLastBlockHeight, agg.State.LastBlockHeight.Load()) }) } } @@ -306,6 +306,7 @@ func TestProduceBlockFailAfterCommit(t *testing.T) { _, _, _ = manager.ProduceAndGossipBlock(context.Background(), true) storeState, err := manager.Store.LoadState() assert.NoError(err) + manager.State = storeState assert.Equal(tc.expectedStoreHeight, storeState.Height(), tc.name) assert.Equal(tc.expectedStateAppHash, storeState.AppHash, tc.name) diff --git a/block/pruning.go b/block/pruning.go index cdd50ed0f..17e196932 100644 --- a/block/pruning.go +++ b/block/pruning.go @@ -18,13 +18,11 @@ func (m *Manager) pruneBlocks(retainHeight uint64) (uint64, error) { // TODO: prune state/indexer and state/txindexer?? 
- newState := m.State - newState.BaseHeight = retainHeight - _, err = m.Store.SaveState(newState, nil) + m.State.BaseHeight = retainHeight + _, err = m.Store.SaveState(m.State, nil) if err != nil { return 0, fmt.Errorf("final update state: %w", err) } - m.State = newState m.logger.Info("pruned blocks", "pruned", pruned, "retain_height", retainHeight) return pruned, nil diff --git a/block/state.go b/block/state.go index 0ca4e4c54..ffd3db414 100644 --- a/block/state.go +++ b/block/state.go @@ -49,12 +49,11 @@ func (m *Manager) UpdateStateFromApp() error { } // update the state with the hash, last store height and last validators. - state := m.Executor.UpdateStateAfterCommit(m.State, resp, proxyAppInfo.LastBlockAppHash, appHeight, vals) - _, err = m.Store.SaveState(state, nil) + _ = m.Executor.UpdateStateAfterCommit(&m.State, resp, proxyAppInfo.LastBlockAppHash, appHeight, vals) + _, err = m.Store.SaveState(m.State, nil) if err != nil { return errorsmod.Wrap(err, "update state") } - m.State = state return nil } @@ -132,7 +131,7 @@ func (e *Executor) NextValSetFromResponses(state types.State, resp *tmstate.ABCI } // Update state from Commit response -func (e *Executor) UpdateStateAfterCommit(s types.State, resp *tmstate.ABCIResponses, appHash []byte, height uint64, valSet *tmtypes.ValidatorSet) types.State { +func (e *Executor) UpdateStateAfterCommit(s *types.State, resp *tmstate.ABCIResponses, appHash []byte, height uint64, valSet *tmtypes.ValidatorSet) *types.State { copy(s.AppHash[:], appHash[:]) copy(s.LastResultsHash[:], tmtypes.NewResults(resp.DeliverTxs).Hash()) diff --git a/node/node.go b/node/node.go index 79cf944e6..e07db33c4 100644 --- a/node/node.go +++ b/node/node.go @@ -381,7 +381,6 @@ func (n *Node) startPrometheusServer() error { return nil } -// FIXME: read from block manager func (n *Node) GetBlockManagerHeight() uint64 { return n.BlockManager.State.Height() } diff --git a/store/store.go b/store/store.go index 0c38b5cbb..faec34fae 100644 --- a/store/store.go +++ b/store/store.go @@ -4,7 +4,6 @@ import ( "encoding/binary" "errors" "fmt" - "sync/atomic" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -45,16 +44,6 @@ func (s *DefaultStore) NewBatch() Batch { return s.db.NewBatch() } -// SetHeight sets the height of the store -func (s *DefaultStore) SetHeight(height uint64) { - atomic.StoreUint64(&s.height, height) -} - -// Height returns height of the highest block saved in the Store. -func (s *DefaultStore) Height() uint64 { - return atomic.LoadUint64(&s.height) -} - // SaveBlock adds block to the store along with corresponding commit. // Stored height is updated if block height is greater than stored value. // In case a batch is provided, the block and commit are added to the batch and not saved. 
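The store.go hunk above deletes DefaultStore's private height counter together with its SetHeight/Height accessors, leaving State.LastBlockHeight as the single owner of the chain height. As a rough illustration only (simplified names, not the dymint types), the pattern this patch converges on is an atomic height that RPC-style readers can poll while the producer advances it, with no mutex involved:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// State carries the only height counter; the store no longer duplicates it.
type State struct {
	LastBlockHeight atomic.Uint64
}

func (s *State) SetHeight(h uint64) { s.LastBlockHeight.Store(h) }
func (s *State) Height() uint64     { return s.LastBlockHeight.Load() }

func main() {
	s := &State{}
	var wg sync.WaitGroup
	for h := uint64(1); h <= 100; h++ {
		wg.Add(1)
		go func() { // readers racing with block production stay data-race free
			defer wg.Done()
			_ = s.Height()
		}()
		s.SetHeight(h)
	}
	wg.Wait()
	fmt.Println(s.Height()) // 100
}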
diff --git a/store/store_test.go b/store/store_test.go index 4a09775a1..c166218ea 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -93,18 +93,19 @@ func TestLoadState(t *testing.T) { kv := store.NewDefaultInMemoryKVStore() s1 := store.New(kv) expectedHeight := uint64(10) - _, err := s1.SaveState(types.State{ - LastBlockHeight: expectedHeight, - NextValidators: validatorSet, - Validators: validatorSet, - }, nil) + s := types.State{ + NextValidators: validatorSet, + Validators: validatorSet, + } + s.LastBlockHeight.Store(expectedHeight) + _, err := s1.SaveState(s, nil) assert.NoError(err) s2 := store.New(kv) state, err := s2.LoadState() assert.NoError(err) - assert.Equal(expectedHeight, state.LastBlockHeight) + assert.Equal(expectedHeight, state.LastBlockHeight.Load()) } func TestBlockResponses(t *testing.T) { diff --git a/test/loadtime/cmd/report/main.go b/test/loadtime/cmd/report/main.go index 0bc1ae98e..2089d847f 100644 --- a/test/loadtime/cmd/report/main.go +++ b/test/loadtime/cmd/report/main.go @@ -22,7 +22,13 @@ var mainPrefix = [1]byte{0} // BlockStore is a thin wrapper around the DefaultStore which will be used for inspecting the blocks type BlockStore struct { *store.DefaultStore - base uint64 + base uint64 + height uint64 +} + +// Height implements report.BlockStore. +func (b *BlockStore) Height() uint64 { + return b.height } // Base will be used to get the block height of the first block we want to generate the report for @@ -40,13 +46,14 @@ func getStore(directory string) *store.PrefixKV { func newBlockStore(kvstore store.KVStore, baseHeight uint64) *BlockStore { store := store.New(kvstore).(*store.DefaultStore) - _, err := store.LoadState() + state, err := store.LoadState() if err != nil { log.Fatalf("loading state %s", err) } return &BlockStore{ DefaultStore: store, - base: baseHeight, + base: state.BaseHeight, + height: state.LastBlockHeight.Load(), } } diff --git a/testutil/types.go b/testutil/types.go index b542f06e4..c19b0f6dc 100644 --- a/testutil/types.go +++ b/testutil/types.go @@ -194,7 +194,7 @@ func GenerateRandomValidatorSet() *tmtypes.ValidatorSet { // GenerateState generates an initial state for testing. func GenerateState(initialHeight int64, lastBlockHeight int64) types.State { - return types.State{ + s := types.State{ ChainID: "test-chain", InitialHeight: uint64(initialHeight), AppHash: [32]byte{}, @@ -205,10 +205,11 @@ func GenerateState(initialHeight int64, lastBlockHeight int64) types.State { App: AppVersion, }, }, - LastBlockHeight: uint64(lastBlockHeight), - Validators: GenerateRandomValidatorSet(), - NextValidators: GenerateRandomValidatorSet(), + Validators: GenerateRandomValidatorSet(), + NextValidators: GenerateRandomValidatorSet(), } + s.LastBlockHeight.Store(uint64(lastBlockHeight)) + return s } // GenerateGenesis generates a genesis for testing. 
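One detail of the testutil hunk above worth spelling out: GenerateState can no longer set LastBlockHeight inside the composite literal, because atomic.Uint64 is a struct with unexported fields and has no literal form that carries a value. Hence the build-then-Store sequence. A minimal sketch of the same pattern (hypothetical fields, not the real State):

package main

import (
	"fmt"
	"sync/atomic"
)

type State struct {
	InitialHeight   uint64
	LastBlockHeight atomic.Uint64
}

func GenerateState(initialHeight, lastBlockHeight uint64) *State {
	// The atomic field is omitted from the literal (zero value) ...
	s := &State{InitialHeight: initialHeight}
	// ... and populated afterwards; a literal cannot carry a nonzero atomic value.
	s.LastBlockHeight.Store(lastBlockHeight)
	return s
}

func main() {
	s := GenerateState(1, 10)
	fmt.Println(s.LastBlockHeight.Load()) // 10
}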
diff --git a/types/serialization.go b/types/serialization.go index 5f86b964a..cb51dd63c 100644 --- a/types/serialization.go +++ b/types/serialization.go @@ -256,7 +256,7 @@ func (s *State) ToProto() (*pb.State, error) { Version: &s.Version, ChainId: s.ChainID, InitialHeight: int64(s.InitialHeight), - LastBlockHeight: int64(s.LastBlockHeight), + LastBlockHeight: int64(s.LastBlockHeight.Load()), NextValidators: nextValidators, Validators: validators, BaseHeight: s.BaseHeight, @@ -274,7 +274,7 @@ func (s *State) FromProto(other *pb.State) error { s.Version = *other.Version s.ChainID = other.ChainId s.InitialHeight = uint64(other.InitialHeight) - s.LastBlockHeight = uint64(other.LastBlockHeight) + s.LastBlockHeight.Store(uint64(other.LastBlockHeight)) s.BaseHeight = other.BaseHeight s.NextValidators, err = types.ValidatorSetFromProto(other.NextValidators) diff --git a/types/serialization_test.go b/types/serialization_test.go index 21a2f40c3..4e202c574 100644 --- a/types/serialization_test.go +++ b/types/serialization_test.go @@ -120,7 +120,6 @@ func TestStateRoundTrip(t *testing.T) { }, ChainID: "testchain", InitialHeight: 987, - LastBlockHeight: 987654321, NextValidators: valSet, Validators: valSet, LastHeightValidatorsChanged: 8272, @@ -153,6 +152,11 @@ func TestStateRoundTrip(t *testing.T) { t.Run(c.name, func(t *testing.T) { require := require.New(t) assert := assert.New(t) + + if c.state.InitialHeight != 0 { + c.state.LastBlockHeight.Store(986321) + } + pState, err := c.state.ToProto() require.NoError(err) require.NotNil(pState) diff --git a/types/state.go b/types/state.go index 751f8c298..f5ef5be99 100644 --- a/types/state.go +++ b/types/state.go @@ -2,6 +2,7 @@ package types import ( "fmt" + "sync/atomic" // TODO(tzdybal): copy to local project? tmstate "github.com/tendermint/tendermint/proto/tendermint/state" @@ -20,8 +21,7 @@ type State struct { InitialHeight uint64 // should be 1, not 0, when starting from height 1 // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) - //TODO: should be atomic as can be queried by the RPC - LastBlockHeight uint64 + LastBlockHeight atomic.Uint64 // BaseHeight is the height of the first block we have in store after pruning. BaseHeight uint64 @@ -66,8 +66,7 @@ func NewStateFromGenesis(genDoc *types.GenesisDoc) (State, error) { ChainID: genDoc.ChainID, InitialHeight: uint64(genDoc.InitialHeight), - LastBlockHeight: 0, - BaseHeight: uint64(genDoc.InitialHeight), + BaseHeight: uint64(genDoc.InitialHeight), NextValidators: types.NewValidatorSet(nil), Validators: types.NewValidatorSet(nil), @@ -76,6 +75,7 @@ func NewStateFromGenesis(genDoc *types.GenesisDoc) (State, error) { ConsensusParams: *genDoc.ConsensusParams, LastHeightConsensusParamsChanged: genDoc.InitialHeight, } + s.LastBlockHeight.Store(0) copy(s.AppHash[:], genDoc.AppHash) return s, nil @@ -88,12 +88,12 @@ func (s *State) IsGenesis() bool { // SetHeight sets the height saved in the Store if it is higher than the existing height // returns OK if the value was updated successfully or did not need to be updated func (s *State) SetHeight(height uint64) { - s.LastBlockHeight = height + s.LastBlockHeight.Store(height) } // Height returns height of the highest block saved in the Store. func (s *State) Height() uint64 { - return s.LastBlockHeight + return s.LastBlockHeight.Load() } // NextHeight returns the next height that expected to be stored in store. 
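A property of atomic.Uint64 worth noting before the next patch: the type embeds a noCopy guard, so any function that takes State by value now copies a synchronization primitive, and go vet's copylocks check flags it. That is presumably part of why the following patch moves the whole pipeline to *State. A sketch under those assumptions (not the dymint signatures):

package main

import "sync/atomic"

type State struct {
	LastBlockHeight atomic.Uint64
}

// Taking State by value copies the atomic word; `go vet` (copylocks) reports
// that State is passed by value because it contains sync/atomic.Uint64.
// func heightByValue(s State) uint64 { return s.LastBlockHeight.Load() }

// Taking *State shares the one counter every goroutine should agree on.
func heightByPointer(s *State) uint64 { return s.LastBlockHeight.Load() }

func main() {
	s := &State{}
	s.LastBlockHeight.Store(7)
	_ = heightByPointer(s)
}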
From 5b3cda087a0fde46f34fe5797bd2b92b4d822043 Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Mon, 13 May 2024 15:53:59 +0300 Subject: [PATCH 31/35] avoid copying state and pass by reference --- block/block.go | 2 +- block/executor.go | 8 ++++---- block/executor_test.go | 8 ++++---- block/initchain.go | 2 +- block/manager.go | 2 +- block/state.go | 12 ++++++------ p2p/validator_test.go | 2 +- store/pruning_test.go | 8 ++++---- store/store.go | 14 ++++++-------- store/storeIface.go | 4 ++-- store/store_test.go | 2 +- testutil/mocks.go | 2 +- testutil/types.go | 4 ++-- types/state.go | 6 +++--- types/validation.go | 4 ++-- 15 files changed, 39 insertions(+), 41 deletions(-) diff --git a/block/block.go b/block/block.go index 8406e628b..815e5c55e 100644 --- a/block/block.go +++ b/block/block.go @@ -86,7 +86,7 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta // Update the state with the new app hash, last validators and store height from the commit. // Every one of those, if happens before commit, prevents us from re-executing the block in case failed during commit. - _ = m.Executor.UpdateStateAfterCommit(&m.State, responses, appHash, block.Header.Height, validators) + _ = m.Executor.UpdateStateAfterCommit(m.State, responses, appHash, block.Header.Height, validators) _, err = m.Store.SaveState(m.State, nil) if err != nil { return fmt.Errorf("update state: %w", err) diff --git a/block/executor.go b/block/executor.go index e40542b5e..4c3855f32 100644 --- a/block/executor.go +++ b/block/executor.go @@ -96,7 +96,7 @@ func (e *Executor) InitChain(genesis *tmtypes.GenesisDoc, validators []*tmtypes. } // CreateBlock reaps transactions from mempool and builds a block. -func (e *Executor) CreateBlock(height uint64, lastCommit *types.Commit, lastHeaderHash [32]byte, state types.State, maxBytes uint64) *types.Block { +func (e *Executor) CreateBlock(height uint64, lastCommit *types.Commit, lastHeaderHash [32]byte, state *types.State, maxBytes uint64) *types.Block { if state.ConsensusParams.Block.MaxBytes > 0 { maxBytes = min(maxBytes, uint64(state.ConsensusParams.Block.MaxBytes)) } @@ -134,7 +134,7 @@ func (e *Executor) CreateBlock(height uint64, lastCommit *types.Commit, lastHead } // Commit commits the block -func (e *Executor) Commit(state types.State, block *types.Block, resp *tmstate.ABCIResponses) ([]byte, int64, error) { +func (e *Executor) Commit(state *types.State, block *types.Block, resp *tmstate.ABCIResponses) ([]byte, int64, error) { appHash, retainHeight, err := e.commit(state, block, resp.DeliverTxs) if err != nil { return nil, 0, err @@ -153,7 +153,7 @@ func (e *Executor) GetAppInfo() (*abci.ResponseInfo, error) { return e.proxyAppQueryConn.InfoSync(abci.RequestInfo{}) } -func (e *Executor) commit(state types.State, block *types.Block, deliverTxs []*abci.ResponseDeliverTx) ([]byte, int64, error) { +func (e *Executor) commit(state *types.State, block *types.Block, deliverTxs []*abci.ResponseDeliverTx) ([]byte, int64, error) { e.mempool.Lock() defer e.mempool.Unlock() @@ -180,7 +180,7 @@ func (e *Executor) commit(state types.State, block *types.Block, deliverTxs []*a } // ExecuteBlock executes the block and returns the ABCIResponses. Block should be valid (passed validation checks). 
-func (e *Executor) ExecuteBlock(state types.State, block *types.Block) (*tmstate.ABCIResponses, error) { +func (e *Executor) ExecuteBlock(state *types.State, block *types.Block) (*tmstate.ABCIResponses, error) { abciResponses := new(tmstate.ABCIResponses) abciResponses.DeliverTxs = make([]*abci.ResponseDeliverTx, len(block.Data.Txs)) diff --git a/block/executor_test.go b/block/executor_test.go index 66f464e0f..eaf8f531d 100644 --- a/block/executor_test.go +++ b/block/executor_test.go @@ -51,7 +51,7 @@ func TestCreateBlock(t *testing.T) { maxBytes := uint64(100) - state := types.State{} + state := &types.State{} state.ConsensusParams.Block.MaxBytes = int64(maxBytes) state.ConsensusParams.Block.MaxGas = 100000 state.Validators = tmtypes.NewValidatorSet(nil) @@ -140,7 +140,7 @@ func TestApplyBlock(t *testing.T) { require.NotNil(headerSub) // Init state - state := types.State{ + state := &types.State{ NextValidators: tmtypes.NewValidatorSet(nil), Validators: tmtypes.NewValidatorSet(nil), } @@ -183,7 +183,7 @@ func TestApplyBlock(t *testing.T) { require.NotNil(resp) appHash, _, err := executor.Commit(state, block, resp) require.NoError(err) - _ = executor.UpdateStateAfterCommit(&state, resp, appHash, block.Header.Height, state.Validators) + _ = executor.UpdateStateAfterCommit(state, resp, appHash, block.Header.Height, state.Validators) assert.Equal(uint64(1), state.Height()) assert.Equal(mockAppHash, state.AppHash) @@ -236,7 +236,7 @@ func TestApplyBlock(t *testing.T) { require.NoError(err) _, _, err = executor.Commit(state, block, resp) require.NoError(err) - _ = executor.UpdateStateAfterCommit(&state, resp, appHash, block.Header.Height, vals) + _ = executor.UpdateStateAfterCommit(state, resp, appHash, block.Header.Height, vals) assert.Equal(uint64(2), state.Height()) // wait for at least 4 Tx events, for up to 3 second. diff --git a/block/initchain.go b/block/initchain.go index 727505f38..c7c694c1f 100644 --- a/block/initchain.go +++ b/block/initchain.go @@ -23,7 +23,7 @@ func (m *Manager) RunInitChain(ctx context.Context) error { } // update the state with only the consensus pubkey - m.Executor.UpdateStateAfterInitChain(&m.State, res, gensisValSet) + m.Executor.UpdateStateAfterInitChain(m.State, res, gensisValSet) m.Executor.UpdateMempoolAfterInitChain(m.State) if _, err := m.Store.SaveState(m.State, nil); err != nil { return err diff --git a/block/manager.go b/block/manager.go index 72fbd300d..0308dd846 100644 --- a/block/manager.go +++ b/block/manager.go @@ -38,7 +38,7 @@ type Manager struct { // Store and execution Store store.Store - State types.State + State *types.State Executor *Executor // Clients and servers diff --git a/block/state.go b/block/state.go index ffd3db414..ed9209c14 100644 --- a/block/state.go +++ b/block/state.go @@ -17,15 +17,15 @@ import ( ) // getInitialState tries to load lastState from Store, and if it's not available it reads GenesisDoc. 
-func getInitialState(store store.Store, genesis *tmtypes.GenesisDoc, logger types.Logger) (s types.State, err error) { - s, err = store.LoadState() +func getInitialState(store store.Store, genesis *tmtypes.GenesisDoc, logger types.Logger) (*types.State, error) { + s, err := store.LoadState() if errors.Is(err, types.ErrNoStateFound) { logger.Info("failed to find state in the store, creating new state from genesis") s, err = types.NewStateFromGenesis(genesis) } if err != nil { - return types.State{}, fmt.Errorf("get initial state: %w", err) + return nil, fmt.Errorf("get initial state: %w", err) } return s, nil @@ -49,7 +49,7 @@ func (m *Manager) UpdateStateFromApp() error { } // update the state with the hash, last store height and last validators. - _ = m.Executor.UpdateStateAfterCommit(&m.State, resp, proxyAppInfo.LastBlockAppHash, appHeight, vals) + _ = m.Executor.UpdateStateAfterCommit(m.State, resp, proxyAppInfo.LastBlockAppHash, appHeight, vals) _, err = m.Store.SaveState(m.State, nil) if err != nil { return errorsmod.Wrap(err, "update state") @@ -99,13 +99,13 @@ func (e *Executor) UpdateStateAfterInitChain(s *types.State, res *abci.ResponseI s.NextValidators = s.Validators.Copy() } -func (e *Executor) UpdateMempoolAfterInitChain(s types.State) { +func (e *Executor) UpdateMempoolAfterInitChain(s *types.State) { e.mempool.SetPreCheckFn(mempool.PreCheckMaxBytes(s.ConsensusParams.Block.MaxBytes)) e.mempool.SetPostCheckFn(mempool.PostCheckMaxGas(s.ConsensusParams.Block.MaxGas)) } // NextValSetFromResponses updates state based on the ABCIResponses. -func (e *Executor) NextValSetFromResponses(state types.State, resp *tmstate.ABCIResponses, block *types.Block) (*tmtypes.ValidatorSet, error) { +func (e *Executor) NextValSetFromResponses(state *types.State, resp *tmstate.ABCIResponses, block *types.Block) (*tmtypes.ValidatorSet, error) { // Dymint ignores any setValidator responses from the app, as it is manages the validator set based on the settlement consensus // TODO: this will be changed when supporting multiple sequencers from the hub return state.NextValidators.Copy(), nil diff --git a/p2p/validator_test.go b/p2p/validator_test.go index 2c90b44a9..27f5bc9c1 100644 --- a/p2p/validator_test.go +++ b/p2p/validator_test.go @@ -141,7 +141,7 @@ func TestValidator_BlockValidator(t *testing.T) { state.Validators = tmtypes.NewValidatorSet(nil) //Create empty block - block := executor.CreateBlock(1, &types.Commit{}, [32]byte{}, state, maxBytes) + block := executor.CreateBlock(1, &types.Commit{}, [32]byte{}, &state, maxBytes) //Create slclient client := registry.GetClient(registry.Local) diff --git a/store/pruning_test.go b/store/pruning_test.go index 295c95daa..fc3fea85d 100644 --- a/store/pruning_test.go +++ b/store/pruning_test.go @@ -19,7 +19,7 @@ func TestStorePruning(t *testing.T) { to uint64 shouldError bool }{ - //todo :check exclusion of pruning height + // todo :check exclusion of pruning height {"blocks with pruning", []*types.Block{ testutil.GetRandomBlock(1, 0), @@ -57,11 +57,11 @@ func TestStorePruning(t *testing.T) { assert.NoError(err) savedHeights[block.Header.Height] = true - //TODO: add block responses and commits + // TODO: add block responses and commits } // Validate all blocks are saved - for k, _ := range savedHeights { + for k := range savedHeights { _, err := bstore.LoadBlock(k) assert.NoError(err) } @@ -75,7 +75,7 @@ func TestStorePruning(t *testing.T) { assert.NoError(err) // Validate only blocks in the range are pruned - for k, _ := range savedHeights { + for k := 
range savedHeights { if k >= c.from && k < c.to { _, err := bstore.LoadBlock(k) assert.Error(err, "Block at height %d should be pruned", k) diff --git a/store/store.go b/store/store.go index faec34fae..3c206957e 100644 --- a/store/store.go +++ b/store/store.go @@ -26,8 +26,6 @@ var ( // DefaultStore is a default store implementation. type DefaultStore struct { db KVStore - - height uint64 // the highest block saved } var _ Store = &DefaultStore{} @@ -163,7 +161,7 @@ func (s *DefaultStore) LoadCommitByHash(hash [32]byte) (*types.Commit, error) { // UpdateState updates state saved in Store. Only one State is stored. // If there is no State in Store, state will be saved. -func (s *DefaultStore) SaveState(state types.State, batch Batch) (Batch, error) { +func (s *DefaultStore) SaveState(state *types.State, batch Batch) (Batch, error) { pbState, err := state.ToProto() if err != nil { return batch, fmt.Errorf("marshal state to JSON: %w", err) @@ -181,24 +179,24 @@ func (s *DefaultStore) SaveState(state types.State, batch Batch) (Batch, error) } // LoadState returns last state saved with UpdateState. -func (s *DefaultStore) LoadState() (types.State, error) { +func (s *DefaultStore) LoadState() (*types.State, error) { blob, err := s.db.Get(getStateKey()) if err != nil { - return types.State{}, types.ErrNoStateFound + return nil, types.ErrNoStateFound } var pbState pb.State err = pbState.Unmarshal(blob) if err != nil { - return types.State{}, fmt.Errorf("unmarshal state from store: %w", err) + return nil, fmt.Errorf("unmarshal state from store: %w", err) } var state types.State err = state.FromProto(&pbState) if err != nil { - return types.State{}, fmt.Errorf("unmarshal state from proto: %w", err) + return nil, fmt.Errorf("unmarshal state from proto: %w", err) } - return state, nil + return &state, nil } // SaveValidators stores validator set for given block height in store. diff --git a/store/storeIface.go b/store/storeIface.go index 1be8ec1f5..3acb988e6 100644 --- a/store/storeIface.go +++ b/store/storeIface.go @@ -61,10 +61,10 @@ type Store interface { // UpdateState updates state saved in Store. Only one State is stored. // If there is no State in Store, state will be saved. - SaveState(state types.State, batch Batch) (Batch, error) + SaveState(state *types.State, batch Batch) (Batch, error) // LoadState returns last state saved with UpdateState. 
- LoadState() (types.State, error) + LoadState() (*types.State, error) SaveValidators(height uint64, validatorSet *tmtypes.ValidatorSet, batch Batch) (Batch, error) diff --git a/store/store_test.go b/store/store_test.go index c166218ea..40f272391 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -93,7 +93,7 @@ func TestLoadState(t *testing.T) { kv := store.NewDefaultInMemoryKVStore() s1 := store.New(kv) expectedHeight := uint64(10) - s := types.State{ + s := &types.State{ NextValidators: validatorSet, Validators: validatorSet, } diff --git a/testutil/mocks.go b/testutil/mocks.go index 045d4934f..b4cb09967 100644 --- a/testutil/mocks.go +++ b/testutil/mocks.go @@ -111,7 +111,7 @@ func (m *MockStore) NextHeight() uint64 { } // UpdateState updates the state of the mock store -func (m *MockStore) SaveState(state types.State, batch store.Batch) (store.Batch, error) { +func (m *MockStore) SaveState(state *types.State, batch store.Batch) (store.Batch, error) { if batch != nil && m.ShouldFailUpdateStateWithBatch || m.ShoudFailSaveState && batch == nil { return nil, errors.New("failed to update state") } diff --git a/testutil/types.go b/testutil/types.go index c19b0f6dc..a9b41a582 100644 --- a/testutil/types.go +++ b/testutil/types.go @@ -193,8 +193,8 @@ func GenerateRandomValidatorSet() *tmtypes.ValidatorSet { } // GenerateState generates an initial state for testing. -func GenerateState(initialHeight int64, lastBlockHeight int64) types.State { - s := types.State{ +func GenerateState(initialHeight int64, lastBlockHeight int64) *types.State { + s := &types.State{ ChainID: "test-chain", InitialHeight: uint64(initialHeight), AppHash: [32]byte{}, diff --git a/types/state.go b/types/state.go index f5ef5be99..d113dd092 100644 --- a/types/state.go +++ b/types/state.go @@ -43,10 +43,10 @@ type State struct { } // NewStateFromGenesis reads blockchain State from genesis. 
-func NewStateFromGenesis(genDoc *types.GenesisDoc) (State, error) { +func NewStateFromGenesis(genDoc *types.GenesisDoc) (*State, error) { err := genDoc.ValidateAndComplete() if err != nil { - return State{}, fmt.Errorf("in genesis doc: %w", err) + return nil, fmt.Errorf("in genesis doc: %w", err) } // InitStateVersion sets the Consensus.Block and Software versions, @@ -78,7 +78,7 @@ func NewStateFromGenesis(genDoc *types.GenesisDoc) (State, error) { s.LastBlockHeight.Store(0) copy(s.AppHash[:], genDoc.AppHash) - return s, nil + return &s, nil } func (s *State) IsGenesis() bool { diff --git a/types/validation.go b/types/validation.go index b1e2faf6e..8e84f68e1 100644 --- a/types/validation.go +++ b/types/validation.go @@ -8,7 +8,7 @@ import ( tmtypes "github.com/tendermint/tendermint/types" ) -func ValidateProposedTransition(state State, block *Block, commit *Commit, proposer *Sequencer) error { +func ValidateProposedTransition(state *State, block *Block, commit *Commit, proposer *Sequencer) error { if err := block.ValidateWithState(state); err != nil { return fmt.Errorf("block: %w", err) } @@ -39,7 +39,7 @@ func (b *Block) ValidateBasic() error { return nil } -func (b *Block) ValidateWithState(state State) error { +func (b *Block) ValidateWithState(state *State) error { err := b.ValidateBasic() if err != nil { return err From 953a9b82456393d4e1ab5270719a02dbc68af528 Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Wed, 15 May 2024 10:40:30 +0300 Subject: [PATCH 32/35] fixed PR comments --- block/block.go | 2 +- block/executor_test.go | 4 ++-- block/manager_test.go | 44 ++++++++++++++++-------------------------- block/retriever.go | 2 +- block/state.go | 24 ++--------------------- store/pruning.go | 2 +- store/pruning_test.go | 9 ++++++--- types/state.go | 11 ----------- 8 files changed, 30 insertions(+), 68 deletions(-) diff --git a/block/block.go b/block/block.go index 815e5c55e..567ff3dbf 100644 --- a/block/block.go +++ b/block/block.go @@ -86,7 +86,7 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta // Update the state with the new app hash, last validators and store height from the commit. // Every one of those, if happens before commit, prevents us from re-executing the block in case failed during commit. 
- _ = m.Executor.UpdateStateAfterCommit(m.State, responses, appHash, block.Header.Height, validators) + m.Executor.UpdateStateAfterCommit(m.State, responses, appHash, block.Header.Height, validators) _, err = m.Store.SaveState(m.State, nil) if err != nil { return fmt.Errorf("update state: %w", err) diff --git a/block/executor_test.go b/block/executor_test.go index eaf8f531d..e4eca2f55 100644 --- a/block/executor_test.go +++ b/block/executor_test.go @@ -183,7 +183,7 @@ func TestApplyBlock(t *testing.T) { require.NotNil(resp) appHash, _, err := executor.Commit(state, block, resp) require.NoError(err) - _ = executor.UpdateStateAfterCommit(state, resp, appHash, block.Header.Height, state.Validators) + executor.UpdateStateAfterCommit(state, resp, appHash, block.Header.Height, state.Validators) assert.Equal(uint64(1), state.Height()) assert.Equal(mockAppHash, state.AppHash) @@ -236,7 +236,7 @@ func TestApplyBlock(t *testing.T) { require.NoError(err) _, _, err = executor.Commit(state, block, resp) require.NoError(err) - _ = executor.UpdateStateAfterCommit(state, resp, appHash, block.Header.Height, vals) + executor.UpdateStateAfterCommit(state, resp, appHash, block.Header.Height, vals) assert.Equal(uint64(2), state.Height()) // wait for at least 4 Tx events, for up to 3 second. diff --git a/block/manager_test.go b/block/manager_test.go index 6bc541d10..348850165 100644 --- a/block/manager_test.go +++ b/block/manager_test.go @@ -240,7 +240,7 @@ func TestProduceBlockFailAfterCommit(t *testing.T) { cases := []struct { name string - shoudFailSaveState bool + shoudFailOnSaveState bool LastAppBlockHeight int64 AppCommitHash [32]byte LastAppCommitHash [32]byte @@ -248,47 +248,37 @@ func TestProduceBlockFailAfterCommit(t *testing.T) { expectedStateAppHash [32]byte }{ { - name: "ProduceFirstBlockSuccessfully", - shoudFailSaveState: false, - AppCommitHash: [32]byte{1}, - // LastAppCommitHash: [32]byte{0}, - // LastAppBlockHeight: 0, + name: "ProduceFirstBlockSuccessfully", + shoudFailOnSaveState: false, + AppCommitHash: [32]byte{1}, expectedStoreHeight: 1, expectedStateAppHash: [32]byte{1}, }, { - name: "ProduceSecondBlockFailOnUpdateState", - shoudFailSaveState: true, - AppCommitHash: [32]byte{2}, - // LastAppCommitHash: [32]byte{}, - // LastAppBlockHeight: 0, - // state not changed on failed save state - expectedStoreHeight: 1, + name: "ProduceSecondBlockFailOnUpdateState", + shoudFailOnSaveState: true, + AppCommitHash: [32]byte{2}, + expectedStoreHeight: 1, // height not changed on failed save state expectedStateAppHash: [32]byte{1}, }, { - name: "ProduceSecondBlockSuccessfullyFromApp", - shoudFailSaveState: false, - // AppCommitHash: [32]byte{}, - // expected return from app - LastAppCommitHash: [32]byte{2}, + name: "ProduceSecondBlockSuccessfullyFromApp", + shoudFailOnSaveState: false, + LastAppCommitHash: [32]byte{2}, // loading state from app LastAppBlockHeight: 2, expectedStoreHeight: 2, expectedStateAppHash: [32]byte{2}, }, { - name: "ProduceThirdBlockFailOnUpdateStoreHeight", - shoudFailSaveState: true, - AppCommitHash: [32]byte{3}, - // LastAppCommitHash: [32]byte{2}, - // LastAppBlockHeight: 2, - expectedStoreHeight: 2, + name: "ProduceThirdBlockFailOnUpdateStoreHeight", + shoudFailOnSaveState: true, + AppCommitHash: [32]byte{3}, + expectedStoreHeight: 2, // height not changed on failed save state expectedStateAppHash: [32]byte{2}, }, { name: "ProduceThirdBlockSuccessfully", - shoudFailSaveState: false, - AppCommitHash: [32]byte{}, + shoudFailOnSaveState: false, LastAppCommitHash: [32]byte{3}, 
 LastAppBlockHeight: 3,
 expectedStoreHeight: 3,
@@ -302,7 +292,7 @@ func TestProduceBlockFailAfterCommit(t *testing.T) {
 LastBlockHeight: tc.LastAppBlockHeight,
 LastBlockAppHash: tc.LastAppCommitHash[:],
 })
- mockStore.ShoudFailSaveState = tc.shoudFailSaveState
+ mockStore.ShoudFailSaveState = tc.shouldFailOnSaveState
 _, _, _ = manager.ProduceAndGossipBlock(context.Background(), true)
 storeState, err := manager.Store.LoadState()
 assert.NoError(err)
diff --git a/block/retriever.go b/block/retriever.go
index 420ca41fd..240d1a902 100644
--- a/block/retriever.go
+++ b/block/retriever.go
@@ -76,7 +76,7 @@ func (m *Manager) queryStateIndex() (uint64, error) {
 var stateIndex uint64
 return stateIndex, retry.Do(
 func() error {
- res, err := m.SLClient.GetHeightState(m.State.Height() + 1)
+ res, err := m.SLClient.GetHeightState(m.State.NextHeight())
 if err != nil {
 m.logger.Debug("sl client get height state", "error", err)
 return err
diff --git a/block/state.go b/block/state.go
index ed9209c14..1207df93a 100644
--- a/block/state.go
+++ b/block/state.go
@@ -49,7 +49,7 @@ func (m *Manager) UpdateStateFromApp() error {
 }

 // update the state with the hash, last store height and last validators.
- _ = m.Executor.UpdateStateAfterCommit(m.State, resp, proxyAppInfo.LastBlockAppHash, appHeight, vals)
+ m.Executor.UpdateStateAfterCommit(m.State, resp, proxyAppInfo.LastBlockAppHash, appHeight, vals)
 _, err = m.Store.SaveState(m.State, nil)
 if err != nil {
 return errorsmod.Wrap(err, "update state")
@@ -109,29 +109,10 @@ func (e *Executor) NextValSetFromResponses(state *types.State, resp *tmstate.ABC
 // Dymint ignores any setValidator responses from the app, as it manages the validator set based on the settlement consensus
 // TODO: this will be changed when supporting multiple sequencers from the hub
 return state.NextValidators.Copy(), nil
-
- /*
- nValSet := state.NextValidators.Copy()
- lastHeightValSetChanged := state.LastHeightValidatorsChanged
- // Dymint can work without validators
- if len(nValSet.Validators) > 0 {
- if len(validatorUpdates) > 0 {
- err := nValSet.UpdateWithChangeSet(validatorUpdates)
- if err != nil {
- return err
- }
- // Change results from this height but only applies to the next next height.
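The renamed test table above pins one invariant worth spelling out: when SaveState fails, the persisted height and app hash must not advance. A self-contained toy version of that check (toy store, not dymint's mocks):

    package main

    import (
        "errors"
        "fmt"
    )

    // toyStore mimics the failing mock: it can be told to reject SaveState.
    type toyStore struct {
        height   uint64
        failSave bool
    }

    func (s *toyStore) SaveState(h uint64) error {
        if s.failSave {
            return errors.New("save state failed")
        }
        s.height = h
        return nil
    }

    func main() {
        s := &toyStore{height: 1, failSave: true}
        if err := s.SaveState(2); err != nil {
            // on a failed save, the last good height must remain visible
            fmt.Println("height after failed save:", s.height) // 1
        }
    }
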
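For context, queryStateIndex wraps the settlement query in retry.Do, so the GetHeightState(m.State.NextHeight()) call above is retried on transient errors. A hedged sketch of that pattern with github.com/avast/retry-go (the option shown is illustrative, not dymint's actual retry configuration, and the import path depends on the module version in use):

    package main

    import (
        "errors"
        "fmt"

        "github.com/avast/retry-go" // dymint may pin a different major version
    )

    func main() {
        attempt := 0
        err := retry.Do(
            func() error {
                attempt++
                if attempt < 3 {
                    return errors.New("transient settlement error")
                }
                return nil // succeeds on the third try
            },
            retry.Attempts(5), // illustrative option, not dymint's actual setting
        )
        fmt.Println(err, "after", attempt, "attempts") // <nil> after 3 attempts
    }
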
- lastHeightValSetChanged = int64(block.Header.Height + 1 + 1) - } - - // TODO(tzdybal): right now, it's for backward compatibility, may need to change this - nValSet.IncrementProposerPriority(1) - } - */ } // Update state from Commit response -func (e *Executor) UpdateStateAfterCommit(s *types.State, resp *tmstate.ABCIResponses, appHash []byte, height uint64, valSet *tmtypes.ValidatorSet) *types.State { +func (e *Executor) UpdateStateAfterCommit(s *types.State, resp *tmstate.ABCIResponses, appHash []byte, height uint64, valSet *tmtypes.ValidatorSet) { copy(s.AppHash[:], appHash[:]) copy(s.LastResultsHash[:], tmtypes.NewResults(resp.DeliverTxs).Hash()) @@ -141,5 +122,4 @@ func (e *Executor) UpdateStateAfterCommit(s *types.State, resp *tmstate.ABCIResp s.NextValidators = valSet.Copy() s.SetHeight(height) - return s } diff --git a/store/pruning.go b/store/pruning.go index 78d2ce0b4..098451783 100644 --- a/store/pruning.go +++ b/store/pruning.go @@ -10,7 +10,7 @@ func (s *DefaultStore) PruneBlocks(from, to uint64) (uint64, error) { return 0, fmt.Errorf("from height must be greater than 0") } - if to < from { + if to <= from { return 0, fmt.Errorf("cannot prune to height %v, it is lower than base height %v", to, from) } diff --git a/store/pruning_test.go b/store/pruning_test.go index fc3fea85d..850d4a9ad 100644 --- a/store/pruning_test.go +++ b/store/pruning_test.go @@ -19,7 +19,6 @@ func TestStorePruning(t *testing.T) { to uint64 shouldError bool }{ - // todo :check exclusion of pruning height {"blocks with pruning", []*types.Block{ testutil.GetRandomBlock(1, 0), @@ -44,8 +43,12 @@ func TestStorePruning(t *testing.T) { testutil.GetRandomBlock(2, 0), testutil.GetRandomBlock(3, 0), }, 0, 1, true}, + {"pruning same height", []*types.Block{ + testutil.GetRandomBlock(1, 0), + testutil.GetRandomBlock(2, 0), + testutil.GetRandomBlock(3, 0), + }, 3, 3, true}, } - for _, c := range cases { t.Run(c.name, func(t *testing.T) { assert := assert.New(t) @@ -76,7 +79,7 @@ func TestStorePruning(t *testing.T) { // Validate only blocks in the range are pruned for k := range savedHeights { - if k >= c.from && k < c.to { + if k >= c.from && k < c.to { //k < c.to is the exclusion test _, err := bstore.LoadBlock(k) assert.Error(err, "Block at height %d should be pruned", k) diff --git a/types/state.go b/types/state.go index d113dd092..c74e2871e 100644 --- a/types/state.go +++ b/types/state.go @@ -103,14 +103,3 @@ func (s *State) NextHeight() uint64 { } return s.Height() + 1 } - -// SetBase sets the base height if it is higher than the existing base height -// returns OK if the value was updated successfully or did not need to be updated -func (s *State) SetBase(height uint64) { - s.BaseHeight = height -} - -// Base returns height of the earliest block saved in the Store. 
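The "to <= from" tightening above turns the empty range into an error, which is exactly what the new "pruning same height" test case asserts. A standalone sketch of the bound, mirroring the store's checks with a toy function:

    package main

    import "fmt"

    // pruneRange validates a [from, to) pruning request the way the store now
    // does: from must be positive and to must be strictly greater than from.
    func pruneRange(from, to uint64) error {
        if from == 0 {
            return fmt.Errorf("from height must be greater than 0")
        }
        if to <= from {
            return fmt.Errorf("to height (%d) must be greater than from height (%d)", to, from)
        }
        return nil
    }

    func main() {
        fmt.Println(pruneRange(3, 3)) // rejected: empty range
        fmt.Println(pruneRange(1, 3)) // <nil>: heights 1 and 2 pruned, 3 kept (to is exclusive)
    }
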
-func (s *State) Base() uint64 { - return s.BaseHeight -} From 576325ea1a09f4f98870466832b001ccbef847d7 Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Wed, 15 May 2024 10:44:14 +0300 Subject: [PATCH 33/35] simplified genesis check on produce block --- block/produce.go | 37 ++++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/block/produce.go b/block/produce.go index bb1f7f2dc..7f163ecb6 100644 --- a/block/produce.go +++ b/block/produce.go @@ -87,25 +87,28 @@ func (m *Manager) ProduceAndGossipBlock(ctx context.Context, allowEmpty bool) (* return block, commit, nil } +func loadPrevBlock(store store.Store, height uint64) ([32]byte, *types.Commit, error) { + lastCommit, err := store.LoadCommit(height) + if err != nil { + return [32]byte{}, nil, fmt.Errorf("load commit: height: %d: %w", height, err) + } + lastBlock, err := store.LoadBlock(height) + if err != nil { + return [32]byte{}, nil, fmt.Errorf("load block after load commit: height: %d: %w", height, err) + } + return lastBlock.Header.Hash(), lastCommit, nil +} + func (m *Manager) produceBlock(allowEmpty bool) (*types.Block, *types.Commit, error) { - var ( - err error - lastHeaderHash [32]byte - lastCommit = &types.Commit{} - newHeight = m.State.NextHeight() - ) - - if !m.State.IsGenesis() { - height := newHeight - 1 - lastCommit, err = m.Store.LoadCommit(height) - if err != nil { - return nil, nil, fmt.Errorf("load commit: height: %d: %w: %w", height, err, ErrNonRecoverable) - } - lastBlock, err := m.Store.LoadBlock(height) - if err != nil { - return nil, nil, fmt.Errorf("load block after load commit: height: %d: %w: %w", height, err, ErrNonRecoverable) + newHeight := m.State.NextHeight() + lastHeaderHash, lastCommit, err := loadPrevBlock(m.Store, newHeight-1) + if err != nil { + if m.State.IsGenesis() { //allow prevBlock not to be found only on genesis + lastHeaderHash = [32]byte{} + lastCommit = &types.Commit{} + } else { + return nil, nil, fmt.Errorf("load prev block: %w: %w", err, ErrNonRecoverable) } - lastHeaderHash = lastBlock.Header.Hash() } var block *types.Block From 31c8906e5b6c3035f1a6115844c4c85cb79cd4d7 Mon Sep 17 00:00:00 2001 From: Michael Tsitrin Date: Wed, 15 May 2024 17:07:37 +0300 Subject: [PATCH 34/35] pr comments --- block/block.go | 29 +++++++++++++++++++++++++++++ block/gossip.go | 29 ----------------------------- block/produce.go | 7 +++---- block/pruning.go | 2 +- store/pruning.go | 3 +-- 5 files changed, 34 insertions(+), 36 deletions(-) diff --git a/block/block.go b/block/block.go index 567ff3dbf..78d8255f9 100644 --- a/block/block.go +++ b/block/block.go @@ -116,6 +116,35 @@ func (m *Manager) isHeightAlreadyApplied(blockHeight uint64) (bool, error) { return isBlockAlreadyApplied, nil } +func (m *Manager) attemptApplyCachedBlocks() error { + m.retrieverMutex.Lock() + defer m.retrieverMutex.Unlock() + + for { + expectedHeight := m.State.NextHeight() + + cachedBlock, blockExists := m.blockCache[expectedHeight] + if !blockExists { + break + } + if err := m.validateBlock(cachedBlock.Block, cachedBlock.Commit); err != nil { + delete(m.blockCache, cachedBlock.Block.Header.Height) + /// TODO: can we take an action here such as dropping the peer / reducing their reputation? 
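The patch-33 refactor above reduces produceBlock's genesis handling to one rule: a missing previous block is tolerated only at genesis. A minimal sketch of that control flow (toy loader and genesis flag in place of the real Store and State):

    package main

    import (
        "errors"
        "fmt"
    )

    var errNotFound = errors.New("block not found")

    // loadPrev stands in for loadPrevBlock; this toy store is always empty.
    func loadPrev(height uint64) ([32]byte, error) {
        return [32]byte{}, errNotFound
    }

    func prevHeaderHash(newHeight uint64, isGenesis bool) ([32]byte, error) {
        hash, err := loadPrev(newHeight - 1)
        if err != nil {
            if !isGenesis { // a missing predecessor is only legal for the first block
                return [32]byte{}, fmt.Errorf("load prev block: %w", err)
            }
            hash = [32]byte{} // genesis: fall back to zero values
        }
        return hash, nil
    }

    func main() {
        if _, err := prevHeaderHash(1, true); err == nil {
            fmt.Println("genesis tolerates a missing previous block")
        }
        _, err := prevHeaderHash(5, false)
        fmt.Println(err) // load prev block: block not found
    }
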
+ return fmt.Errorf("block not valid at height %d, dropping it: err:%w", cachedBlock.Block.Header.Height, err) + } + + err := m.applyBlock(cachedBlock.Block, cachedBlock.Commit, blockMetaData{source: gossipedBlock}) + if err != nil { + return fmt.Errorf("apply cached block: expected height: %d: %w", expectedHeight, err) + } + m.logger.Debug("applied cached block", "height", expectedHeight) + + delete(m.blockCache, cachedBlock.Block.Header.Height) + } + + return nil +} + func (m *Manager) validateBlock(block *types.Block, commit *types.Commit) error { // Currently we're assuming proposer is never nil as it's a pre-condition for // dymint to start diff --git a/block/gossip.go b/block/gossip.go index 3a8757754..b8b3fdf70 100644 --- a/block/gossip.go +++ b/block/gossip.go @@ -39,35 +39,6 @@ func (m *Manager) onNewGossipedBlock(event pubsub.Message) { } } -func (m *Manager) attemptApplyCachedBlocks() error { - m.retrieverMutex.Lock() - defer m.retrieverMutex.Unlock() - - for { - expectedHeight := m.State.NextHeight() - - cachedBlock, blockExists := m.blockCache[expectedHeight] - if !blockExists { - break - } - if err := m.validateBlock(cachedBlock.Block, cachedBlock.Commit); err != nil { - delete(m.blockCache, cachedBlock.Block.Header.Height) - /// TODO: can we take an action here such as dropping the peer / reducing their reputation? - return fmt.Errorf("block not valid at height %d, dropping it: err:%w", cachedBlock.Block.Header.Height, err) - } - - err := m.applyBlock(cachedBlock.Block, cachedBlock.Commit, blockMetaData{source: gossipedBlock}) - if err != nil { - return fmt.Errorf("apply cached block: expected height: %d: %w", expectedHeight, err) - } - m.logger.Debug("applied cached block", "height", expectedHeight) - - delete(m.blockCache, cachedBlock.Block.Header.Height) - } - - return nil -} - func (m *Manager) gossipBlock(ctx context.Context, block types.Block, commit types.Commit) error { gossipedBlock := p2p.GossipedBlock{Block: block, Commit: commit} gossipedBlockBytes, err := gossipedBlock.MarshalBinary() diff --git a/block/produce.go b/block/produce.go index 7f163ecb6..d67829c71 100644 --- a/block/produce.go +++ b/block/produce.go @@ -103,12 +103,11 @@ func (m *Manager) produceBlock(allowEmpty bool) (*types.Block, *types.Commit, er newHeight := m.State.NextHeight() lastHeaderHash, lastCommit, err := loadPrevBlock(m.Store, newHeight-1) if err != nil { - if m.State.IsGenesis() { //allow prevBlock not to be found only on genesis - lastHeaderHash = [32]byte{} - lastCommit = &types.Commit{} - } else { + if !m.State.IsGenesis() { //allow prevBlock not to be found only on genesis return nil, nil, fmt.Errorf("load prev block: %w: %w", err, ErrNonRecoverable) } + lastHeaderHash = [32]byte{} + lastCommit = &types.Commit{} } var block *types.Block diff --git a/block/pruning.go b/block/pruning.go index 17e196932..70977cfc9 100644 --- a/block/pruning.go +++ b/block/pruning.go @@ -21,7 +21,7 @@ func (m *Manager) pruneBlocks(retainHeight uint64) (uint64, error) { m.State.BaseHeight = retainHeight _, err = m.Store.SaveState(m.State, nil) if err != nil { - return 0, fmt.Errorf("final update state: %w", err) + return 0, fmt.Errorf("save state: %w", err) } m.logger.Info("pruned blocks", "pruned", pruned, "retain_height", retainHeight) diff --git a/store/pruning.go b/store/pruning.go index 098451783..cebec016e 100644 --- a/store/pruning.go +++ b/store/pruning.go @@ -11,8 +11,7 @@ func (s *DefaultStore) PruneBlocks(from, to uint64) (uint64, error) { } if to <= from { - return 0, fmt.Errorf("cannot 
prune to height %v, it is lower than base height %v",
- to, from)
+ return 0, fmt.Errorf("to height (%d) must be greater than from height (%d)", to, from)
 }

 pruned := uint64(0)

From 74bfba0fe0e01491d210498953f7d8bc32013cab Mon Sep 17 00:00:00 2001
From: Michael Tsitrin
Date: Wed, 15 May 2024 20:56:27 +0300
Subject: [PATCH 35/35] linter

---
 block/manager_test.go | 4 ++--
 block/produce.go | 4 ++--
 block/state.go | 2 +-
 indexers/txindex/indexer_service.go | 2 +-
 mempool/cache.go | 2 +-
 mempool/v1/mempool.go | 2 +-
 rpc/client/client.go | 2 +-
 store/pruning_test.go | 3 +--
 types/state.go | 2 +-
 9 files changed, 11 insertions(+), 12 deletions(-)

diff --git a/block/manager_test.go b/block/manager_test.go
index 348850165..4c08adee6 100644
--- a/block/manager_test.go
+++ b/block/manager_test.go
@@ -210,8 +210,8 @@ func TestProducePendingBlock(t *testing.T) {
 require.NoError(t, err)

 // Validate state is updated with the block that was saved in the store
- //TODO: fix this test
- //hacky way to validate the block was indeed contain txs
+ // TODO: fix this test
+ // hacky way to validate the block did indeed contain txs
 assert.NotEqual(t, manager.State.LastResultsHash, testutil.GetEmptyLastResultsHash())
 }

diff --git a/block/produce.go b/block/produce.go
index d67829c71..a68823d9b 100644
--- a/block/produce.go
+++ b/block/produce.go
@@ -103,7 +103,7 @@ func (m *Manager) produceBlock(allowEmpty bool) (*types.Block, *types.Commit, er
 newHeight := m.State.NextHeight()
 lastHeaderHash, lastCommit, err := loadPrevBlock(m.Store, newHeight-1)
 if err != nil {
- if !m.State.IsGenesis() { //allow prevBlock not to be found only on genesis
+ if !m.State.IsGenesis() { // a missing prevBlock is allowed only at genesis
 return nil, nil, fmt.Errorf("load prev block: %w: %w", err, ErrNonRecoverable)
 }
 lastHeaderHash = [32]byte{}
@@ -186,7 +186,7 @@ func (m *Manager) createTMSignature(block *types.Block, proposerAddress []byte,
 }
 v := vote.ToProto()
 // convert libp2p key to tm key
- //TODO: move to types
+ // TODO: move to types
 raw_key, _ := m.ProposerKey.Raw()
 tmprivkey := tmed25519.PrivKey(raw_key)
 tmprivkey.PubKey().Bytes()
diff --git a/block/state.go b/block/state.go
index 1207df93a..35ca63c3d 100644
--- a/block/state.go
+++ b/block/state.go
@@ -116,7 +116,7 @@ func (e *Executor) UpdateStateAfterCommit(s *types.State, resp *tmstate.ABCIResp
 copy(s.AppHash[:], appHash[:])
 copy(s.LastResultsHash[:], tmtypes.NewResults(resp.DeliverTxs).Hash())

- //TODO: load consensus params from endblock?
+ // TODO: load consensus params from endblock?
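Returning to attemptApplyCachedBlocks, moved into block/block.go by patch 34: the loop drains consecutively cached heights starting from the next expected one, stops at the first gap, and evicts blocks that fail validation. A toy rendering of the draining part:

    package main

    import "fmt"

    func main() {
        next := uint64(4)
        cache := map[uint64]string{4: "d", 5: "e", 7: "g"} // height 6 missing: 7 must wait

        for {
            blk, ok := cache[next]
            if !ok {
                break // first gap ends the drain; later blocks stay cached
            }
            fmt.Println("applied", blk, "at height", next)
            delete(cache, next)
            next++
        }
        fmt.Println("still cached:", len(cache)) // 1 (height 7)
    }
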
s.Validators = s.NextValidators.Copy() s.NextValidators = valSet.Copy() diff --git a/indexers/txindex/indexer_service.go b/indexers/txindex/indexer_service.go index 8fd936c57..0a619e758 100644 --- a/indexers/txindex/indexer_service.go +++ b/indexers/txindex/indexer_service.go @@ -57,7 +57,7 @@ func (is *IndexerService) OnStart() error { go func() { for { msg := <-blockHeadersSub.Out() - eventDataHeader := msg.Data().(types.EventDataNewBlockHeader) + eventDataHeader, _ := msg.Data().(types.EventDataNewBlockHeader) height := eventDataHeader.Header.Height batch := NewBatch(eventDataHeader.NumTxs) diff --git a/mempool/cache.go b/mempool/cache.go index 3986cd585..78aefa3c4 100644 --- a/mempool/cache.go +++ b/mempool/cache.go @@ -76,7 +76,7 @@ func (c *LRUTxCache) Push(tx types.Tx) bool { if c.list.Len() >= c.size { front := c.list.Front() if front != nil { - frontKey := front.Value.(types.TxKey) + frontKey, _ := front.Value.(types.TxKey) delete(c.cacheMap, frontKey) c.list.Remove(front) } diff --git a/mempool/v1/mempool.go b/mempool/v1/mempool.go index f59b7fbdf..308390b3b 100644 --- a/mempool/v1/mempool.go +++ b/mempool/v1/mempool.go @@ -206,7 +206,7 @@ func (txmp *TxMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo memp if !txmp.cache.Push(tx) { // If the cached transaction is also in the pool, record its sender. if elt, ok := txmp.txByKey[txKey]; ok { - w := elt.Value.(*WrappedTx) + w, _ := elt.Value.(*WrappedTx) w.SetPeer(txInfo.SenderID) } return 0, mempool.ErrTxInCache diff --git a/rpc/client/client.go b/rpc/client/client.go index 98e83fcdf..246109564 100644 --- a/rpc/client/client.go +++ b/rpc/client/client.go @@ -314,7 +314,7 @@ func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) const limit int64 = 20 minHeight, maxHeight, err := filterMinMax( - 0, //FIXME: we might be pruned + 0, // FIXME: we might be pruned int64(c.node.GetBlockManagerHeight()), minHeight, maxHeight, diff --git a/store/pruning_test.go b/store/pruning_test.go index 850d4a9ad..43dcd774e 100644 --- a/store/pruning_test.go +++ b/store/pruning_test.go @@ -19,7 +19,6 @@ func TestStorePruning(t *testing.T) { to uint64 shouldError bool }{ - {"blocks with pruning", []*types.Block{ testutil.GetRandomBlock(1, 0), testutil.GetRandomBlock(2, 0), @@ -79,7 +78,7 @@ func TestStorePruning(t *testing.T) { // Validate only blocks in the range are pruned for k := range savedHeights { - if k >= c.from && k < c.to { //k < c.to is the exclusion test + if k >= c.from && k < c.to { // k < c.to is the exclusion test _, err := bstore.LoadBlock(k) assert.Error(err, "Block at height %d should be pruned", k) diff --git a/types/state.go b/types/state.go index c74e2871e..8e7afc2d0 100644 --- a/types/state.go +++ b/types/state.go @@ -53,7 +53,7 @@ func NewStateFromGenesis(genDoc *types.GenesisDoc) (*State, error) { // but leaves the Consensus.App version blank. // The Consensus.App version will be set during the Handshake, once // we hear from the app what protocol version it is running. - var InitStateVersion = tmstate.Version{ + InitStateVersion := tmstate.Version{ Consensus: tmversion.Consensus{ Block: version.BlockProtocol, App: 0,
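Patch 35's two recurring linter fixes, for reference: comma-ok type assertions instead of bare (panicking) ones, and short variable declarations instead of "var x = ..." inside function bodies. A small sketch of both idioms (toy types; which linters flag these is an assumption, likely forcetypeassert- and gofumpt-style checks):

    package main

    import "fmt"

    type event struct{ height int64 }

    func main() {
        var msg interface{} = event{height: 10}

        // comma-ok assertion: no panic if msg holds an unexpected type
        ev, ok := msg.(event)
        fmt.Println(ev.height, ok) // 10 true

        // a bare msg.(string) would panic here; the comma-ok form just reports it
        if _, ok := msg.(string); !ok {
            fmt.Println("msg is not a string")
        }

        // short declaration preferred over "var v = ..." inside functions
        version := struct{ block, app uint64 }{block: 11, app: 0}
        fmt.Println(version.block, version.app)
    }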