From df525e7b68514ecbfae7ce6505b91fa1d73ea975 Mon Sep 17 00:00:00 2001
From: danwt <30197399+danwt@users.noreply.github.com>
Date: Thu, 12 Dec 2024 17:50:18 +0000
Subject: [PATCH 1/4] remove comments

---
 block/balance.go | 2 +-
 block/block.go | 110 ++---
 block/block_cache.go | 2 +-
 block/consensus.go | 2 +-
 block/executor.go | 44 +-
 block/fork.go | 98 ++--
 block/fraud.go | 12 +-
 block/initchain.go | 12 +-
 block/manager.go | 146 +++---
 block/modes.go | 40 +-
 block/p2p.go | 16 +-
 block/produce.go | 126 ++---
 block/pruning.go | 16 +-
 block/retriever.go | 32 +-
 block/sequencers.go | 58 +--
 block/slvalidator.go | 70 +--
 block/state.go | 50 +-
 block/submit.go | 94 ++--
 block/sync.go | 44 +-
 block/validate.go | 12 +-
 cmd/dymint/commands/init.go | 8 +-
 cmd/dymint/commands/root.go | 8 +-
 cmd/dymint/commands/show_node_id.go | 4 +-
 cmd/dymint/commands/show_sequencer.go | 4 +-
 cmd/dymint/commands/start.go | 12 +-
 cmd/dymint/main.go | 2 +-
 config/config.go | 62 +--
 config/defaults.go | 10 +-
 config/flags.go | 10 +-
 config/p2p.go | 20 +-
 config/rpc.go | 46 +-
 config/toml.go | 16 +-
 conv/config.go | 15 +-
 conv/crypto.go | 2 +-
 da/avail/avail.go | 68 +--
 da/celestia/celestia.go | 68 +--
 da/celestia/config.go | 6 +-
 da/celestia/mock/messages.go | 22 +-
 da/celestia/mock/server.go | 8 +-
 da/celestia/rpc.go | 18 +-
 da/celestia/types/rpc.go | 6 +-
 da/celestia/types/types.go | 66 +--
 da/da.go | 126 ++---
 da/errors.go | 22 +-
 da/grpc/grpc.go | 30 +-
 da/grpc/mockserv/mockserv.go | 2 +-
 da/local/local.go | 36 +-
 da/registry/registry.go | 6 +-
 indexers/blockindexer/block.go | 14 +-
 indexers/blockindexer/kv/kv.go | 140 +++---
 indexers/blockindexer/null/null.go | 2 +-
 indexers/blockindexer/query_range.go | 30 +-
 indexers/txindex/indexer.go | 26 +-
 indexers/txindex/indexer_service.go | 30 +-
 indexers/txindex/kv/kv.go | 206 ++++----
 indexers/txindex/kv/utils.go | 2 +-
 indexers/txindex/null/null.go | 8 +-
 mempool/cache.go | 32 +-
 mempool/clist/clist.go | 148 +++---
 mempool/ids.go | 2 +-
 mempool/mempool.go | 128 ++---
 mempool/metrics.go | 40 +-
 mempool/mock/mempool.go | 2 +-
 mempool/tx.go | 12 +-
 mempool/v1/mempool.go | 448 +++++++++---------
 mempool/v1/tx.go | 38 +-
 .../dymint/block/mock_ExecutorI.go | 124 ++---
 .../dymint/block/mock_FraudHandler.go | 18 +-
 .../dymint/da/avail/mock_SubstrateApiI.go | 380 +++++++--------
 .../celestia/types/mock_CelestiaRPCClient.go | 94 ++--
 .../da/mock_DataAvailabilityLayerClient.go | 76 +--
 .../dymint/p2p/mock_ProposerGetter.go | 20 +-
 .../dymint/p2p/mock_StateGetter.go | 20 +-
 .../settlement/dymension/mock_CosmosClient.go | 98 ++--
 .../dymint/settlement/mock_ClientI.go | 142 +++---
 .../dymensionxyz/dymint/store/mock_Store.go | 272 +++++------
 .../sequencer/types/mock_QueryClient.go | 92 ++--
 .../dymension/rollapp/mock_QueryClient.go | 128 ++---
 .../dymension/sequencer/mock_QueryClient.go | 104 ++--
 .../tendermint/abci/types/mock_Application.go | 110 ++---
 .../tendermint/proxy/mock_AppConnConsensus.go | 60 +--
 .../tendermint/proxy/mock_AppConns.go | 94 ++--
 node/events/types.go | 12 +-
 node/mempool/mempool.go | 18 +-
 node/node.go | 46 +-
 p2p/block.go | 22 +-
 p2p/block_sync.go | 42 +-
 p2p/block_sync_dag.go | 30 +-
 p2p/blocks_received.go | 12 +-
 p2p/client.go | 140 +++---
 p2p/events.go | 18 +-
 p2p/gossip.go | 26 +-
 p2p/validator.go | 22 +-
 rpc/client/client.go | 233 +++++----
 rpc/client/utils.go | 12 +-
 rpc/json/handler.go | 30 +-
 rpc/json/service.go | 22 +-
 rpc/json/types.go | 22 +-
 rpc/json/ws.go | 8 +-
 rpc/middleware/client.go | 8 +-
 rpc/middleware/registry.go | 12 +-
rpc/middleware/status.go | 2 +- rpc/server.go | 30 +- settlement/config.go | 6 +- settlement/dymension/cosmosclient.go | 10 +- settlement/dymension/dymension.go | 98 ++-- settlement/dymension/events.go | 20 +- settlement/dymension/options.go | 12 +- settlement/dymension/utils.go | 8 +- settlement/errors.go | 2 +- settlement/events.go | 14 +- settlement/grpc/grpc.go | 42 +- settlement/local/local.go | 50 +- settlement/registry/registry.go | 14 +- settlement/settlement.go | 64 +-- store/badger.go | 88 ++-- store/prefix.go | 26 +- store/pruning.go | 8 +- store/store.go | 48 +- store/storeIface.go | 54 +-- test/loadtime/cmd/load/main.go | 22 +- test/loadtime/cmd/report/main.go | 6 +- test/loadtime/payload/payload.go | 30 +- test/loadtime/report/report.go | 74 +-- testutil/block.go | 14 +- testutil/logger.go | 24 +- testutil/mocks.go | 52 +- testutil/node.go | 6 +- testutil/p2p.go | 6 +- testutil/rpc.go | 4 +- testutil/types.go | 26 +- types/batch.go | 20 +- types/block.go | 66 +-- types/block_source.go | 2 +- types/conv.go | 32 +- types/errors.go | 56 +-- types/evidence.go | 22 +- types/hashing.go | 4 +- types/instruction.go | 2 +- types/logger.go | 2 +- .../dymensionxyz/dymension/rollapp/errors.go | 6 +- .../dymensionxyz/dymension/rollapp/events.go | 4 +- .../pb/dymensionxyz/dymension/rollapp/keys.go | 10 +- .../dymension/rollapp/message_update_state.go | 10 +- .../dymensionxyz/dymension/rollapp/params.go | 2 +- .../dymension/sequencer/events.go | 14 +- .../dymensionxyz/dymension/sequencer/keys.go | 40 +- .../dymension/sequencer/params.go | 2 +- types/rollapp.go | 2 +- types/sequencer_set.go | 68 +-- types/serialization.go | 80 ++-- types/state.go | 38 +- types/tx.go | 14 +- types/validation.go | 12 +- utils/atomic/funcs.go | 8 +- utils/channel/funcs.go | 12 +- utils/errors/err_group.go | 14 +- utils/event/funcs.go | 10 +- utils/queue/queue.go | 16 +- utils/retry/backoff.go | 16 +- utils/retry/doc.go | 6 +- version/version.go | 2 +- 162 files changed, 3486 insertions(+), 3530 deletions(-) diff --git a/block/balance.go b/block/balance.go index 9c0e301fe..f77b518f3 100644 --- a/block/balance.go +++ b/block/balance.go @@ -14,7 +14,7 @@ import ( const CheckBalancesInterval = 3 * time.Minute -// MonitorBalances checks the balances of the node and updates the gauges for prometheus + func (m *Manager) MonitorBalances(ctx context.Context) error { ticker := time.NewTicker(CheckBalancesInterval) defer ticker.Stop() diff --git a/block/block.go b/block/block.go index 4b4562794..b8a6f3913 100644 --- a/block/block.go +++ b/block/block.go @@ -11,12 +11,12 @@ import ( "github.com/dymensionxyz/dymint/types" ) -// applyBlockWithFraudHandling calls applyBlock and validateBlockBeforeApply with fraud handling. + func (m *Manager) applyBlockWithFraudHandling(block *types.Block, commit *types.Commit, blockMetaData types.BlockMetaData) error { validateWithFraud := func() error { if err := m.validateBlockBeforeApply(block, commit); err != nil { m.blockCache.Delete(block.Header.Height) - // TODO: can we take an action here such as dropping the peer / reducing their reputation? + return fmt.Errorf("block not valid at height %d, dropping it: err:%w", block.Header.Height, err) } @@ -29,27 +29,27 @@ func (m *Manager) applyBlockWithFraudHandling(block *types.Block, commit *types. err := validateWithFraud() if errors.Is(err, gerrc.ErrFault) { - // Here we handle the fault by calling the fraud handler. - // FraudHandler is an interface that defines a method to handle faults. 
Implement this interface to handle faults - // in specific ways. For example, once a fault is detected, it publishes a DataHealthStatus event to the - // pubsub which sets the node in a frozen state. + + + + m.FraudHandler.HandleFault(m.Ctx, err) } return err } -// applyBlock applies the block to the store and the abci app. -// Contract: block and commit must be validated before calling this function! -// steps: save block -> execute block with app -> update state -> commit block to app -> update state's height and commit result. -// As the entire process can't be atomic we need to make sure the following condition apply before -// - block height is the expected block height on the store (height + 1). -// - block height is the expected block height on the app (last block height + 1). + + + + + + func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMetaData types.BlockMetaData) error { var retainHeight int64 - // TODO: add switch case to have defined behavior for each case. - // validate block height + + if block.Header.Height != m.State.NextHeight() { return types.ErrInvalidBlockHeight } @@ -58,13 +58,13 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta m.logger.Debug("Applying block", "height", block.Header.Height, "source", blockMetaData.Source.String()) - // Check if the app's last block height is the same as the currently produced block height + isBlockAlreadyApplied, err := m.isHeightAlreadyApplied(block.Header.Height) if err != nil { return fmt.Errorf("check if block is already applied: %w", err) } - // In case the following true, it means we crashed after the app commit but before updating the state - // In that case we'll want to align the state with the app commit result, as if the block was applied. + + if isBlockAlreadyApplied { err := m.UpdateStateFromApp(block.Header.Hash()) if err != nil { @@ -73,7 +73,7 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta m.logger.Info("updated state from app commit", "height", block.Header.Height) } else { var appHash []byte - // Start applying the block assuming no inconsistency was found. + _, err = m.Store.SaveBlock(block, commit, nil) if err != nil { return fmt.Errorf("save block: %w", err) @@ -104,15 +104,15 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta return fmt.Errorf("add drs version: %w", err) } - // Commit block to app + appHash, retainHeight, err = m.Executor.Commit(m.State, block, responses) if err != nil { return fmt.Errorf("commit block: %w", err) } - // Prune old heights, if requested by ABCI app. - // retainHeight is determined by currentHeight - min-retain-blocks (app.toml config). - // Unless max_age_num_blocks in consensus params is higher than min-retain-block, then max_age_num_blocks will be used instead of min-retain-blocks. + + + if 0 < retainHeight { select { @@ -121,25 +121,25 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta m.logger.Debug("pruning channel full. skipping pruning", "retainHeight", retainHeight) } } - // Update the state with the new app hash, and store height from the commit. - // Every one of those, if happens before commit, prevents us from re-executing the block in case failed during commit. 
+ + m.Executor.UpdateStateAfterCommit(m.State, responses, appHash, block.Header.Height, block.Header.Hash()) } - // save last block time used to calculate batch skew time + m.LastBlockTime.Store(block.Header.GetTimestamp().UTC().UnixNano()) - // Update the store: - // 1. Save the proposer for the current height to the store. - // 2. Update the proposer in the state in case of rotation. - // 3. Save the state to the store (independently of the height). Here the proposer might differ from (1). - // 4. Save the last block sequencer set to the store if it's present (only applicable in the sequencer mode). - // here, (3) helps properly handle reboots (specifically when there's rotation). - // If reboot happens after block H (which rotates seqA -> seqB): - // - Block H+1 will be signed by seqB. - // - The state must have seqB as proposer. - - // Proposer cannot be empty while applying the block + + + + + + + + + + + proposer := m.State.GetProposer() if proposer == nil { return fmt.Errorf("logic error: got nil proposer while applying block") @@ -147,28 +147,28 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta batch := m.Store.NewBatch() - // 1. Save the proposer for the current height to the store. - // Proposer in the store is used for RPC queries. + + batch, err = m.Store.SaveProposer(block.Header.Height, *proposer, batch) if err != nil { return fmt.Errorf("save proposer: %w", err) } - // 2. Update the proposer in the state in case of rotation happened on the rollapp level (not necessarily on the hub yet). + isProposerUpdated := m.Executor.UpdateProposerFromBlock(m.State, m.Sequencers, block) - // 3. Save the state to the store (independently of the height). Here the proposer might differ from (1). + batch, err = m.Store.SaveState(m.State, batch) if err != nil { return fmt.Errorf("update state: %w", err) } - // 4. Save the last block sequencer set to the store if it's present (only applicable in the sequencer mode). - // The set from the state is dumped to memory on reboots. It helps to avoid sending unnecessary - // UspertSequencer consensus messages on reboots. This is not a 100% solution, because the sequencer set - // is not persisted in the store in full node mode. It's only used in the proposer mode. Therefore, - // on rotation from the full node to the proposer, the sequencer set is duplicated as consensus msgs. - // Though single-time duplication it's not a big deal. + + + + + + if len(blockMetaData.SequencerSet) != 0 { batch, err = m.Store.SaveLastBlockSequencerSet(blockMetaData.SequencerSet, batch) if err != nil { @@ -185,16 +185,16 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta m.blockCache.Delete(block.Header.Height) - // validate whether configuration params and rollapp consensus params keep in line, after rollapp params are updated from the responses received in the block execution + err = m.ValidateConfigWithRollappParams() if err != nil { return err } - // Check if there was an Update for the proposer and if I am the new proposer. - // If so, restart so I can start as the proposer. - // For current proposer, we don't want to restart because we still need to send the last batch. - // This will be done as part of the `rotate` function. + + + + if isProposerUpdated && m.AmIProposerOnRollapp() { panic("I'm the new Proposer now. 
restarting as a proposer") } @@ -202,16 +202,16 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta return nil } -// isHeightAlreadyApplied checks if the block height is already applied to the app. + func (m *Manager) isHeightAlreadyApplied(blockHeight uint64) (bool, error) { proxyAppInfo, err := m.Executor.GetAppInfo() if err != nil { return false, errorsmod.Wrap(err, "get app info") } - isBlockAlreadyApplied := uint64(proxyAppInfo.LastBlockHeight) == blockHeight //nolint:gosec // LastBlockHeight is always positive + isBlockAlreadyApplied := uint64(proxyAppInfo.LastBlockHeight) == blockHeight - // TODO: add switch case to validate better the current app state + return isBlockAlreadyApplied, nil } @@ -240,7 +240,7 @@ func (m *Manager) attemptApplyCachedBlocks() error { return nil } -// This function validates the block and commit against the state before applying it. + func (m *Manager) validateBlockBeforeApply(block *types.Block, commit *types.Commit) error { return types.ValidateProposedTransition(m.State, block, commit, m.State.GetProposerPubKey()) } diff --git a/block/block_cache.go b/block/block_cache.go index b224f69fc..b74176d9e 100644 --- a/block/block_cache.go +++ b/block/block_cache.go @@ -5,7 +5,7 @@ import ( ) type Cache struct { - // concurrency managed by Manager.retrieverMu mutex + cache map[uint64]types.CachedBlock } diff --git a/block/consensus.go b/block/consensus.go index 94ce55f15..87cc6c39d 100644 --- a/block/consensus.go +++ b/block/consensus.go @@ -47,7 +47,7 @@ func ConsensusMsgSigner(m proto.Message) (sdk.AccAddress, error) { } } -// ConsensusMsgsOnSequencerSetUpdate forms a list of consensus messages to handle the sequencer set update. + func ConsensusMsgsOnSequencerSetUpdate(newSequencers []types.Sequencer) ([]proto.Message, error) { msgs := make([]proto.Message, 0, len(newSequencers)) for _, s := range newSequencers { diff --git a/block/executor.go b/block/executor.go index f3a1421c5..9f7d72f8b 100644 --- a/block/executor.go +++ b/block/executor.go @@ -19,7 +19,7 @@ import ( protoutils "github.com/dymensionxyz/dymint/utils/proto" ) -// default minimum block max size allowed. not specific reason to set it to 10K, but we need to avoid no transactions can be included in a block. + const minBlockMaxBytes = 10000 type ExecutorI interface { @@ -33,7 +33,7 @@ type ExecutorI interface { UpdateStateAfterCommit(s *types.State, resp *tmstate.ABCIResponses, appHash []byte, height uint64, lastHeaderHash [32]byte) UpdateProposerFromBlock(s *types.State, seqSet *types.SequencerSet, block *types.Block) bool - /* Consensus Messages */ + AddConsensusMsgs(...proto2.Message) GetConsensusMsgs() []proto2.Message @@ -41,7 +41,7 @@ type ExecutorI interface { var _ ExecutorI = new(Executor) -// Executor creates and applies blocks and maintains state. + type Executor struct { localAddress []byte chainID string @@ -55,8 +55,8 @@ type Executor struct { logger types.Logger } -// NewExecutor creates new instance of BlockExecutor. -// localAddress will be used in sequencer mode only. + + func NewExecutor( localAddress []byte, chainID string, @@ -79,23 +79,23 @@ func NewExecutor( return &be, nil } -// AddConsensusMsgs adds new consensus msgs to the queue. -// The method is thread-safe. + + func (e *Executor) AddConsensusMsgs(msgs ...proto2.Message) { e.consensusMsgQueue.Add(msgs...) } -// GetConsensusMsgs dequeues consensus msgs from the queue. -// The method is thread-safe. 
+ + func (e *Executor) GetConsensusMsgs() []proto2.Message { return e.consensusMsgQueue.Get() } -// InitChain calls InitChainSync using consensus connection to app. + func (e *Executor) InitChain(genesis *tmtypes.GenesisDoc, genesisChecksum string, valset []*tmtypes.Validator) (*abci.ResponseInitChain, error) { valUpdates := abci.ValidatorUpdates{} - // prepare the validator updates as expected by the ABCI app + for _, validator := range valset { tmkey, err := tmcrypto.PubKeyToProto(validator.PubKey) if err != nil { @@ -136,7 +136,7 @@ func (e *Executor) InitChain(genesis *tmtypes.GenesisDoc, genesisChecksum string }) } -// CreateBlock reaps transactions from mempool and builds a block. + func (e *Executor) CreateBlock( height uint64, lastCommit *types.Commit, @@ -144,8 +144,8 @@ func (e *Executor) CreateBlock( state *types.State, maxBlockDataSizeBytes uint64, ) *types.Block { - maxBlockDataSizeBytes = min(maxBlockDataSizeBytes, uint64(max(minBlockMaxBytes, state.ConsensusParams.Block.MaxBytes))) //nolint:gosec // MaxBytes is always positive - mempoolTxs := e.mempool.ReapMaxBytesMaxGas(int64(maxBlockDataSizeBytes), state.ConsensusParams.Block.MaxGas) //nolint:gosec // size is always positive and falls in int64 + maxBlockDataSizeBytes = min(maxBlockDataSizeBytes, uint64(max(minBlockMaxBytes, state.ConsensusParams.Block.MaxBytes))) + mempoolTxs := e.mempool.ReapMaxBytesMaxGas(int64(maxBlockDataSizeBytes), state.ConsensusParams.Block.MaxGas) block := &types.Block{ Header: types.Header{ @@ -178,7 +178,7 @@ func (e *Executor) CreateBlock( return block } -// Commit commits the block + func (e *Executor) Commit(state *types.State, block *types.Block, resp *tmstate.ABCIResponses) ([]byte, int64, error) { appHash, retainHeight, err := e.commit(state, block, resp.DeliverTxs) if err != nil { @@ -193,7 +193,7 @@ func (e *Executor) Commit(state *types.State, block *types.Block, resp *tmstate. return appHash, retainHeight, nil } -// GetAppInfo returns the latest AppInfo from the proxyApp. + func (e *Executor) GetAppInfo() (*abci.ResponseInfo, error) { return e.proxyAppQueryConn.InfoSync(abci.RequestInfo{}) } @@ -214,7 +214,7 @@ func (e *Executor) commit(state *types.State, block *types.Block, deliverTxs []* maxBytes := state.ConsensusParams.Block.MaxBytes maxGas := state.ConsensusParams.Block.MaxGas - err = e.mempool.Update(int64(block.Header.Height), fromDymintTxs(block.Data.Txs), deliverTxs) //nolint:gosec // height is non-negative and falls in int64 + err = e.mempool.Update(int64(block.Header.Height), fromDymintTxs(block.Data.Txs), deliverTxs) if err != nil { return nil, 0, err } @@ -224,7 +224,7 @@ func (e *Executor) commit(state *types.State, block *types.Block, deliverTxs []* return resp.Data, resp.RetainHeight, err } -// ExecuteBlock executes the block and returns the ABCIResponses. Block should be valid (passed validation checks). 
+ func (e *Executor) ExecuteBlock(block *types.Block) (*tmstate.ABCIResponses, error) { abciResponses := new(tmstate.ABCIResponses) abciResponses.DeliverTxs = make([]*abci.ResponseDeliverTx, len(block.Data.Txs)) @@ -273,7 +273,7 @@ func (e *Executor) ExecuteBlock(block *types.Block) (*tmstate.ABCIResponses, err } } - abciResponses.EndBlock, err = e.proxyAppConsensusConn.EndBlockSync(abci.RequestEndBlock{Height: int64(block.Header.Height)}) //nolint:gosec // height is non-negative and falls in int64 + abciResponses.EndBlock, err = e.proxyAppConsensusConn.EndBlockSync(abci.RequestEndBlock{Height: int64(block.Header.Height)}) if err != nil { return nil, err } @@ -305,14 +305,14 @@ func (e *Executor) publishEvents(resp *tmstate.ABCIResponses, block *types.Block for _, ev := range abciBlock.Evidence.Evidence { err = multierr.Append(err, e.eventBus.PublishEventNewEvidence(tmtypes.EventDataNewEvidence{ Evidence: ev, - Height: int64(block.Header.Height), //nolint:gosec // height is non-negative and falls in int64 + Height: int64(block.Header.Height), })) } for i, dtx := range resp.DeliverTxs { err = multierr.Append(err, e.eventBus.PublishEventTx(tmtypes.EventDataTx{ TxResult: abci.TxResult{ - Height: int64(block.Header.Height), //nolint:gosec // block height is within int64 range - Index: uint32(i), //nolint:gosec // num of deliver txs is less than 2^32 + Height: int64(block.Header.Height), + Index: uint32(i), Tx: abciBlock.Data.Txs[i], Result: *dtx, }, diff --git a/block/fork.go b/block/fork.go index c559ba132..5f1ff5878 100644 --- a/block/fork.go +++ b/block/fork.go @@ -20,9 +20,9 @@ const ( ForkMessage = "rollapp fork detected. please rollback to height previous to rollapp_revision_start_height." ) -// MonitorForkUpdateLoop monitors the hub for fork updates in a loop + func (m *Manager) MonitorForkUpdateLoop(ctx context.Context) error { - ticker := time.NewTicker(ForkMonitorInterval) // TODO make this configurable + ticker := time.NewTicker(ForkMonitorInterval) defer ticker.Stop() for { @@ -37,7 +37,7 @@ func (m *Manager) MonitorForkUpdateLoop(ctx context.Context) error { } } -// checkForkUpdate checks if the hub has a fork update + func (m *Manager) checkForkUpdate(msg string) error { defer m.forkMu.Unlock() m.forkMu.Lock() @@ -69,7 +69,7 @@ func (m *Manager) checkForkUpdate(msg string) error { return nil } -// createInstruction returns instruction with fork information + func (m *Manager) createInstruction(expectedRevision types.Revision) (types.Instruction, error) { obsoleteDrs, err := m.SLClient.GetObsoleteDrs() if err != nil { @@ -85,11 +85,11 @@ func (m *Manager) createInstruction(expectedRevision types.Revision) (types.Inst return instruction, nil } -// shouldStopNode determines if a rollapp node should be stopped based on revision criteria. -// -// This method checks two conditions to decide if a node should be stopped: -// 1. If the next state height is greater than or equal to the rollapp's revision start height. -// 2. 
If the block's app version (equivalent to revision) is less than the rollapp's revision + + + + + func shouldStopNode( expectedRevision types.Revision, nextHeight uint64, @@ -98,7 +98,7 @@ func shouldStopNode( return nextHeight >= expectedRevision.StartHeight && actualRevisionNumber < expectedRevision.Number } -// getRevisionFromSL returns revision data for the specific height + func (m *Manager) getRevisionFromSL(height uint64) (types.Revision, error) { rollapp, err := m.SLClient.GetRollapp() if err != nil { @@ -107,26 +107,26 @@ func (m *Manager) getRevisionFromSL(height uint64) (types.Revision, error) { return rollapp.GetRevisionForHeight(height), nil } -// doFork creates fork blocks and submits a new batch with them + func (m *Manager) doFork(instruction types.Instruction) error { - // if fork (two) blocks are not produced and applied yet, produce them + if m.State.Height() < instruction.RevisionStartHeight+1 { - // add consensus msgs to upgrade DRS to running node version (msg is created in all cases and RDK will upgrade if necessary). If returns error if running version is deprecated. + consensusMsgs, err := m.prepareDRSUpgradeMessages(instruction.FaultyDRS) if err != nil { return fmt.Errorf("prepare DRS upgrade messages: %v", err) } - // add consensus msg to bump the account sequences in all fork cases + consensusMsgs = append(consensusMsgs, &sequencers.MsgBumpAccountSequences{Authority: authtypes.NewModuleAddress("sequencers").String()}) - // create fork blocks + err = m.createForkBlocks(instruction, consensusMsgs) if err != nil { return fmt.Errorf("validate fork blocks: %v", err) } } - // submit fork batch including two fork blocks + if err := m.submitForkBatch(instruction.RevisionStartHeight); err != nil { return fmt.Errorf("submit fork batch: %v", err) } @@ -134,13 +134,13 @@ func (m *Manager) doFork(instruction types.Instruction) error { return nil } -// prepareDRSUpgradeMessages prepares consensus messages for DRS upgrades. -// It performs version validation and generates the necessary upgrade messages for the sequencer. -// -// The function implements the following logic: -// - If no faulty DRS version is provided (faultyDRS is nil), returns no messages -// - Validates the current DRS version against the potentially faulty version -// - Generates an upgrade message with the current valid DRS version + + + + + + + func (m *Manager) prepareDRSUpgradeMessages(obsoleteDRS []uint32) ([]proto.Message, error) { drsVersion, err := version.GetDRSVersion() if err != nil { @@ -161,13 +161,13 @@ func (m *Manager) prepareDRSUpgradeMessages(obsoleteDRS []uint32) ([]proto.Messa }, nil } -// create the first two blocks of the new revision -// the first one should have a cons message(s) -// both should not have tx's + + + func (m *Manager) createForkBlocks(instruction types.Instruction, consensusMsgs []proto.Message) error { nextHeight := m.State.NextHeight() - // Revise already created fork blocks + for h := instruction.RevisionStartHeight; h < nextHeight; h++ { b, err := m.Store.LoadBlock(h) if err != nil { @@ -183,7 +183,7 @@ func (m *Manager) createForkBlocks(instruction types.Instruction, consensusMsgs } } - // create two empty blocks including consensus msgs in the first one + for h := nextHeight; h < instruction.RevisionStartHeight+2; h++ { if h == instruction.RevisionStartHeight { m.Executor.AddConsensusMsgs(consensusMsgs...) 
@@ -201,13 +201,13 @@ func (m *Manager) createForkBlocks(instruction types.Instruction, consensusMsgs return nil } -// submitForkBatch verifies and, if necessary, creates a batch at the specified height. -// This function is critical for maintaining batch consistency in the blockchain while -// preventing duplicate batch submissions. -// -// The function performs the following operations: -// 1. Checks for an existing batch at the specified height via SLClient -// 2. If no batch exists, creates and submits a new one + + + + + + + func (m *Manager) submitForkBatch(height uint64) error { resp, err := m.SLClient.GetBatchAtHeight(height) if err != nil && !errors.Is(err, gerrc.ErrNotFound) { @@ -225,62 +225,62 @@ func (m *Manager) submitForkBatch(height uint64) error { return nil } -// updateStateForNextRevision updates dymint stored state in case next height corresponds to a new revision, to enable syncing (and validation) for rollapps with multiple revisions. + func (m *Manager) updateStateForNextRevision() error { - // in case fork is detected dymint state needs to be updated + - // get next revision according to node height + nextRevision, err := m.getRevisionFromSL(m.State.NextHeight()) if err != nil { return err } - // if next height is revision start height, update local state + if nextRevision.StartHeight == m.State.NextHeight() { - // Set proposer to nil to force updating it from SL + m.State.SetProposer(nil) - // Upgrade revision on state + m.State.RevisionStartHeight = nextRevision.StartHeight m.State.SetRevision(nextRevision.Number) - // update stored state + _, err = m.Store.SaveState(m.State, nil) return err } return nil } -// doForkWhenNewRevision creates and submit to SL fork blocks according to next revision start height. + func (m *Manager) doForkWhenNewRevision() error { defer m.forkMu.Unlock() m.forkMu.Lock() - // get revision next height + expectedRevision, err := m.getRevisionFromSL(m.State.NextHeight()) if err != nil { return err } - // create fork batch in case it has not been submitted yet + if m.LastSettlementHeight.Load() < expectedRevision.StartHeight { instruction, err := m.createInstruction(expectedRevision) if err != nil { return err } - // update revision with revision after fork + m.State.SetRevision(instruction.Revision) - // create and submit fork batch + err = m.doFork(instruction) if err != nil { return err } } - // this cannot happen. it means the revision number obtained is not the same or the next revision. unable to fork. + if expectedRevision.Number != m.State.GetRevision() { panic("Inconsistent expected revision number from Hub. Unable to fork") } - // remove instruction file after fork + return types.DeleteInstructionFromDisk(m.RootDir) } diff --git a/block/fraud.go b/block/fraud.go index 11a95c493..f543420eb 100644 --- a/block/fraud.go +++ b/block/fraud.go @@ -4,16 +4,16 @@ import ( "context" ) -// FraudHandler is an interface that defines a method to handle faults. -// Contract: should not be blocking. + + type FraudHandler interface { - // HandleFault handles a fault that occurred in the system. - // The fault is passed as an error type. + + HandleFault(ctx context.Context, fault error) } -// FreezeHandler is used to handle faults coming from executing and validating blocks. -// once a fault is detected, it publishes a DataHealthStatus event to the pubsub which sets the node in a frozen state. 
+ + type FreezeHandler struct { m *Manager } diff --git a/block/initchain.go b/block/initchain.go index 604bbe3cb..48fea86a7 100644 --- a/block/initchain.go +++ b/block/initchain.go @@ -11,8 +11,8 @@ import ( ) func (m *Manager) RunInitChain() error { - // Get the proposer at the initial height. If we're at genesis the height will be 0. - proposer, err := m.SLClient.GetProposerAtHeight(int64(m.State.Height()) + 1) //nolint:gosec // height is non-negative and falls in int64 + + proposer, err := m.SLClient.GetProposerAtHeight(int64(m.State.Height()) + 1) if err != nil { return fmt.Errorf("get proposer at height: %w", err) } @@ -25,13 +25,13 @@ func (m *Manager) RunInitChain() error { return err } - // validate the resulting genesis bridge data against the hub + err = m.ValidateGenesisBridgeData(res.GenesisBridgeDataBytes) if err != nil { return fmt.Errorf("Cannot validate genesis bridge data: %w. Please call `$EXECUTABLE dymint unsafe-reset-all` before the next launch to reset this node to genesis state.", err) } - // update the state with only the consensus pubkey + m.Executor.UpdateStateAfterInitChain(m.State, res) m.Executor.UpdateMempoolAfterInitChain(m.State) if _, err := m.Store.SaveState(m.State, nil); err != nil { @@ -41,8 +41,8 @@ func (m *Manager) RunInitChain() error { return nil } -// ValidateGenesisBridgeData validates the genesis bridge data from -// InitChainResponse against the rollapp genesis stored in the hub. + + func (m *Manager) ValidateGenesisBridgeData(dataBytes []byte) error { if len(dataBytes) == 0 { return fmt.Errorf("genesis bridge data is empty in InitChainResponse") diff --git a/block/manager.go b/block/manager.go index 61d74a6ab..06594e29d 100644 --- a/block/manager.go +++ b/block/manager.go @@ -36,99 +36,95 @@ import ( ) const ( - // RunModeProposer represents a node running as a proposer + RunModeProposer uint = iota - // RunModeFullNode represents a node running as a full node + RunModeFullNode ) -// Manager is responsible for aggregating transactions into blocks. + type Manager struct { logger types.Logger - // Configuration + Conf config.BlockManagerConfig Genesis *tmtypes.GenesisDoc GenesisChecksum string LocalKey crypto.PrivKey RootDir string - // Store and execution + Store store.Store State *types.State Executor ExecutorI - Sequencers *types.SequencerSet // Sequencers is the set of sequencers that are currently active on the rollapp + Sequencers *types.SequencerSet - // Clients and servers + Pubsub *pubsub.Server P2PClient *p2p.Client DAClient da.DataAvailabilityLayerClient SLClient settlement.ClientI - // RunMode represents the mode of the node. Set during initialization and shouldn't change after that. + RunMode uint - // context used when freezing node + Cancel context.CancelFunc Ctx context.Context - // LastBlockTimeInSettlement is the time of last submitted block, used to measure batch skew time + LastBlockTimeInSettlement atomic.Int64 - // LastBlockTime is the time of last produced block, used to measure batch skew time + LastBlockTime atomic.Int64 - // mutex used to avoid stopping node when fork is detected but proposer is creating/sending fork batch + forkMu sync.Mutex - /* - Sequencer and full-node - */ - // The last height which was submitted to settlement, that we know of. When we produce new batches, we will - // start at this height + 1. - // It is ALSO used by the producer, because the producer needs to check if it can prune blocks and it won't - // prune anything that might be submitted in the future. Therefore, it must be atomic. 
+ + + + + LastSettlementHeight atomic.Uint64 - // channel used to send the retain height to the pruning background loop + pruningC chan int64 - // indexer + IndexerService *txindex.IndexerService - // used to fetch blocks from DA. Sequencer will only fetch batches in case it requires to re-sync (in case of rollback). Full-node will fetch batches for syncing and validation. + Retriever da.BatchRetriever - /* - Full-node only - */ - // Protect against processing two blocks at once when there are two routines handling incoming gossiped blocks, - // and incoming DA blocks, respectively. + + + retrieverMu sync.Mutex - // Cached blocks and commits, coming from P2P, for applying at future heights. The blocks may not be valid, because - // we can only do full validation in sequential order. + + blockCache *Cache - // TargetHeight holds the value of the current highest block seen from either p2p (probably higher) or the DA + TargetHeight atomic.Uint64 - // Fraud handler + FraudHandler FraudHandler - // channel used to signal the syncing loop when there is a new state update available + settlementSyncingC chan struct{} - // channel used to signal the validation loop when there is a new state update available + settlementValidationC chan struct{} - // notifies when the node has completed syncing + syncedFromSettlement *uchannel.Nudger - // validates all non-finalized state updates from settlement, checking there is consistency between DA and P2P blocks, and the information in the state update. + SettlementValidator *SettlementValidator } -// NewManager creates new block Manager. + func NewManager( localKey crypto.PrivKey, conf config.NodeConfig, @@ -155,7 +151,7 @@ func NewManager( mempool, proxyApp, eventBus, - NewConsensusMsgQueue(), // TODO properly specify ConsensusMsgStream: https://github.com/dymensionxyz/dymint/issues/1125 + NewConsensusMsgQueue(), logger, ) if err != nil { @@ -179,10 +175,10 @@ func NewManager( blockCache: &Cache{ cache: make(map[uint64]types.CachedBlock), }, - pruningC: make(chan int64, 10), // use of buffered channel to avoid blocking applyBlock thread. In case channel is full, pruning will be skipped, but the retain height can be pruned in the next iteration. - settlementSyncingC: make(chan struct{}, 1), // use of buffered channel to avoid blocking. In case channel is full, its skipped because there is an ongoing syncing process, but syncing height is updated, which means the ongoing syncing will sync to the new height. - settlementValidationC: make(chan struct{}, 1), // use of buffered channel to avoid blocking. In case channel is full, its skipped because there is an ongoing validation process, but validation height is updated, which means the ongoing validation will validate to the new height. - syncedFromSettlement: uchannel.NewNudger(), // used by the sequencer to wait till the node completes the syncing from settlement. 
+ pruningC: make(chan int64, 10), + settlementSyncingC: make(chan struct{}, 1), + settlementValidationC: make(chan struct{}, 1), + syncedFromSettlement: uchannel.NewNudger(), } m.setFraudHandler(NewFreezeHandler(m)) err = m.LoadStateOnInit(store, genesis, logger) @@ -195,13 +191,13 @@ func NewManager( return nil, err } - // update dymint state with next revision info + err = m.updateStateForNextRevision() if err != nil { return nil, err } - // validate configuration params and rollapp consensus params are in line + err = m.ValidateConfigWithRollappParams() if err != nil { return nil, err @@ -212,10 +208,10 @@ func NewManager( return m, nil } -// Start starts the block manager. + func (m *Manager) Start(ctx context.Context) error { m.Ctx, m.Cancel = context.WithCancel(ctx) - // Check if InitChain flow is needed + if m.State.IsGenesis() { m.logger.Info("Running InitChain") @@ -225,9 +221,9 @@ func (m *Manager) Start(ctx context.Context) error { } } - // Check if a proposer on the rollapp is set. In case no proposer is set on the Rollapp, fallback to the hub proposer (If such exists). - // No proposer on the rollapp means that at some point there was no available proposer. - // In case there is also no proposer on the hub to our current height, it means that the chain is halted. + + + if m.State.GetProposer() == nil { m.logger.Info("No proposer on the rollapp, fallback to the hub proposer, if available") err := m.UpdateProposerFromSL() @@ -240,10 +236,10 @@ func (m *Manager) Start(ctx context.Context) error { } } - // checks if the the current node is the proposer either on rollapp or on the hub. - // In case of sequencer rotation, there's a phase where proposer rotated on Rollapp but hasn't yet rotated on hub. - // for this case, 2 nodes will get `true` for `AmIProposer` so the l2 proposer can produce blocks and the hub proposer can submit his last batch. - // The hub proposer, after sending the last state update, will panic and restart as full node. 
+ + + + amIProposerOnSL, err := m.AmIProposerOnSL() if err != nil { return fmt.Errorf("am i proposer on SL: %w", err) @@ -253,30 +249,30 @@ func (m *Manager) Start(ctx context.Context) error { m.logger.Info("starting block manager", "mode", map[bool]string{true: "proposer", false: "full node"}[amIProposer]) - // update local state from latest state in settlement + err = m.updateFromLastSettlementState() if err != nil { return fmt.Errorf("sync block manager from settlement: %w", err) } - // send signal to syncing loop with last settlement state update + m.triggerSettlementSyncing() - // send signal to validation loop with last settlement state update + m.triggerSettlementValidation() eg, ctx := errgroup.WithContext(m.Ctx) - // Start the pruning loop in the background + uerrors.ErrGroupGoLog(eg, m.logger, func() error { return m.PruningLoop(ctx) }) - // Start the settlement sync loop in the background + uerrors.ErrGroupGoLog(eg, m.logger, func() error { return m.SettlementSyncLoop(ctx) }) - // Monitor sequencer set updates + uerrors.ErrGroupGoLog(eg, m.logger, func() error { return m.MonitorSequencerSetUpdates(ctx) }) @@ -289,7 +285,7 @@ func (m *Manager) Start(ctx context.Context) error { return m.MonitorBalances(ctx) }) - // run based on the node role + if !amIProposer { return m.runAsFullNode(ctx, eg) } @@ -301,26 +297,26 @@ func (m *Manager) NextHeightToSubmit() uint64 { return m.LastSettlementHeight.Load() + 1 } -// updateFromLastSettlementState retrieves last sequencers and state update from the Hub and updates local state with it + func (m *Manager) updateFromLastSettlementState() error { - // Update sequencers list from SL + err := m.UpdateSequencerSetFromSL() if err != nil { - // this error is not critical + m.logger.Error("Cannot fetch sequencer set from the Hub", "error", err) } - // update latest height from SL + latestHeight, err := m.SLClient.GetLatestHeight() if errors.Is(err, gerrc.ErrNotFound) { - // The SL hasn't got any batches for this chain yet. + m.logger.Info("No batches for chain found in SL.") - m.LastSettlementHeight.Store(uint64(m.Genesis.InitialHeight - 1)) //nolint:gosec // height is non-negative and falls in int64 + m.LastSettlementHeight.Store(uint64(m.Genesis.InitialHeight - 1)) m.LastBlockTimeInSettlement.Store(m.Genesis.GenesisTime.UTC().UnixNano()) return nil } if err != nil { - // TODO: separate between fresh rollapp and non-registered rollapp + return err } @@ -331,10 +327,10 @@ func (m *Manager) updateFromLastSettlementState() error { m.LastSettlementHeight.Store(latestHeight) - // init last block in settlement time in dymint state to calculate batch submit skew time + m.SetLastBlockTimeInSettlementFromHeight(latestHeight) - // init last block time in dymint state to calculate batch submit skew time + block, err := m.Store.LoadBlock(m.State.Height()) if err == nil { m.LastBlockTime.Store(block.Header.GetTimestamp().UTC().UnixNano()) @@ -343,7 +339,7 @@ func (m *Manager) updateFromLastSettlementState() error { } func (m *Manager) updateLastFinalizedHeightFromSettlement() error { - // update latest finalized height from SL + height, err := m.SLClient.GetLatestFinalizedHeight() if errors.Is(err, gerrc.ErrNotFound) { m.logger.Info("No finalized batches for chain found in SL.") @@ -372,7 +368,7 @@ func (m *Manager) UpdateTargetHeight(h uint64) { } } -// ValidateConfigWithRollappParams checks the configuration params are consistent with the params in the dymint state (e.g. 
DA and version) + func (m *Manager) ValidateConfigWithRollappParams() error { if da.Client(m.State.RollappParams.Da) != m.DAClient.GetClientType() { return fmt.Errorf("da client mismatch. rollapp param: %s da configured: %s", m.State.RollappParams.Da, m.DAClient.GetClientType()) @@ -385,7 +381,7 @@ func (m *Manager) ValidateConfigWithRollappParams() error { return nil } -// setDA initializes DA client in blockmanager according to DA type set in genesis or stored in state + func (m *Manager) setDA(daconfig string, dalcKV store.KV, logger log.Logger) error { daLayer := m.State.RollappParams.Da dalc := registry.GetClient(daLayer) @@ -406,12 +402,12 @@ func (m *Manager) setDA(daconfig string, dalcKV store.KV, logger log.Logger) err return nil } -// setFraudHandler sets the fraud handler for the block manager. + func (m *Manager) setFraudHandler(handler *FreezeHandler) { m.FraudHandler = handler } -// freezeNode sets the node as unhealthy and prevents the node continues producing and processing blocks + func (m *Manager) freezeNode(err error) { m.logger.Info("Freezing node", "err", err) if m.Ctx.Err() != nil { @@ -421,11 +417,11 @@ func (m *Manager) freezeNode(err error) { m.Cancel() } -// SetLastBlockTimeInSettlementFromHeight is used to initialize LastBlockTimeInSettlement from rollapp height in settlement + func (m *Manager) SetLastBlockTimeInSettlementFromHeight(lastSettlementHeight uint64) { block, err := m.Store.LoadBlock(lastSettlementHeight) if err != nil { - // if settlement height block is not found it will be updated after, when syncing + return } m.LastBlockTimeInSettlement.Store(block.Header.GetTimestamp().UTC().UnixNano()) diff --git a/block/modes.go b/block/modes.go index adfd56432..e8a48d33f 100644 --- a/block/modes.go +++ b/block/modes.go @@ -20,43 +20,43 @@ const ( p2pBlocksyncLoop = "applyBlockSyncBlocksLoop" ) -// setFraudHandler sets the fraud handler for the block manager. + func (m *Manager) runAsFullNode(ctx context.Context, eg *errgroup.Group) error { m.logger.Info("starting block manager", "mode", "full node") m.RunMode = RunModeFullNode - // update latest finalized height + err := m.updateLastFinalizedHeightFromSettlement() if err != nil { return fmt.Errorf("sync block manager from settlement: %w", err) } - // Start the settlement validation loop in the background + uerrors.ErrGroupGoLog(eg, m.logger, func() error { return m.SettlementValidateLoop(ctx) }) m.subscribeFullNodeEvents(ctx) - // remove instruction file after fork to avoid enter fork loop again + return types.DeleteInstructionFromDisk(m.RootDir) } func (m *Manager) runAsProposer(ctx context.Context, eg *errgroup.Group) error { m.logger.Info("starting block manager", "mode", "proposer") m.RunMode = RunModeProposer - // Subscribe to batch events, to update last submitted height in case batch confirmation was lost. This could happen if the sequencer crash/restarted just after submitting a batch to the settlement and by the time we query the last batch, this batch wasn't accepted yet. + go uevent.MustSubscribe(ctx, m.Pubsub, "updateSubmittedHeightLoop", settlement.EventQueryNewSettlementBatchAccepted, m.UpdateLastSubmittedHeight, m.logger) - // Subscribe to P2P received blocks events (used for P2P syncing). + go uevent.MustSubscribe(ctx, m.Pubsub, p2pBlocksyncLoop, p2p.EventQueryNewBlockSyncBlock, m.OnReceivedBlock, m.logger) - // Sequencer must wait till the DA light client is synced. Otherwise it will fail when submitting blocks. 
- // Full-nodes does not need to wait, but if it tries to fetch blocks from DA heights previous to the DA light client height it will fail, and it will retry till it reaches the height. + + m.DAClient.WaitForSyncing() - // Sequencer must wait till node is synced till last submittedHeight, in case it is not + m.waitForSettlementSyncing() - // it is checked again whether the node is the active proposer, since this could have changed after syncing. + amIProposerOnSL, err := m.AmIProposerOnSL() if err != nil { return fmt.Errorf("am i proposer on SL: %w", err) @@ -65,28 +65,28 @@ func (m *Manager) runAsProposer(ctx context.Context, eg *errgroup.Group) error { return fmt.Errorf("the node is no longer the proposer. please restart.") } - // update l2 proposer from SL in case it changed after syncing + err = m.UpdateProposerFromSL() if err != nil { return err } - // doForkWhenNewRevision executes fork if necessary + err = m.doForkWhenNewRevision() if err != nil { return err } - // check if we should rotate + shouldRotate, err := m.ShouldRotate() if err != nil { return fmt.Errorf("checking should rotate: %w", err) } if shouldRotate { - m.rotate(ctx) // panics afterwards + m.rotate(ctx) } - // populate the bytes produced channel + bytesProducedC := make(chan int) uerrors.ErrGroupGoLog(eg, m.logger, func() error { @@ -94,18 +94,18 @@ func (m *Manager) runAsProposer(ctx context.Context, eg *errgroup.Group) error { }) uerrors.ErrGroupGoLog(eg, m.logger, func() error { - bytesProducedC <- m.GetUnsubmittedBytes() // load unsubmitted bytes from previous run + bytesProducedC <- m.GetUnsubmittedBytes() return m.ProduceBlockLoop(ctx, bytesProducedC) }) - // Monitor and handling of the rotation + uerrors.ErrGroupGoLog(eg, m.logger, func() error { return m.MonitorProposerRotation(ctx) }) go func() { err = eg.Wait() - // Check if loops exited due to sequencer rotation signal + if errors.Is(err, errRotationRequested) { m.rotate(ctx) } else if err != nil { @@ -118,11 +118,11 @@ func (m *Manager) runAsProposer(ctx context.Context, eg *errgroup.Group) error { } func (m *Manager) subscribeFullNodeEvents(ctx context.Context) { - // Subscribe to new (or finalized) state updates events. + go uevent.MustSubscribe(ctx, m.Pubsub, syncLoop, settlement.EventQueryNewSettlementBatchAccepted, m.onNewStateUpdate, m.logger) go uevent.MustSubscribe(ctx, m.Pubsub, validateLoop, settlement.EventQueryNewSettlementBatchFinalized, m.onNewStateUpdateFinalized, m.logger) - // Subscribe to P2P received blocks events (used for P2P syncing). + go uevent.MustSubscribe(ctx, m.Pubsub, p2pGossipLoop, p2p.EventQueryNewGossipedBlock, m.OnReceivedBlock, m.logger) go uevent.MustSubscribe(ctx, m.Pubsub, p2pBlocksyncLoop, p2p.EventQueryNewBlockSyncBlock, m.OnReceivedBlock, m.logger) } diff --git a/block/p2p.go b/block/p2p.go index 6dcae3c5e..c1c679dd3 100644 --- a/block/p2p.go +++ b/block/p2p.go @@ -9,7 +9,7 @@ import ( "github.com/tendermint/tendermint/libs/pubsub" ) -// onReceivedBlock receives a block received event from P2P, saves the block to a cache and tries to apply the blocks from the cache. 
+ func (m *Manager) OnReceivedBlock(event pubsub.Message) { eventData, ok := event.Data().(p2p.BlockData) if !ok { @@ -40,9 +40,9 @@ func (m *Manager) OnReceivedBlock(event pubsub.Message) { if block.Header.Height < m.State.NextHeight() { return } - m.retrieverMu.Lock() // needed to protect blockCache access + m.retrieverMu.Lock() - // It is not strictly necessary to return early, for correctness, but doing so helps us avoid mutex pressure and unnecessary repeated attempts to apply cached blocks + if m.blockCache.Has(height) { m.retrieverMu.Unlock() return @@ -54,7 +54,7 @@ func (m *Manager) OnReceivedBlock(event pubsub.Message) { m.logger.Debug("Received new block from p2p.", "block height", height, "source", source.String(), "store height", m.State.Height(), "n cachedBlocks", m.blockCache.Size()) m.blockCache.Add(height, &block, &commit, source) - m.retrieverMu.Unlock() // have to give this up as it's locked again in attempt apply, and we're not re-entrant + m.retrieverMu.Unlock() err := m.attemptApplyCachedBlocks() if err != nil { @@ -63,7 +63,7 @@ func (m *Manager) OnReceivedBlock(event pubsub.Message) { } } -// gossipBlock sends created blocks by the sequencer to full-nodes using P2P gossipSub + func (m *Manager) gossipBlock(ctx context.Context, block types.Block, commit types.Commit) error { m.logger.Info("Gossipping block", "height", block.Header.Height) gossipedBlock := p2p.BlockData{Block: block, Commit: commit} @@ -72,15 +72,15 @@ func (m *Manager) gossipBlock(ctx context.Context, block types.Block, commit typ return fmt.Errorf("marshal binary: %w: %w", err, ErrNonRecoverable) } if err := m.P2PClient.GossipBlock(ctx, gossipedBlockBytes); err != nil { - // Although this boils down to publishing on a topic, we don't want to speculate too much on what - // could cause that to fail, so we assume recoverable. + + return fmt.Errorf("p2p gossip block: %w: %w", err, ErrRecoverable) } return nil } -// This function adds the block to blocksync store to enable P2P retrievability + func (m *Manager) saveP2PBlockToBlockSync(block *types.Block, commit *types.Commit) error { gossipedBlock := p2p.BlockData{Block: *block, Commit: *commit} gossipedBlockBytes, err := gossipedBlock.MarshalBinary() diff --git a/block/produce.go b/block/produce.go index 9a67fe77b..a2d4ffa64 100644 --- a/block/produce.go +++ b/block/produce.go @@ -20,9 +20,9 @@ import ( "github.com/dymensionxyz/dymint/types" ) -// ProduceBlockLoop is calling publishBlock in a loop as long as we're synced. -// A signal will be sent to the bytesProduced channel for each block produced -// In this way it's possible to pause block production by not consuming the channel + + + func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int) error { m.logger.Info("Started block producer loop.") @@ -40,12 +40,12 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int) case <-ctx.Done(): return nil case <-ticker.C: - // Only produce if I'm the current rollapp proposer. + if !m.AmIProposerOnRollapp() { continue } - // if empty blocks are configured to be enabled, and one is scheduled... 
+ produceEmptyBlock := firstBlock || m.Conf.MaxIdleTime == 0 || nextEmptyBlock.Before(time.Now()) firstBlock = false @@ -54,7 +54,7 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int) m.logger.Error("Produce and gossip: context canceled.", "error", err) return nil } - if errors.Is(err, types.ErrEmptyBlock) { // occurs if the block was empty but we don't want to produce one + if errors.Is(err, types.ErrEmptyBlock) { continue } if errors.Is(err, ErrNonRecoverable) { @@ -68,8 +68,8 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int) } nextEmptyBlock = time.Now().Add(m.Conf.MaxIdleTime) if 0 < len(block.Data.Txs) { - // the block wasn't empty so we want to make sure we don't wait too long before producing another one, in order to facilitate proofs for ibc - // TODO: optimize to only do this if IBC transactions are present (https://github.com/dymensionxyz/dymint/issues/709) + + nextEmptyBlock = time.Now().Add(m.Conf.MaxProofTime) } else { m.logger.Info("Produced empty block.") @@ -102,10 +102,10 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int) type ProduceBlockOptions struct { AllowEmpty bool MaxData *uint64 - NextProposerHash *[32]byte // optional, used for last block + NextProposerHash *[32]byte } -// ProduceApplyGossipLastBlock produces and applies a block with the given NextProposerHash. + func (m *Manager) ProduceApplyGossipLastBlock(ctx context.Context, nextProposerHash [32]byte) (err error) { _, _, err = m.produceApplyGossip(ctx, ProduceBlockOptions{ AllowEmpty: true, @@ -119,22 +119,22 @@ func (m *Manager) ProduceApplyGossipBlock(ctx context.Context, opts ProduceBlock } func (m *Manager) produceApplyGossip(ctx context.Context, opts ProduceBlockOptions) (block *types.Block, commit *types.Commit, err error) { - // Snapshot sequencer set to check if there are sequencer set updates. - // It fills the consensus messages queue for all the new sequencers. - // - // Note that there cannot be any recoverable errors between when the queue is filled and dequeued; - // otherwise, the queue may grow uncontrollably if there is a recoverable error loop in the middle. - // - // All errors in this method are non-recoverable. + + + + + + + newSequencerSet, err := m.SnapshotSequencerSet() if err != nil { return nil, nil, fmt.Errorf("snapshot sequencer set: %w", err) } - // We do not want to wait for a new block created to propagate a new sequencer set. - // Therefore, we force an empty block if there are any sequencer set updates. + + opts.AllowEmpty = opts.AllowEmpty || len(newSequencerSet) > 0 - // If I'm not the current rollapp proposer, I should not produce a blocks. + block, commit, err = m.produceBlock(opts) if err != nil { return nil, nil, fmt.Errorf("produce block: %w", err) @@ -151,50 +151,50 @@ func (m *Manager) produceApplyGossip(ctx context.Context, opts ProduceBlockOptio return block, commit, nil } -// SnapshotSequencerSet loads two versions of the sequencer set: -// - the one that was used for the last block (from the store) -// - and the most recent one (from the manager memory) -// -// It then calculates the diff between the two and creates consensus messages for the new sequencers, -// i.e., only for the diff between two sets. If there is any diff (i.e., the sequencer set is updated), -// the method returns the entire new set. The new set will be used for next block and will be stored -// in the state instead of the old set after the block production. 
-// -// The set from the state is dumped to memory on reboots. It helps to avoid sending unnecessary -// UspertSequencer consensus messages on reboots. This is not a 100% solution, because the sequencer set -// is not persisted in the store in full node mode. It's only used in the proposer mode. Therefore, -// on rotation from the full node to the proposer, the sequencer set is duplicated as consensus msgs. -// Though single-time duplication it's not a big deal. + + + + + + + + + + + + + + func (m *Manager) SnapshotSequencerSet() (sequencersAfterUpdate types.Sequencers, err error) { - // the most recent sequencer set + sequencersAfterUpdate = m.Sequencers.GetAll() - // the sequencer set that was used for the last block + lastSequencers, err := m.Store.LoadLastBlockSequencerSet() - // it's okay if the last sequencer set is not found, it can happen on genesis or after - // rotation from the full node to the proposer + + if err != nil && !errors.Is(err, gerrc.ErrNotFound) { - // unexpected error from the store is non-recoverable + return nil, fmt.Errorf("load last block sequencer set: %w: %w", err, ErrNonRecoverable) } - // diff between the two sequencer sets + newSequencers := types.SequencerListRightOuterJoin(lastSequencers, sequencersAfterUpdate) if len(newSequencers) == 0 { - // nothing to upsert, nothing to persist + return nil, nil } - // Create consensus msgs for new sequencers. - // It can fail only on decoding or internal errors this is non-recoverable. + + msgs, err := ConsensusMsgsOnSequencerSetUpdate(newSequencers) if err != nil { return nil, fmt.Errorf("consensus msgs on sequencers set update: %w: %w", err, ErrNonRecoverable) } m.Executor.AddConsensusMsgs(msgs...) - // return the entire new set if there is any update + return sequencersAfterUpdate, nil } @@ -202,18 +202,18 @@ func (m *Manager) produceBlock(opts ProduceBlockOptions) (*types.Block, *types.C newHeight := m.State.NextHeight() lastHeaderHash, lastCommit, err := m.GetPreviousBlockHashes(newHeight) if err != nil { - // the error here is always non-recoverable, see GetPreviousBlockHashes() for details + return nil, nil, fmt.Errorf("load prev block: %w", err) } var block *types.Block var commit *types.Commit - // Check if there's an already stored block and commit at a newer height - // If there is use that instead of creating a new block + + pendingBlock, err := m.Store.LoadBlock(newHeight) if err == nil { - // Using an existing block + block = pendingBlock commit, err = m.Store.LoadCommit(newHeight) if err != nil { @@ -230,16 +230,16 @@ func (m *Manager) produceBlock(opts ProduceBlockOptions) (*types.Block, *types.C maxBlockDataSize = *opts.MaxData } proposerHashForBlock := [32]byte(m.State.GetProposerHash()) - // if NextProposerHash is set, we create a last block + if opts.NextProposerHash != nil { maxBlockDataSize = 0 proposerHashForBlock = *opts.NextProposerHash } - // dequeue consensus messages for the new sequencers while creating a new block + block = m.Executor.CreateBlock(newHeight, lastCommit, lastHeaderHash, proposerHashForBlock, m.State, maxBlockDataSize) - // this cannot happen if there are any sequencer set updates - // AllowEmpty should be always true in this case + + if !opts.AllowEmpty && len(block.Data.Txs) == 0 { return nil, nil, fmt.Errorf("%w: %w", types.ErrEmptyBlock, ErrRecoverable) } @@ -255,7 +255,7 @@ func (m *Manager) produceBlock(opts ProduceBlockOptions) (*types.Block, *types.C return block, commit, nil } -// create commit for block + func (m *Manager) createCommit(block *types.Block) 
(*types.Commit, error) { abciHeaderPb := types.ToABCIHeaderPB(&block.Header) abciHeaderBytes, err := abciHeaderPb.Marshal() @@ -290,7 +290,7 @@ func (m *Manager) createTMSignature(block *types.Block, proposerAddress []byte, headerHash := block.Header.Hash() vote := tmtypes.Vote{ Type: cmtproto.PrecommitType, - Height: int64(block.Header.Height), //nolint:gosec // height is non-negative and falls in int64 + Height: int64(block.Header.Height), Round: 0, Timestamp: voteTimestamp, BlockID: tmtypes.BlockID{Hash: headerHash[:], PartSetHeader: tmtypes.PartSetHeader{ @@ -301,18 +301,18 @@ func (m *Manager) createTMSignature(block *types.Block, proposerAddress []byte, ValidatorIndex: 0, } v := vote.ToProto() - // convert libp2p key to tm key - // TODO: move to types + + rawKey, _ := m.LocalKey.Raw() tmprivkey := tmed25519.PrivKey(rawKey) tmprivkey.PubKey().Bytes() - // Create a mock validator to sign the vote + tmvalidator := tmtypes.NewMockPVWithParams(tmprivkey, false, false) err := tmvalidator.SignVote(m.State.ChainID, v) if err != nil { return nil, err } - // Update the vote with the signature + vote.Signature = v.Signature pubKey := tmprivkey.PubKey() voteSignBytes := tmtypes.VoteSignBytes(m.State.ChainID, v) @@ -322,12 +322,12 @@ func (m *Manager) createTMSignature(block *types.Block, proposerAddress []byte, return vote.Signature, nil } -// GetPreviousBlockHashes returns the hash of the last block and the commit for the last block -// to be used as the previous block hash and commit for the next block + + func (m *Manager) GetPreviousBlockHashes(forHeight uint64) (lastHeaderHash [32]byte, lastCommit *types.Commit, err error) { - lastHeaderHash, lastCommit, err = getHeaderHashAndCommit(m.Store, forHeight-1) // prev height = forHeight - 1 + lastHeaderHash, lastCommit, err = getHeaderHashAndCommit(m.Store, forHeight-1) if err != nil { - if !m.State.IsGenesis() { // allow prevBlock not to be found only on genesis + if !m.State.IsGenesis() { return [32]byte{}, nil, fmt.Errorf("load prev block: %w: %w", err, ErrNonRecoverable) } lastHeaderHash = [32]byte{} @@ -336,7 +336,7 @@ func (m *Manager) GetPreviousBlockHashes(forHeight uint64) (lastHeaderHash [32]b return lastHeaderHash, lastCommit, nil } -// getHeaderHashAndCommit returns the Header Hash and Commit for a given height + func getHeaderHashAndCommit(store store.Store, height uint64) ([32]byte, *types.Commit, error) { lastCommit, err := store.LoadCommit(height) if err != nil { diff --git a/block/pruning.go b/block/pruning.go index 9a92451e9..9576938d1 100644 --- a/block/pruning.go +++ b/block/pruning.go @@ -4,9 +4,9 @@ import ( "context" ) -// Prune function prune all block related data from dymint store and blocksync store up to (but not including) retainHeight. 
+ func (m *Manager) Prune(retainHeight uint64) { - // logging pruning result + logResult := func(err error, source string, retainHeight uint64, pruned uint64) { if err != nil { m.logger.Error("pruning", "from", source, "retain height", retainHeight, "err", err) @@ -15,20 +15,20 @@ func (m *Manager) Prune(retainHeight uint64) { } } - // prune blocks from blocksync store + pruned, err := m.P2PClient.RemoveBlocks(context.Background(), retainHeight) logResult(err, "blocksync", retainHeight, pruned) - // prune indexed block and txs and associated events + pruned, err = m.IndexerService.Prune(retainHeight, m.Store) logResult(err, "indexer", retainHeight, pruned) - // prune blocks from dymint store + pruned, err = m.Store.PruneStore(retainHeight, m.logger) logResult(err, "dymint store", retainHeight, pruned) } -//nolint:gosec // height is non-negative and falls in int64 + func (m *Manager) PruningLoop(ctx context.Context) error { for { select { @@ -36,9 +36,9 @@ func (m *Manager) PruningLoop(ctx context.Context) error { return nil case retainHeight := <-m.pruningC: var pruningHeight uint64 - if m.RunMode == RunModeProposer { // do not delete anything that we might submit in future + if m.RunMode == RunModeProposer { pruningHeight = min(m.NextHeightToSubmit(), uint64(retainHeight)) - } else { // do not delete anything that is not validated yet + } else { pruningHeight = min(m.SettlementValidator.NextValidationHeight(), uint64(retainHeight)) } m.Prune(pruningHeight) diff --git a/block/retriever.go b/block/retriever.go index 3475bd398..850a9ed9e 100644 --- a/block/retriever.go +++ b/block/retriever.go @@ -22,7 +22,7 @@ func (m *Manager) ApplyBatchFromSL(slBatch *settlement.Batch) error { m.retrieverMu.Lock() defer m.retrieverMu.Unlock() - // if batch blocks have already been applied skip, otherwise it will fail in endheight validation (it can happen when syncing from blocksync in parallel). + if m.State.Height() > slBatch.EndHeight { return nil } @@ -30,7 +30,7 @@ func (m *Manager) ApplyBatchFromSL(slBatch *settlement.Batch) error { blockIndex := 0 for _, batch := range batchResp.Batches { for i, block := range batch.Blocks { - // We dont apply a block if not included in the block descriptor (adds support for rollback) + if blockIndex >= len(slBatch.BlockDescriptors) { break } @@ -45,7 +45,7 @@ func (m *Manager) ApplyBatchFromSL(slBatch *settlement.Batch) error { return err } - // We dont validate because validateBlockBeforeApply already checks if the block is already applied, and we don't need to fail there. 
+ err := m.applyBlockWithFraudHandling(block, batch.Commits[i], types.BlockMetaData{Source: types.DA, DAHeight: slBatch.MetaData.DA.Height}) if err != nil { return fmt.Errorf("apply block: height: %d: %w", block.Header.Height, err) @@ -55,7 +55,7 @@ func (m *Manager) ApplyBatchFromSL(slBatch *settlement.Batch) error { } } - // validate the batch applied successfully and we are at the end height + if m.State.Height() != slBatch.EndHeight { return fmt.Errorf("state height mismatch: state height: %d: batch end height: %d", m.State.Height(), slBatch.EndHeight) } @@ -63,14 +63,14 @@ func (m *Manager) ApplyBatchFromSL(slBatch *settlement.Batch) error { return nil } -// Used it when doing local rollback, and applying same blocks (instead of producing new ones) -// it was used for an edge case, eg: -// seq produced block H and gossiped -// bug in code produces app mismatch across nodes -// bug fixed, state rolled back to H-1 -// if seq produces new block H, it can lead to double signing, as the old block can still be in the p2p network -// ---- -// when this scenario encountered previously, we wanted to apply same block instead of producing new one + + + + + + + + func (m *Manager) applyLocalBlock() error { defer m.retrieverMu.Unlock() m.retrieverMu.Lock() @@ -101,7 +101,7 @@ func (m *Manager) applyLocalBlock() error { } func (m *Manager) fetchBatch(daMetaData *da.DASubmitMetaData) da.ResultRetrieveBatch { - // Check DA client + if daMetaData.Client != m.DAClient.GetClientType() { return da.ResultRetrieveBatch{ BaseResult: da.BaseResult{ @@ -112,9 +112,9 @@ func (m *Manager) fetchBatch(daMetaData *da.DASubmitMetaData) da.ResultRetrieveB } } - // batchRes.MetaData includes proofs necessary to open disputes with the Hub + batchRes := m.Retriever.RetrieveBatches(daMetaData) - // TODO(srene) : for invalid transactions there is no specific error code since it will need to be validated somewhere else for fraud proving. - // NMT proofs (availRes.MetaData.Proofs) are included in the result batchRes, necessary to be included in the dispute + + return batchRes } diff --git a/block/sequencers.go b/block/sequencers.go index ca6155397..ab0597222 100644 --- a/block/sequencers.go +++ b/block/sequencers.go @@ -14,7 +14,7 @@ const ( var errRotationRequested = fmt.Errorf("sequencer rotation started. signal to stop production") func (m *Manager) MonitorProposerRotation(ctx context.Context) error { - ticker := time.NewTicker(ProposerMonitorInterval) // TODO: make this configurable + ticker := time.NewTicker(ProposerMonitorInterval) defer ticker.Stop() for { @@ -27,12 +27,12 @@ func (m *Manager) MonitorProposerRotation(ctx context.Context) error { m.logger.Error("Check rotation in progress", "err", err) continue } - // no rotation in progress + if nextProposer == nil { continue } - // we get here once a sequencer rotation signal is received + m.logger.Info("Sequencer rotation started.", "nextSeqAddr", nextProposer.SettlementAddress) return errRotationRequested } @@ -50,18 +50,18 @@ func (m *Manager) MonitorSequencerSetUpdates(ctx context.Context) error { case <-ticker.C: err := m.UpdateSequencerSetFromSL() if err != nil { - // this error is not critical + m.logger.Error("Cannot fetch sequencer set from the Hub", "error", err) } } } } -// AmIProposerOnSL checks if the current node is the proposer on the hub -// Proposer on the Hub is not necessarily the proposer on the Rollapp during rotation phase. 
+ + func (m *Manager) AmIProposerOnSL() (bool, error) { localProposerKeyBytes, _ := m.LocalKey.GetPublic().Raw() - // get hub proposer key + SLProposer, err := m.SLClient.GetProposerAtHeight(-1) if err != nil { return false, fmt.Errorf("get proposer at height: %w", err) @@ -69,8 +69,8 @@ func (m *Manager) AmIProposerOnSL() (bool, error) { return bytes.Equal(SLProposer.PubKey().Bytes(), localProposerKeyBytes), nil } -// AmIProposerOnRollapp checks if the current node is the proposer on the rollapp. -// Proposer on the rollapp is not necessarily the proposer on the hub during rotation phase. + + func (m *Manager) AmIProposerOnRollapp() bool { if m.State.GetProposer() == nil { return false @@ -81,8 +81,8 @@ func (m *Manager) AmIProposerOnRollapp() bool { return bytes.Equal(rollappProposer, localProposerKeyBytes) } -// ShouldRotate checks if the we are in the middle of rotation and we are the rotating proposer (i.e current proposer on the hub). -// We check it by checking if there is a "next" proposer on the hub which is not us. + + func (m *Manager) ShouldRotate() (bool, error) { nextProposer, err := m.SLClient.GetNextProposer() if err != nil { @@ -91,8 +91,8 @@ func (m *Manager) ShouldRotate() (bool, error) { if nextProposer == nil { return false, nil } - // At this point we know that there is a next proposer, - // so we should rotate only if we are the current proposer on the hub + + amIProposerOnSL, err := m.AmIProposerOnSL() if err != nil { return false, fmt.Errorf("am i proposer on SL: %w", err) @@ -100,13 +100,13 @@ func (m *Manager) ShouldRotate() (bool, error) { return amIProposerOnSL, nil } -// rotate rotates current proposer by doing the following: -// 1. Creating last block with the new proposer, which will stop him from producing blocks. -// 2. Submitting the last batch -// 3. Panicing so the node restarts as full node -// Note: In case he already created his last block, he will only try to submit the last batch. + + + + + func (m *Manager) rotate(ctx context.Context) { - // Get Next Proposer from SL. We assume such exists (even if empty proposer) otherwise function wouldn't be called. + nextProposer, err := m.SLClient.GetNextProposer() if err != nil || nextProposer == nil { panic(fmt.Sprintf("rotate: fetch next proposer set from Hub: %v", err)) @@ -127,8 +127,8 @@ func (m *Manager) rotate(ctx context.Context) { panic("rotate: sequencer is no longer the proposer. restarting as a full node") } -// CreateAndPostLastBatch creates and posts the last batch to the hub -// this called after manager shuts down the block producer and submitter + + func (m *Manager) CreateAndPostLastBatch(ctx context.Context, nextSeqHash [32]byte) error { h := m.State.Height() block, err := m.Store.LoadBlock(h) @@ -136,8 +136,8 @@ func (m *Manager) CreateAndPostLastBatch(ctx context.Context, nextSeqHash [32]by return fmt.Errorf("load block: height: %d: %w", h, err) } - // check if the last block already produced with NextProposerHash set. - // After creating the last block, the sequencer will be restarted so it will not be able to produce blocks anymore. 
+ + if bytes.Equal(block.Header.NextSequencersHash[:], nextSeqHash[:]) { m.logger.Debug("Last block already produced and applied.") } else { @@ -147,7 +147,7 @@ func (m *Manager) CreateAndPostLastBatch(ctx context.Context, nextSeqHash [32]by } } - // Submit all data accumulated thus far and the last state update + for { b, err := m.CreateAndSubmitBatch(m.Conf.BatchSubmitBytes, true) if err != nil { @@ -162,9 +162,9 @@ func (m *Manager) CreateAndPostLastBatch(ctx context.Context, nextSeqHash [32]by return nil } -// UpdateSequencerSetFromSL updates the sequencer set from the SL. The sequencer set is saved only in memory. -// It will be persisted to the store when the block is produced (only in the proposer mode). -// Proposer is not changed here. + + + func (m *Manager) UpdateSequencerSetFromSL() error { seqs, err := m.SLClient.GetAllSequencers() if err != nil { @@ -175,9 +175,9 @@ func (m *Manager) UpdateSequencerSetFromSL() error { return nil } -// UpdateProposerFromSL queries the hub and updates the local dymint state proposer at the current height + func (m *Manager) UpdateProposerFromSL() error { - SLProposer, err := m.SLClient.GetProposerAtHeight(int64(m.State.NextHeight())) //nolint:gosec // height is non-negative and falls in int64 + SLProposer, err := m.SLClient.GetProposerAtHeight(int64(m.State.NextHeight())) if err != nil { return fmt.Errorf("get proposer at height: %w", err) } diff --git a/block/slvalidator.go b/block/slvalidator.go index bf9b8ac0a..700911dc5 100644 --- a/block/slvalidator.go +++ b/block/slvalidator.go @@ -13,14 +13,14 @@ import ( "github.com/dymensionxyz/dymint/types" ) -// SettlementValidator validates batches from settlement layer with the corresponding blocks from DA and P2P. + type SettlementValidator struct { logger types.Logger blockManager *Manager lastValidatedHeight atomic.Uint64 } -// NewSettlementValidator returns a new StateUpdateValidator instance. + func NewSettlementValidator(logger types.Logger, blockManager *Manager) *SettlementValidator { lastValidatedHeight, err := blockManager.Store.LoadValidationHeight() if err != nil { @@ -36,13 +36,13 @@ func NewSettlementValidator(logger types.Logger, blockManager *Manager) *Settlem return validator } -// ValidateStateUpdate validates that the blocks from the state info are available in DA, -// that the information included in the Hub state info matches the blocks retrieved from DA -// and those blocks are the same that are obtained via P2P. + + + func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrieveBatch) error { v.logger.Debug("validating state update", "start height", batch.StartHeight, "end height", batch.EndHeight) - // loads blocks applied from P2P, if any. 
+ p2pBlocks := make(map[uint64]*types.Block) for height := batch.StartHeight; height <= batch.EndHeight; height++ { source, err := v.blockManager.Store.LoadBlockSource(height) @@ -51,7 +51,7 @@ func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrie continue } - // if block is not P2P block, skip + if source != types.Gossiped && source != types.BlockSync { continue } @@ -64,7 +64,7 @@ func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrie p2pBlocks[block.Header.Height] = block } - // load all DA blocks from the batch to be validated + var daBatch da.ResultRetrieveBatch for { daBatch = v.blockManager.Retriever.RetrieveBatches(batch.MetaData.DA) @@ -72,18 +72,18 @@ func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrie break } - // fraud detected in case blob is retrieved but unable to get blocks from it. + if errors.Is(daBatch.BaseResult.Error, da.ErrBlobNotParsed) { return types.NewErrStateUpdateBlobCorruptedFraud(batch.StateIndex, string(batch.MetaData.DA.Client), batch.MetaData.DA.Height, hex.EncodeToString(batch.MetaData.DA.Commitment)) } - // fraud detected in case availability checks fail and therefore there certainty the blob, according to the state update DA path, is not available. + checkBatchResult := v.blockManager.Retriever.CheckBatchAvailability(batch.MetaData.DA) if errors.Is(checkBatchResult.Error, da.ErrBlobNotIncluded) { return types.NewErrStateUpdateBlobNotAvailableFraud(batch.StateIndex, string(batch.MetaData.DA.Client), batch.MetaData.DA.Height, hex.EncodeToString(batch.MetaData.DA.Commitment)) } - // FIXME: how to handle non-happy case? not returning error? + continue } @@ -93,18 +93,18 @@ func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrie types.LastReceivedDAHeightGauge.Set(float64(batch.EndHeight())) } - // validate DA blocks against the state update + err := v.ValidateDaBlocks(batch, daBlocks) if err != nil { return err } - // nothing to validate at P2P level, finish here. + if len(p2pBlocks) == 0 { return nil } - // validate P2P blocks against DA blocks + err = v.ValidateP2PBlocks(daBlocks, p2pBlocks) if err != nil { return err @@ -113,10 +113,10 @@ func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrie return nil } -// ValidateP2PBlocks basically compares that the blocks applied from P2P are the same blocks included in the batch and retrieved from DA. -// Since DA blocks have been already validated against Hub state info block descriptors, if P2P blocks match with DA blocks, it means they are also validated against state info block descriptors. + + func (v *SettlementValidator) ValidateP2PBlocks(daBlocks []*types.Block, p2pBlocks map[uint64]*types.Block) error { - // iterate over daBlocks and compare hashes with the corresponding block from P2P (if exists) to see whether they are actually the same block + for _, daBlock := range daBlocks { p2pBlock, ok := p2pBlocks[daBlock.Header.Height] @@ -140,9 +140,9 @@ func (v *SettlementValidator) ValidateP2PBlocks(daBlocks []*types.Block, p2pBloc return nil } -// ValidateDaBlocks checks that the information included in the Hub state info (height, state roots and timestamps), correspond to the blocks obtained from DA. 
+ func (v *SettlementValidator) ValidateDaBlocks(slBatch *settlement.ResultRetrieveBatch, daBlocks []*types.Block) error { - // we first verify the numblocks included in the state info match the block descriptors and the blocks obtained from DA + numSlBDs := uint64(len(slBatch.BlockDescriptors)) numSLBlocks := slBatch.NumBlocks numDABlocks := uint64(len(daBlocks)) @@ -150,36 +150,36 @@ func (v *SettlementValidator) ValidateDaBlocks(slBatch *settlement.ResultRetriev return types.NewErrStateUpdateNumBlocksNotMatchingFraud(slBatch.EndHeight, numSLBlocks, numSLBlocks, numDABlocks) } - // we compare all DA blocks against the information included in the state info block descriptors + for i, bd := range slBatch.BlockDescriptors { - // height check + if bd.Height != daBlocks[i].Header.Height { return types.NewErrStateUpdateHeightNotMatchingFraud(slBatch.StateIndex, slBatch.BlockDescriptors[0].Height, daBlocks[0].Header.Height, slBatch.BlockDescriptors[len(slBatch.BlockDescriptors)-1].Height, daBlocks[len(daBlocks)-1].Header.Height) } - // we compare the state root between SL state info and DA block + if !bytes.Equal(bd.StateRoot, daBlocks[i].Header.AppHash[:]) { return types.NewErrStateUpdateStateRootNotMatchingFraud(slBatch.StateIndex, bd.Height, bd.StateRoot, daBlocks[i].Header.AppHash[:]) } - // we compare the timestamp between SL state info and DA block + if !bd.Timestamp.Equal(daBlocks[i].Header.GetTimestamp()) { return types.NewErrStateUpdateTimestampNotMatchingFraud(slBatch.StateIndex, bd.Height, bd.Timestamp, daBlocks[i].Header.GetTimestamp()) } - // we validate block descriptor drs version per height + err := v.validateDRS(slBatch.StateIndex, bd.Height, bd.DrsVersion) if err != nil { return err } } - // we compare the sequencer address between SL state info and DA block - // if next sequencer is not set, we check if the sequencer hash is equal to the next sequencer hash - // because it did not change. If the next sequencer is set, we check if the next sequencer hash is equal on the - // last block of the batch + + + + lastDABlock := daBlocks[numSlBDs-1] - // if lastDaBlock is previous block to fork, dont validate nextsequencerhash of last block because it will not match + if v.blockManager.State.RevisionStartHeight-1 == lastDABlock.Header.Height { v.logger.Debug("DA blocks, previous to fork, validated successfully", "start height", daBlocks[0].Header.Height, "end height", daBlocks[len(daBlocks)-1].Header.Height) return nil @@ -202,8 +202,8 @@ func (v *SettlementValidator) ValidateDaBlocks(slBatch *settlement.ResultRetriev return nil } -// UpdateLastValidatedHeight sets the height saved in the Store if it is higher than the existing height -// returns OK if the value was updated successfully or did not need to be updated + + func (v *SettlementValidator) UpdateLastValidatedHeight(height uint64) { for { curr := v.lastValidatedHeight.Load() @@ -217,17 +217,17 @@ func (v *SettlementValidator) UpdateLastValidatedHeight(height uint64) { } } -// GetLastValidatedHeight returns the most last block height that is validated with settlement state updates. + func (v *SettlementValidator) GetLastValidatedHeight() uint64 { return v.lastValidatedHeight.Load() } -// NextValidationHeight returns the next height that needs to be validated with settlement state updates. + func (v *SettlementValidator) NextValidationHeight() uint64 { return v.lastValidatedHeight.Load() + 1 } -// validateDRS compares the DRS version stored for the specific height, obtained from rollapp params. 
+ func (v *SettlementValidator) validateDRS(stateIndex uint64, height uint64, version uint32) error { drs, err := v.blockManager.Store.LoadDRSVersion(height) if err != nil { @@ -240,7 +240,7 @@ func (v *SettlementValidator) validateDRS(stateIndex uint64, height uint64, vers return nil } -// blockHash generates a hash from the block bytes to compare them + func blockHash(block *types.Block) ([]byte, error) { blockBytes, err := block.MarshalBinary() if err != nil { diff --git a/block/state.go b/block/state.go index 7b1991bc2..2d052de06 100644 --- a/block/state.go +++ b/block/state.go @@ -19,7 +19,7 @@ import ( "github.com/dymensionxyz/dymint/types" ) -// LoadStateOnInit tries to load lastState from Store, and if it's not available it reads GenesisDoc. + func (m *Manager) LoadStateOnInit(store store.Store, genesis *tmtypes.GenesisDoc, logger types.Logger) error { s, err := store.LoadState() if errors.Is(err, types.ErrNoStateFound) { @@ -36,18 +36,18 @@ func (m *Manager) LoadStateOnInit(store store.Store, genesis *tmtypes.GenesisDoc return nil } -// NewStateFromGenesis reads blockchain State from genesis. -// The active sequencer list will be set on InitChain + + func NewStateFromGenesis(genDoc *tmtypes.GenesisDoc) (*types.State, error) { err := genDoc.ValidateAndComplete() if err != nil { return nil, fmt.Errorf("in genesis doc: %w", err) } - // InitStateVersion sets the Consensus.Block and Software versions, - // but leaves the Consensus.App version blank. - // The Consensus.App version will be set during the Handshake, once - // we hear from the app what protocol version it is running. + + + + InitStateVersion := tmstate.Version{ Consensus: tmversion.Consensus{ Block: version.BlockProtocol, @@ -59,7 +59,7 @@ func NewStateFromGenesis(genDoc *tmtypes.GenesisDoc) (*types.State, error) { s := types.State{ Version: InitStateVersion, ChainID: genDoc.ChainID, - InitialHeight: uint64(genDoc.InitialHeight), //nolint:gosec // height is non-negative and falls in int64 + InitialHeight: uint64(genDoc.InitialHeight), ConsensusParams: *genDoc.ConsensusParams, } s.SetHeight(0) @@ -73,29 +73,29 @@ func NewStateFromGenesis(genDoc *tmtypes.GenesisDoc) (*types.State, error) { return &s, nil } -// UpdateStateFromApp is responsible for aligning the state of the store from the abci app + func (m *Manager) UpdateStateFromApp(blockHeaderHash [32]byte) error { proxyAppInfo, err := m.Executor.GetAppInfo() if err != nil { return errorsmod.Wrap(err, "get app info") } - appHeight := uint64(proxyAppInfo.LastBlockHeight) //nolint:gosec // height is non-negative and falls in int64 + appHeight := uint64(proxyAppInfo.LastBlockHeight) resp, err := m.Store.LoadBlockResponses(appHeight) if err != nil { return errorsmod.Wrap(err, "load block responses") } - // update the state with the app hashes created on the app commit + m.Executor.UpdateStateAfterCommit(m.State, resp, proxyAppInfo.LastBlockAppHash, appHeight, blockHeaderHash) return nil } func (e *Executor) UpdateStateAfterInitChain(s *types.State, res *abci.ResponseInitChain) { - // If the app did not return an app hash, we keep the one set from the genesis doc in - // the state. We don't set appHash since we don't want the genesis doc app hash - // recorded in the genesis block. We should probably just remove GenesisDoc.AppHash. 
+ + + if len(res.AppHash) > 0 { copy(s.AppHash[:], res.AppHash) } @@ -106,7 +106,7 @@ func (e *Executor) UpdateStateAfterInitChain(s *types.State, res *abci.ResponseI s.ConsensusParams.Block.MaxGas = params.Block.MaxGas } } - // We update the last results hash with the empty hash, to conform with RFC-6962. + copy(s.LastResultsHash[:], merkle.HashFromByteSlices(nil)) } @@ -115,7 +115,7 @@ func (e *Executor) UpdateMempoolAfterInitChain(s *types.State) { e.mempool.SetPostCheckFn(mempool.PostCheckMaxGas(s.ConsensusParams.Block.MaxGas)) } -// UpdateStateAfterCommit updates the state with the app hash and last results hash + func (e *Executor) UpdateStateAfterCommit(s *types.State, resp *tmstate.ABCIResponses, appHash []byte, height uint64, lastHeaderHash [32]byte) { copy(s.AppHash[:], appHash[:]) copy(s.LastResultsHash[:], tmtypes.NewResults(resp.DeliverTxs).Hash()) @@ -132,26 +132,26 @@ func (e *Executor) UpdateStateAfterCommit(s *types.State, resp *tmstate.ABCIResp } } -// UpdateProposerFromBlock updates the proposer from the block -// The next proposer is defined in the block header (NextSequencersHash) -// TODO: (https://github.com/dymensionxyz/dymint/issues/1008) + + + func (e *Executor) UpdateProposerFromBlock(s *types.State, seqSet *types.SequencerSet, block *types.Block) bool { - // no sequencer change + if bytes.Equal(block.Header.SequencerHash[:], block.Header.NextSequencersHash[:]) { return false } if block.Header.NextSequencersHash == [32]byte{} { - // the chain will be halted until proposer is set - // TODO: recover from halt (https://github.com/dymensionxyz/dymint/issues/1021) + + e.logger.Info("rollapp left with no proposer. chain is halted") s.SetProposer(nil) return true } - // if hash changed, update the proposer - // We assume here that we're updated with the latest sequencer set - // FIXME: Think how to handle not being updated with the latest sequencer set + + + seq, found := seqSet.GetByHash(block.Header.NextSequencersHash[:]) if !found { e.logger.Error("cannot find proposer by hash") diff --git a/block/submit.go b/block/submit.go index 3ee4e2dc4..87150c3c9 100644 --- a/block/submit.go +++ b/block/submit.go @@ -17,11 +17,11 @@ import ( uchannel "github.com/dymensionxyz/dymint/utils/channel" ) -// SubmitLoop is the main loop for submitting blocks to the DA and SL layers. -// It submits a batch when either -// 1) It accumulates enough block data, so it's necessary to submit a batch to avoid exceeding the max size -// 2) Enough time passed since the last submitted batch, so it's necessary to submit a batch to avoid exceeding the max time -// It will back pressure (pause) block production if it falls too far behind. + + + + + func (m *Manager) SubmitLoop(ctx context.Context, bytesProduced chan int, ) (err error) { @@ -39,41 +39,41 @@ func (m *Manager) SubmitLoop(ctx context.Context, ) } -// SubmitLoopInner is a unit testable impl of SubmitLoop + func SubmitLoopInner( ctx context.Context, logger types.Logger, - bytesProduced chan int, // a channel of block and commit bytes produced - maxSkewTime time.Duration, // max time between last submitted block and last produced block allowed. if this threshold is reached block production is stopped. 
- unsubmittedBlocksNum func() uint64, // func that returns the amount of non-submitted blocks - unsubmittedBlocksBytes func() int, // func that returns bytes from non-submitted blocks - batchSkewTime func() time.Duration, // func that returns measured time between last submitted block and last produced block - maxBatchSubmitTime time.Duration, // max time to allow between batches - maxBatchSubmitBytes uint64, // max size of serialised batch in bytes + bytesProduced chan int, + maxSkewTime time.Duration, + unsubmittedBlocksNum func() uint64, + unsubmittedBlocksBytes func() int, + batchSkewTime func() time.Duration, + maxBatchSubmitTime time.Duration, + maxBatchSubmitBytes uint64, createAndSubmitBatch func(maxSizeBytes uint64) (bytes uint64, err error), ) error { eg, ctx := errgroup.WithContext(ctx) pendingBytes := atomic.Uint64{} - trigger := uchannel.NewNudger() // used to avoid busy waiting (using cpu) on trigger thread - submitter := uchannel.NewNudger() // used to avoid busy waiting (using cpu) on submitter thread + trigger := uchannel.NewNudger() + submitter := uchannel.NewNudger() eg.Go(func() error { - // 'trigger': this thread is responsible for waking up the submitter when a new block arrives, and back-pressures the block production loop - // if it gets too far ahead. + + for { select { case <-ctx.Done(): return nil case n := <-bytesProduced: - pendingBytes.Add(uint64(n)) //nolint:gosec // bytes size is always positive + pendingBytes.Add(uint64(n)) logger.Debug("Added bytes produced to bytes pending submission counter.", "bytes added", n, "pending", pendingBytes.Load()) } submitter.Nudge() - // if the time between the last produced block and last submitted is greater than maxSkewTime we block here until we get a progress nudge from the submitter thread + if maxSkewTime < batchSkewTime() { select { case <-ctx.Done(): @@ -86,7 +86,7 @@ func SubmitLoopInner( }) eg.Go(func() error { - // 'submitter': this thread actually creates and submits batches. this thread is woken up every batch_submit_time (in addition to every block produced) to check if there is anything to submit even if no new blocks have been produced + ticker := time.NewTicker(maxBatchSubmitTime) for { select { @@ -98,7 +98,7 @@ func SubmitLoopInner( pending := pendingBytes.Load() - // while there are accumulated blocks, create and submit batches!! + for { done := ctx.Err() != nil nothingToSubmit := pending == 0 @@ -119,22 +119,22 @@ func SubmitLoopInner( logger.Error("Create and submit batch", "err", err, "pending", pending) panic(err) } - // this could happen if we timed-out waiting for acceptance in the previous iteration, but the batch was indeed submitted. - // we panic here cause restarting may reset the last batch submitted counter and the sequencer can potentially resume submitting batches. 
+ + if errors.Is(err, gerrc.ErrAlreadyExists) { logger.Debug("Batch already accepted", "err", err, "pending", pending) panic(err) } return err } - pending = uint64(unsubmittedBlocksBytes()) //nolint:gosec // bytes size is always positive - // after new batch submitted we check the skew time to wake up 'trigger' thread and restart block production + pending = uint64(unsubmittedBlocksBytes()) + if batchSkewTime() < maxSkewTime { trigger.Nudge() } logger.Debug("Submitted a batch to both sub-layers.", "n bytes consumed from pending", nConsumed, "pending after", pending, "skew time", batchSkewTime()) } - // update pendingBytes with non submitted block bytes after all pending batches have been submitted + pendingBytes.Store(pending) } }) @@ -142,25 +142,25 @@ func SubmitLoopInner( return eg.Wait() } -// CreateAndSubmitBatchGetSizeBlocksCommits creates and submits a batch to the DA and SL. -// Returns size of block and commit bytes -// max size bytes is the maximum size of the serialized batch type + + + func (m *Manager) CreateAndSubmitBatchGetSizeBlocksCommits(maxSize uint64) (uint64, error) { b, err := m.CreateAndSubmitBatch(maxSize, false) if b == nil { return 0, err } - return uint64(b.SizeBlockAndCommitBytes()), err //nolint:gosec // size is always positive and falls in uint64 + return uint64(b.SizeBlockAndCommitBytes()), err } -// CreateAndSubmitBatch creates and submits a batch to the DA and SL. -// max size bytes is the maximum size of the serialized batch type + + func (m *Manager) CreateAndSubmitBatch(maxSizeBytes uint64, lastBatch bool) (*types.Batch, error) { startHeight := m.NextHeightToSubmit() endHeightInclusive := m.State.Height() if endHeightInclusive < startHeight { - // TODO: https://github.com/dymensionxyz/dymint/issues/999 + return nil, fmt.Errorf( "next height to submit is greater than last block height, create and submit batch should not have been called: start height: %d: end height inclusive: %d: %w", startHeight, @@ -173,7 +173,7 @@ func (m *Manager) CreateAndSubmitBatch(maxSizeBytes uint64, lastBatch bool) (*ty if err != nil { return nil, fmt.Errorf("create batch: %w", err) } - // This is the last batch, so we need to mark it as such + if lastBatch && b.EndHeight() == endHeightInclusive { b.LastBatch = true } @@ -187,8 +187,8 @@ func (m *Manager) CreateAndSubmitBatch(maxSizeBytes uint64, lastBatch bool) (*ty return b, nil } -// CreateBatch looks through the store for any unsubmitted blocks and commits and bundles them into a batch -// max size bytes is the maximum size of the serialized batch type + + func (m *Manager) CreateBatch(maxBatchSize uint64, startHeight uint64, endHeightInclusive uint64) (*types.Batch, error) { batchSize := endHeightInclusive - startHeight + 1 batch := &types.Batch{ @@ -211,7 +211,7 @@ func (m *Manager) CreateBatch(maxBatchSize uint64, startHeight uint64, endHeight return nil, fmt.Errorf("load drs version: h: %d: %w", h, err) } - // check all blocks have the same revision + if len(batch.Blocks) > 0 && batch.Blocks[len(batch.Blocks)-1].GetRevision() != block.GetRevision() { return nil, fmt.Errorf("create batch: batch includes blocks with different revisions: %w", gerrc.ErrInternal) } @@ -221,9 +221,9 @@ func (m *Manager) CreateBatch(maxBatchSize uint64, startHeight uint64, endHeight batch.DRSVersion = append(batch.DRSVersion, drsVersion) totalSize := batch.SizeBytes() - if maxBatchSize < uint64(totalSize) { //nolint:gosec // size is always positive and falls in uint64 + if maxBatchSize < uint64(totalSize) { - // Remove the last block and 
commit from the batch + batch.Blocks = batch.Blocks[:len(batch.Blocks)-1] batch.Commits = batch.Commits[:len(batch.Commits)-1] batch.DRSVersion = batch.DRSVersion[:len(batch.DRSVersion)-1] @@ -256,19 +256,17 @@ func (m *Manager) SubmitBatch(batch *types.Batch) error { types.RollappHubHeightGauge.Set(float64(batch.EndHeight())) m.LastSettlementHeight.Store(batch.EndHeight()) - // update last submitted block time with batch last block (used to calculate max skew time) + m.LastBlockTimeInSettlement.Store(batch.Blocks[len(batch.Blocks)-1].Header.GetTimestamp().UTC().UnixNano()) return err } -// GetUnsubmittedBytes returns the total number of unsubmitted bytes produced an element on a channel -// Intended only to be used at startup, before block production and submission loops start + + func (m *Manager) GetUnsubmittedBytes() int { total := 0 - /* - On node start we want to include the count of any blocks which were produced and not submitted in a previous instance - */ + currH := m.State.Height() for h := m.NextHeightToSubmit(); h <= currH; h++ { @@ -296,8 +294,8 @@ func (m *Manager) GetUnsubmittedBlocks() uint64 { return m.State.Height() - m.LastSettlementHeight.Load() } -// UpdateLastSubmittedHeight will update last height submitted height upon events. -// This may be necessary in case we crashed/restarted before getting response for our submission to the settlement layer. + + func (m *Manager) UpdateLastSubmittedHeight(event pubsub.Message) { eventData, ok := event.Data().(*settlement.EventDataNewBatch) if !ok { @@ -314,7 +312,7 @@ func (m *Manager) UpdateLastSubmittedHeight(event pubsub.Message) { } } -// GetBatchSkewTime returns the time between the last produced block and the last block submitted to SL + func (m *Manager) GetBatchSkewTime() time.Duration { lastProducedTime := time.Unix(0, m.LastBlockTime.Load()) lastSubmittedTime := time.Unix(0, m.LastBlockTimeInSettlement.Load()) diff --git a/block/sync.go b/block/sync.go index 9c3605669..bef64587e 100644 --- a/block/sync.go +++ b/block/sync.go @@ -12,7 +12,7 @@ import ( "github.com/dymensionxyz/dymint/settlement" ) -// onNewStateUpdate will update the last submitted height and will update sequencers list from SL. After, it triggers syncing or validation, depending whether it needs to sync first or only validate. + func (m *Manager) onNewStateUpdate(event pubsub.Message) { eventData, ok := event.Data().(*settlement.EventDataNewBatch) if !ok { @@ -20,32 +20,32 @@ func (m *Manager) onNewStateUpdate(event pubsub.Message) { return } - // Update heights based on state update end height + m.LastSettlementHeight.Store(eventData.EndHeight) - // Update sequencers list from SL + err := m.UpdateSequencerSetFromSL() if err != nil { - // this error is not critical + m.logger.Error("Cannot fetch sequencer set from the Hub", "error", err) } if eventData.EndHeight > m.State.Height() { - // trigger syncing from settlement last state update. + m.triggerSettlementSyncing() - // update target height used for syncing status rpc + m.UpdateTargetHeight(eventData.EndHeight) } else { - // trigger validation of the last state update available in settlement + m.triggerSettlementValidation() } } -// SettlementSyncLoop listens for syncing triggers which indicate new settlement height updates, and attempts to sync to the last seen settlement height. -// Syncing triggers can be called when a new settlement state update event arrives or explicitly from the `updateFromLastSettlementState` method which is only being called upon startup. 
-// Upon new trigger, we know the settlement reached a new height we haven't seen before so a validation signal is sent to validate the settlement batch. -// Note: even when a sync is triggered, there is no guarantee that the batch will be applied from settlement as there is a race condition with the p2p/blocksync for syncing. + + + + func (m *Manager) SettlementSyncLoop(ctx context.Context) error { for { select { @@ -55,12 +55,12 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error { m.logger.Info("syncing to target height", "targetHeight", m.LastSettlementHeight.Load()) for currH := m.State.NextHeight(); currH <= m.LastSettlementHeight.Load(); currH = m.State.NextHeight() { - // if context has been cancelled, stop syncing + if ctx.Err() != nil { return nil } - // if we have the block locally, we don't need to fetch it from the DA. - // it will only happen in case of rollback. + + err := m.applyLocalBlock() if err == nil { m.logger.Info("Synced from local", "store height", m.State.Height(), "target height", m.LastSettlementHeight.Load()) @@ -76,12 +76,12 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error { } m.logger.Info("Retrieved state update from SL.", "state_index", settlementBatch.StateIndex) - // we update LastBlockTimeInSettlement to be able to measure batch skew time with last block time in settlement + m.LastBlockTimeInSettlement.Store(settlementBatch.BlockDescriptors[len(settlementBatch.BlockDescriptors)-1].GetTimestamp().UTC().UnixNano()) err = m.ApplyBatchFromSL(settlementBatch.Batch) - // this will keep sync loop alive when DA is down or retrievals are failing because DA issues. + if errors.Is(err, da.ErrRetrieval) { continue } @@ -91,7 +91,7 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error { m.logger.Info("Synced from DA", "store height", m.State.Height(), "target height", m.LastSettlementHeight.Load()) - // trigger state update validation, after each state update is applied + m.triggerSettlementValidation() err = m.attemptApplyCachedBlocks() @@ -101,10 +101,10 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error { } - // avoid notifying as synced in case it fails before + if m.State.Height() >= m.LastSettlementHeight.Load() { m.logger.Info("Synced.", "current height", m.State.Height(), "last submitted height", m.LastSettlementHeight.Load()) - // nudge to signal to any listens that we're currently synced with the last settlement height we've seen so far + m.syncedFromSettlement.Nudge() } @@ -112,14 +112,14 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error { } } -// waitForSyncing waits for synced nudge (in case it needs to because it was syncing) + func (m *Manager) waitForSettlementSyncing() { if m.State.Height() < m.LastSettlementHeight.Load() { <-m.syncedFromSettlement.C } } -// triggerStateUpdateSyncing sends signal to channel used by syncing loop + func (m *Manager) triggerSettlementSyncing() { select { case m.settlementSyncingC <- struct{}{}: @@ -128,7 +128,7 @@ func (m *Manager) triggerSettlementSyncing() { } } -// triggerStateUpdateValidation sends signal to channel used by validation loop + func (m *Manager) triggerSettlementValidation() { select { case m.settlementValidationC <- struct{}{}: diff --git a/block/validate.go b/block/validate.go index d2a86d07f..e4078fe8a 100644 --- a/block/validate.go +++ b/block/validate.go @@ -11,8 +11,8 @@ import ( "github.com/tendermint/tendermint/libs/pubsub" ) -// onNewStateUpdateFinalized will update the last validated height with the last finalized 
height. -// Unlike pending heights, once heights are finalized, we treat them as validated as there is no point validating finalized heights. + + func (m *Manager) onNewStateUpdateFinalized(event pubsub.Message) { eventData, ok := event.Data().(*settlement.EventDataNewBatch) if !ok { @@ -22,7 +22,7 @@ func (m *Manager) onNewStateUpdateFinalized(event pubsub.Message) { m.SettlementValidator.UpdateLastValidatedHeight(eventData.EndHeight) } -// SettlementValidateLoop listens for syncing events (from new state update or from initial syncing) and validates state updates to the last submitted height. + func (m *Manager) SettlementValidateLoop(ctx context.Context) error { for { select { @@ -33,14 +33,14 @@ func (m *Manager) SettlementValidateLoop(ctx context.Context) error { m.logger.Info("validating state updates to target height", "targetHeight", targetValidationHeight) for currH := m.SettlementValidator.NextValidationHeight(); currH <= targetValidationHeight; currH = m.SettlementValidator.NextValidationHeight() { - // get next batch that needs to be validated from SL + batch, err := m.SLClient.GetBatchAtHeight(currH) if err != nil { uevent.MustPublish(ctx, m.Pubsub, &events.DataHealthStatus{Error: err}, events.HealthStatusList) return err } - // validate batch + err = m.SettlementValidator.ValidateStateUpdate(batch) if err != nil { if errors.Is(err, gerrc.ErrFault) { @@ -51,7 +51,7 @@ func (m *Manager) SettlementValidateLoop(ctx context.Context) error { return err } - // update the last validated height to the batch last block height + m.SettlementValidator.UpdateLastValidatedHeight(batch.EndHeight) m.logger.Debug("state info validated", "lastValidatedHeight", m.SettlementValidator.GetLastValidatedHeight()) diff --git a/cmd/dymint/commands/init.go b/cmd/dymint/commands/init.go index 9587731fd..ce3ee91e3 100644 --- a/cmd/dymint/commands/init.go +++ b/cmd/dymint/commands/init.go @@ -14,7 +14,7 @@ import ( tmtime "github.com/tendermint/tendermint/types/time" ) -// InitFilesCmd initialises a fresh Dymint Core instance. + var InitFilesCmd = &cobra.Command{ Use: "init", Short: "Initialize Dymint", @@ -25,9 +25,9 @@ func initFiles(cmd *cobra.Command, args []string) error { return InitFilesWithConfig(tmconfig) } -// InitFilesWithConfig initialises a fresh Dymint instance. + func InitFilesWithConfig(config *cfg.Config) error { - // private validator + privValKeyFile := config.PrivValidatorKeyFile() privValStateFile := config.PrivValidatorStateFile() var pv *privval.FilePV @@ -52,7 +52,7 @@ func InitFilesWithConfig(config *cfg.Config) error { logger.Info("Generated node key", "path", nodeKeyFile) } - // genesis file + genFile := config.GenesisFile() if tmos.FileExists(genFile) { logger.Info("Found genesis file", "path", genFile) diff --git a/cmd/dymint/commands/root.go b/cmd/dymint/commands/root.go index af981f80e..8db70aedc 100644 --- a/cmd/dymint/commands/root.go +++ b/cmd/dymint/commands/root.go @@ -28,8 +28,8 @@ func registerFlagsRootCmd(cmd *cobra.Command) { cmd.PersistentFlags().String("log_level", tmconfig.LogLevel, "log level") } -// ParseConfig retrieves the default environment configuration, -// sets up the Dymint root and ensures that the root exists + + func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) { conf := cfg.DefaultConfig() err := viper.Unmarshal(conf) @@ -60,14 +60,14 @@ func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) { return conf, nil } -// RootCmd is the root command for Dymint core. 
+ var RootCmd = &cobra.Command{ Use: "dymint", Short: "ABCI-client implementation for dymension's autonomous rollapps", PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) { v := viper.GetViper() - // cmd.Flags() includes flags from this command and all persistent flags from the parent + if err := v.BindPFlags(cmd.Flags()); err != nil { return err } diff --git a/cmd/dymint/commands/show_node_id.go b/cmd/dymint/commands/show_node_id.go index 1ca1b3322..30d3c9e87 100644 --- a/cmd/dymint/commands/show_node_id.go +++ b/cmd/dymint/commands/show_node_id.go @@ -10,7 +10,7 @@ import ( "github.com/tendermint/tendermint/p2p" ) -// ShowNodeIDCmd dumps node's ID to the standard output. + var ShowNodeIDCmd = &cobra.Command{ Use: "show-node-id", Aliases: []string{"show_node_id"}, @@ -27,7 +27,7 @@ func showNodeID(cmd *cobra.Command, args []string) error { if err != nil { return err } - // convert nodeKey to libp2p key + host, err := libp2p.New(libp2p.Identity(signingKey)) if err != nil { return err diff --git a/cmd/dymint/commands/show_sequencer.go b/cmd/dymint/commands/show_sequencer.go index cb6e72955..2faff6840 100644 --- a/cmd/dymint/commands/show_sequencer.go +++ b/cmd/dymint/commands/show_sequencer.go @@ -9,13 +9,13 @@ import ( "github.com/tendermint/tendermint/privval" ) -// ShowSequencer adds capabilities for showing the validator info. + var ShowSequencer = &cobra.Command{ Use: "show-sequencer", Aliases: []string{"show_sequencer"}, Short: "Show this node's sequencer info", RunE: showSequencer, - // PreRun: deprecateSnakeCase, + } func showSequencer(cmd *cobra.Command, args []string) error { diff --git a/cmd/dymint/commands/start.go b/cmd/dymint/commands/start.go index 3bfa6e503..1615ff2cd 100644 --- a/cmd/dymint/commands/start.go +++ b/cmd/dymint/commands/start.go @@ -32,8 +32,8 @@ import ( var genesisHash []byte -// NewRunNodeCmd returns the command that allows the CLI to start a node. -// It can be used with a custom PrivValidator and in-process ABCI application. + + func NewRunNodeCmd() *cobra.Command { cmd := &cobra.Command{ Use: "start", @@ -125,7 +125,7 @@ func startInProcess(config *cfg.NodeConfig, tmConfig *tmcfg.Config, logger log.L logger.Info("Started dymint node") - // Stop upon receiving SIGTERM or CTRL-C. + tmos.TrapSignal(logger, func() { logger.Info("Caught SIGTERM. Exiting...") if dymintNode.IsRunning() { @@ -135,7 +135,7 @@ func startInProcess(config *cfg.NodeConfig, tmConfig *tmcfg.Config, logger log.L } }) - // Run forever. + select {} } @@ -148,7 +148,7 @@ func checkGenesisHash(config *tmcfg.Config) error { return nil } - // Calculate SHA-256 hash of the genesis file. + f, err := os.Open(config.GenesisFile()) if err != nil { return fmt.Errorf("can't open genesis file: %w", err) @@ -164,7 +164,7 @@ func checkGenesisHash(config *tmcfg.Config) error { } actualHash := h.Sum(nil) - // Compare with the flag. 
+ if !bytes.Equal(genesisHash, actualHash) { return fmt.Errorf( "--genesis_hash=%X does not match %s hash: %X", diff --git a/cmd/dymint/main.go b/cmd/dymint/main.go index 631383649..200c33f82 100644 --- a/cmd/dymint/main.go +++ b/cmd/dymint/main.go @@ -20,7 +20,7 @@ func main() { cli.NewCompletionCmd(rootCmd, true), ) - // Create & start node + rootCmd.AddCommand(commands.NewRunNodeCmd()) cmd := cli.PrepareBaseCmd(rootCmd, "DM", os.ExpandEnv(filepath.Join("$HOME", config.DefaultDymintDir))) diff --git a/config/config.go b/config/config.go index c19c58277..65b9e09e3 100644 --- a/config/config.go +++ b/config/config.go @@ -14,7 +14,7 @@ import ( ) const ( - // DefaultDymintDir is the default directory for dymint + DefaultDymintDir = ".dymint" DefaultConfigDirName = "config" DefaultConfigFileName = "dymint.toml" @@ -23,63 +23,63 @@ const ( MaxBatchSubmitTime = 1 * time.Hour ) -// NodeConfig stores Dymint node configuration. + type NodeConfig struct { - // parameters below are translated from existing config + RootDir string DBPath string RPC RPCConfig MempoolConfig tmcfg.MempoolConfig - // parameters below are dymint specific and read from config + BlockManagerConfig `mapstructure:",squash"` DAConfig string `mapstructure:"da_config"` SettlementLayer string `mapstructure:"settlement_layer"` SettlementConfig settlement.Config `mapstructure:",squash"` Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"` - // Config params for mock grpc da + DAGrpc grpc.Config `mapstructure:",squash"` - // P2P Options + P2PConfig `mapstructure:",squash"` - // DB Options + DBConfig `mapstructure:"db"` } -// BlockManagerConfig consists of all parameters required by BlockManagerConfig + type BlockManagerConfig struct { - // BlockTime defines how often new blocks are produced + BlockTime time.Duration `mapstructure:"block_time"` - // MaxIdleTime defines how long should block manager wait for new transactions before producing empty block + MaxIdleTime time.Duration `mapstructure:"max_idle_time"` - // MaxProofTime defines the max time to be idle, if txs that requires proof were included in last block + MaxProofTime time.Duration `mapstructure:"max_proof_time"` - // BatchSubmitMaxTime is how long should block manager wait for before submitting batch + BatchSubmitTime time.Duration `mapstructure:"batch_submit_time"` - // MaxSkewTime is the number of batches waiting to be submitted. Block production will be paused if this limit is reached. + MaxSkewTime time.Duration `mapstructure:"max_skew_time"` - // The size of the batch of blocks and commits in Bytes. We'll write every batch to the DA and the settlement layer. + BatchSubmitBytes uint64 `mapstructure:"batch_submit_bytes"` - // SequencerSetUpdateInterval defines the interval at which to fetch sequencer updates from the settlement layer + SequencerSetUpdateInterval time.Duration `mapstructure:"sequencer_update_interval"` } -// GetViperConfig reads configuration parameters from Viper instance. 
+ func (nc *NodeConfig) GetViperConfig(cmd *cobra.Command, homeDir string) error { v := viper.GetViper() - // Loads dymint toml config file + EnsureRoot(homeDir, nil) v.SetConfigName("dymint") - v.AddConfigPath(homeDir) // search root directory - v.AddConfigPath(filepath.Join(homeDir, DefaultConfigDirName)) // search root directory /config + v.AddConfigPath(homeDir) + v.AddConfigPath(filepath.Join(homeDir, DefaultConfigDirName)) - // bind flags so we could override config file with flags + err := BindDymintFlags(cmd, v) if err != nil { return err } - // Read viper config + err = v.ReadInConfig() if err != nil { return err @@ -126,7 +126,7 @@ func (nc NodeConfig) Validate() error { return nil } -// Validate BlockManagerConfig + func (c BlockManagerConfig) Validate() error { if c.BlockTime < MinBlockTime { return fmt.Errorf("block_time cannot be less than %s", MinBlockTime) @@ -139,7 +139,7 @@ func (c BlockManagerConfig) Validate() error { if c.MaxIdleTime < 0 { return fmt.Errorf("max_idle_time must be positive or zero to disable") } - // MaxIdleTime zero disables adaptive block production. + if c.MaxIdleTime != 0 { if c.MaxIdleTime <= c.BlockTime || c.MaxIdleTime > MaxBatchSubmitTime { return fmt.Errorf("max_idle_time must be greater than block_time and not greater than %s", MaxBatchSubmitTime) @@ -203,14 +203,14 @@ func (nc NodeConfig) validateInstrumentation() error { return nc.Instrumentation.Validate() } -// InstrumentationConfig defines the configuration for metrics reporting. + type InstrumentationConfig struct { - // When true, Prometheus metrics are served under /metrics on - // PrometheusListenAddr. - // Check out the documentation for the list of available metrics. + + + Prometheus bool `mapstructure:"prometheus"` - // Address to listen for Prometheus collector(s) connections. + PrometheusListenAddr string `mapstructure:"prometheus_listen_addr"` } @@ -222,11 +222,11 @@ func (ic InstrumentationConfig) Validate() error { return nil } -// DBConfig holds configuration for the database. + type DBConfig struct { - // SyncWrites makes sure that data is written to disk before returning from a write operation. + SyncWrites bool `mapstructure:"sync_writes"` - // InMemory sets the database to run in-memory, without touching the disk. + InMemory bool `mapstructure:"in_memory"` } diff --git a/config/defaults.go b/config/defaults.go index b72ef3aac..0a75b14a6 100644 --- a/config/defaults.go +++ b/config/defaults.go @@ -9,7 +9,7 @@ import ( ) const ( - // DefaultListenAddress is a default listen address for P2P client. + DefaultListenAddress = "/ip4/0.0.0.0/tcp/26656" DefaultHomeDir = "sequencer_keys" @@ -17,10 +17,10 @@ const ( DefaultSequencerSetUpdateInterval = 3 * time.Minute ) -// DefaultNodeConfig keeps default values of NodeConfig + var DefaultNodeConfig = *DefaultConfig("") -// DefaultConfig returns a default configuration for dymint node. 
+ func DefaultConfig(home string) *NodeConfig { cfg := &NodeConfig{ BlockManagerConfig: BlockManagerConfig{ @@ -57,7 +57,7 @@ func DefaultConfig(home string) *NodeConfig { } keyringDir := filepath.Join(home, DefaultHomeDir) - // Setting default params for sl grpc mock + defaultSlGrpcConfig := settlement.GrpcConfig{ Host: "127.0.0.1", Port: 7981, @@ -79,7 +79,7 @@ func DefaultConfig(home string) *NodeConfig { } cfg.SettlementConfig = defaultSLconfig - // Setting default params for da grpc mock + defaultDAGrpc := grpc.Config{ Host: "127.0.0.1", Port: 7980, diff --git a/config/flags.go b/config/flags.go index 1f1eaf83e..d476c39f2 100644 --- a/config/flags.go +++ b/config/flags.go @@ -32,11 +32,11 @@ const ( FlagP2PBootstrapRetryTime = "dymint.p2p_config.bootstrap_retry_time" ) -// AddNodeFlags adds Dymint specific configuration options to cobra Command. -// -// This function is called in cosmos-sdk. + + + func AddNodeFlags(cmd *cobra.Command) { - // Add tendermint default flags + tmcmd.AddNodeFlags(cmd) def := DefaultNodeConfig @@ -58,7 +58,7 @@ func AddNodeFlags(cmd *cobra.Command) { cmd.Flags().String(FlagP2PListenAddress, def.P2PConfig.ListenAddress, "P2P listen address") cmd.Flags().String(FlagP2PBootstrapNodes, def.P2PConfig.BootstrapNodes, "P2P bootstrap nodes") cmd.Flags().Duration(FlagP2PBootstrapRetryTime, def.P2PConfig.BootstrapRetryTime, "P2P bootstrap time") - cmd.Flags().Uint64(FlagP2PGossipCacheSize, uint64(def.P2PConfig.GossipSubCacheSize), "P2P Gossiped blocks cache size") //nolint:gosec // GossipSubCacheSize should be always positive + cmd.Flags().Uint64(FlagP2PGossipCacheSize, uint64(def.P2PConfig.GossipSubCacheSize), "P2P Gossiped blocks cache size") } func BindDymintFlags(cmd *cobra.Command, v *viper.Viper) error { diff --git a/config/p2p.go b/config/p2p.go index a2449ed43..71b18b180 100644 --- a/config/p2p.go +++ b/config/p2p.go @@ -5,27 +5,27 @@ import ( "time" ) -// P2PConfig stores configuration related to peer-to-peer networking. + type P2PConfig struct { - // Listening address for P2P connections + ListenAddress string `mapstructure:"p2p_listen_address"` - // List of nodes used for P2P bootstrapping + BootstrapNodes string `mapstructure:"p2p_bootstrap_nodes"` - // List of nodes persistent P2P nodes + PersistentNodes string `mapstructure:"p2p_persistent_nodes"` - // Size of the Gossipsub router cache + GossipSubCacheSize int `mapstructure:"p2p_gossip_cache_size"` - // Time interval a node tries to bootstrap again, in case no nodes connected + BootstrapRetryTime time.Duration `mapstructure:"p2p_bootstrap_retry_time"` - // Param used to enable block sync from p2p + BlockSyncEnabled bool `mapstructure:"p2p_blocksync_enabled"` - // Time interval used by a node to request missing blocks (gap between cached blocks and local height) on demand from other peers using blocksync + BlockSyncRequestIntervalTime time.Duration `mapstructure:"p2p_blocksync_block_request_interval"` - // Param used to enable the advertisement of the node to be part of the P2P network in the DHT + AdvertisingEnabled bool `mapstructure:"p2p_advertising_enabled"` } -// Validate P2PConfig + func (c P2PConfig) Validate() error { if c.GossipSubCacheSize < 0 { return fmt.Errorf("gossipsub cache size cannot be negative") diff --git a/config/rpc.go b/config/rpc.go index d6b14303a..baa5e8e7b 100644 --- a/config/rpc.go +++ b/config/rpc.go @@ -1,38 +1,38 @@ package config -// RPCConfig holds RPC configuration params. 
+ type RPCConfig struct { ListenAddress string - // Cross Origin Resource Sharing settings + CORSAllowedOrigins []string CORSAllowedMethods []string CORSAllowedHeaders []string - // Maximum number of simultaneous connections (including WebSocket). - // Does not include gRPC connections. See grpc-max-open-connections - // If you want to accept a larger number than the default, make sure - // you increase your OS limits. - // 0 - unlimited. - // Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} - // 1024 - 40 - 10 - 50 = 924 = ~900 + + + + + + + MaxOpenConnections int - // The path to a file containing certificate that is used to create the HTTPS server. - // Might be either absolute path or path related to Tendermint's config directory. - // - // If the certificate is signed by a certificate authority, - // the certFile should be the concatenation of the server's certificate, any intermediates, - // and the CA's certificate. - // - // NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. - // Otherwise, HTTP server is run. + + + + + + + + + TLSCertFile string `mapstructure:"tls-cert-file"` - // The path to a file containing matching private key that is used to create the HTTPS server. - // Might be either absolute path or path related to tendermint's config directory. - // - // NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. - // Otherwise, HTTP server is run. + + + + + TLSKeyFile string `mapstructure:"tls-key-file"` } diff --git a/config/toml.go b/config/toml.go index 9ee3d544d..4bf51e276 100644 --- a/config/toml.go +++ b/config/toml.go @@ -9,7 +9,7 @@ import ( tmos "github.com/tendermint/tendermint/libs/os" ) -// DefaultDirPerm is the default permissions used when creating directories. + const DefaultDirPerm = 0o700 var configTemplate *template.Template @@ -24,10 +24,10 @@ func init() { } } -/****** these are for production settings ***********/ -// EnsureRoot creates the root, config, and data directories if they don't exist, -// and panics if it fails. + + + func EnsureRoot(rootDir string, defaultConfig *NodeConfig) { if err := tmos.EnsureDir(rootDir, DefaultDirPerm); err != nil { panic(err.Error()) @@ -42,13 +42,13 @@ func EnsureRoot(rootDir string, defaultConfig *NodeConfig) { configFilePath := filepath.Join(rootDir, DefaultConfigDirName, DefaultConfigFileName) - // Write default config file if missing. + if !tmos.FileExists(configFilePath) { WriteConfigFile(configFilePath, defaultConfig) } } -// WriteConfigFile renders config using the template and writes it to configFilePath. + func WriteConfigFile(configFilePath string, config *NodeConfig) { var buffer bytes.Buffer @@ -59,8 +59,8 @@ func WriteConfigFile(configFilePath string, config *NodeConfig) { tmos.MustWriteFile(configFilePath, buffer.Bytes(), 0o644) } -// Note: any changes to the comments/variables/mapstructure -// must be reflected in the appropriate struct in config/config.go + + const defaultConfigTemplate = ` ####################################################### ### Dymint Configuration Options ### diff --git a/conv/config.go b/conv/config.go index 65498dc12..ec9b9e7f4 100644 --- a/conv/config.go +++ b/conv/config.go @@ -8,10 +8,10 @@ import ( "github.com/dymensionxyz/dymint/config" ) -// GetNodeConfig translates Tendermint's configuration into Dymint configuration. -// -// This method only translates configuration, and doesn't verify it. 
If some option is missing in Tendermint's -// config, it's skipped during translation. + + + + func GetNodeConfig(nodeConf *config.NodeConfig, tmConf *tmcfg.Config) error { if tmConf == nil { return errors.New("tendermint config is nil but required to populate Dymint config") @@ -31,12 +31,7 @@ func GetNodeConfig(nodeConf *config.NodeConfig, tmConf *tmcfg.Config) error { if tmConf.Mempool == nil { return errors.New("tendermint mempool config is nil but required to populate Dymint config") } - /* - In the above, we are copying the rpc/p2p from Tendermint's configuration to Dymint's configuration. - This was implemented by the original rollkit authors, and they have not provided any explanation for this. - - For the mempool we simply copy the object. If we want to be more selective, we can adjust later. - */ + nodeConf.MempoolConfig = *tmConf.Mempool return nil diff --git a/conv/crypto.go b/conv/crypto.go index b2c49e18a..4f04470fa 100644 --- a/conv/crypto.go +++ b/conv/crypto.go @@ -8,7 +8,7 @@ import ( "github.com/tendermint/tendermint/p2p" ) -// GetNodeKey creates libp2p private key from Tendermints NodeKey. + func GetNodeKey(nodeKey *p2p.NodeKey) (crypto.PrivKey, error) { if nodeKey == nil || nodeKey.PrivKey == nil { return nil, ErrNilKey diff --git a/da/avail/avail.go b/da/avail/avail.go index 81c30b48b..3d375b000 100644 --- a/da/avail/avail.go +++ b/da/avail/avail.go @@ -34,7 +34,7 @@ const ( DataCallMethod = "submit_data" DataCallSectionIndex = 29 DataCallMethodIndex = 1 - maxBlobSize = 2097152 // 2MB according to Avail docs https://docs.availproject.org/docs/build-with-avail/overview#expandable-blockspace + maxBlobSize = 2097152 ) type SubstrateApiI interface { @@ -74,35 +74,35 @@ var ( _ da.BatchRetriever = &DataAvailabilityLayerClient{} ) -// WithClient is an option which sets the client. + func WithClient(client SubstrateApiI) da.Option { return func(dalc da.DataAvailabilityLayerClient) { dalc.(*DataAvailabilityLayerClient).client = client } } -// WithTxInclusionTimeout is an option which sets the timeout for waiting for transaction inclusion. + func WithTxInclusionTimeout(timeout time.Duration) da.Option { return func(dalc da.DataAvailabilityLayerClient) { dalc.(*DataAvailabilityLayerClient).txInclusionTimeout = timeout } } -// WithBatchRetryDelay is an option which sets the delay between batch retries. + func WithBatchRetryDelay(delay time.Duration) da.Option { return func(dalc da.DataAvailabilityLayerClient) { dalc.(*DataAvailabilityLayerClient).batchRetryDelay = delay } } -// WithBatchRetryAttempts is an option which sets the number of batch retries. + func WithBatchRetryAttempts(attempts uint) da.Option { return func(dalc da.DataAvailabilityLayerClient) { dalc.(*DataAvailabilityLayerClient).batchRetryAttempts = attempts } } -// Init initializes DataAvailabilityLayerClient instance. 
+ func (c *DataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.Server, _ store.KV, logger types.Logger, options ...da.Option) error { c.logger = logger c.synced = make(chan struct{}, 1) @@ -114,18 +114,18 @@ func (c *DataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.S } } - // Set defaults + c.pubsubServer = pubsubServer c.txInclusionTimeout = defaultTxInculsionTimeout c.batchRetryDelay = defaultBatchRetryDelay c.batchRetryAttempts = defaultBatchRetryAttempts - // Apply options + for _, apply := range options { apply(c) } - // If client wasn't set, create a new one + if c.client == nil { substrateApiClient, err := gsrpc.NewSubstrateAPI(c.config.ApiURL) if err != nil { @@ -144,32 +144,32 @@ func (c *DataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.S return nil } -// Start starts DataAvailabilityLayerClient instance. + func (c *DataAvailabilityLayerClient) Start() error { c.synced <- struct{}{} return nil } -// Stop stops DataAvailabilityLayerClient instance. + func (c *DataAvailabilityLayerClient) Stop() error { c.cancel() close(c.synced) return nil } -// WaitForSyncing is used to check when the DA light client finished syncing + func (m *DataAvailabilityLayerClient) WaitForSyncing() { <-m.synced } -// GetClientType returns client type. + func (c *DataAvailabilityLayerClient) GetClientType() da.Client { return da.Avail } -// RetrieveBatches retrieves batch from DataAvailabilityLayerClient instance. + func (c *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMetaData) da.ResultRetrieveBatch { - //nolint:typecheck + blockHash, err := c.client.GetBlockHash(daMetaData.Height) if err != nil { return da.ResultRetrieveBatch{ @@ -190,10 +190,10 @@ func (c *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMet }, } } - // Convert the data returned to batches + var batches []*types.Batch for _, ext := range block.Block.Extrinsics { - // these values below are specific indexes only for data submission, differs with each extrinsic + if ext.Signature.AppID.Int64() == c.config.AppID && ext.Method.CallIndex.SectionIndex == DataCallSectionIndex && ext.Method.CallIndex.MethodIndex == DataCallMethodIndex { @@ -206,16 +206,16 @@ func (c *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMet c.logger.Error("unmarshal batch", "daHeight", daMetaData.Height, "error", err) continue } - // Convert the proto batch to a batch + batch := &types.Batch{} err = batch.FromProto(&pbBatch) if err != nil { c.logger.Error("batch from proto", "daHeight", daMetaData.Height, "error", err) continue } - // Add the batch to the list + batches = append(batches, batch) - // Remove the bytes we just decoded. + data = data[proto.Size(&pbBatch):] } @@ -233,7 +233,7 @@ func (c *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMet } } -// SubmitBatch submits batch to DataAvailabilityLayerClient instance. + func (c *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultSubmitBatch { blob, err := batch.MarshalBinary() if err != nil { @@ -250,8 +250,8 @@ func (c *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS return c.submitBatchLoop(blob) } -// submitBatchLoop tries submitting the batch. In case we get a configuration error we would like to stop trying, -// otherwise, for network error we keep trying indefinitely. 
+ + func (c *DataAvailabilityLayerClient) submitBatchLoop(dataBlob []byte) da.ResultSubmitBatch { for { select { @@ -318,8 +318,8 @@ func (c *DataAvailabilityLayerClient) submitBatchLoop(dataBlob []byte) da.Result } } -// broadcastTx broadcasts the transaction to the network and in case of success -// returns the block height the batch was included in. + + func (c *DataAvailabilityLayerClient) broadcastTx(tx []byte) (uint64, error) { meta, err := c.client.GetMetadataLatest() if err != nil { @@ -329,7 +329,7 @@ func (c *DataAvailabilityLayerClient) broadcastTx(tx []byte) (uint64, error) { if err != nil { return 0, fmt.Errorf("%w: %s", da.ErrTxBroadcastConfigError, err) } - // Create the extrinsic + ext := availtypes.NewExtrinsic(newCall) genesisHash, err := c.client.GetBlockHash(0) if err != nil { @@ -343,7 +343,7 @@ func (c *DataAvailabilityLayerClient) broadcastTx(tx []byte) (uint64, error) { if err != nil { return 0, fmt.Errorf("%w: %s", da.ErrTxBroadcastConfigError, err) } - // Get the account info for the nonce + key, err := availtypes.CreateStorageKey(meta, "System", "Account", keyringPair.PublicKey) if err != nil { return 0, fmt.Errorf("%w: %s", da.ErrTxBroadcastConfigError, err) @@ -364,16 +364,16 @@ func (c *DataAvailabilityLayerClient) broadcastTx(tx []byte) (uint64, error) { SpecVersion: rv.SpecVersion, Tip: availtypes.NewUCompactFromUInt(c.config.Tip), TransactionVersion: rv.TransactionVersion, - AppID: availtypes.NewUCompactFromUInt(uint64(c.config.AppID)), //nolint:gosec // AppID should be always positive + AppID: availtypes.NewUCompactFromUInt(uint64(c.config.AppID)), } - // Sign the transaction using Alice's default account + err = ext.Sign(keyringPair, options) if err != nil { return 0, fmt.Errorf("%w: %s", da.ErrTxBroadcastConfigError, err) } - // Send the extrinsic + sub, err := c.client.SubmitAndWatchExtrinsic(ext) if err != nil { return 0, fmt.Errorf("%w: %s", da.ErrTxBroadcastNetworkError, err) @@ -419,7 +419,7 @@ func (c *DataAvailabilityLayerClient) broadcastTx(tx []byte) (uint64, error) { } } -// CheckBatchAvailability checks batch availability in DataAvailabilityLayerClient instance. + func (c *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASubmitMetaData) da.ResultCheckBatch { return da.ResultCheckBatch{ BaseResult: da.BaseResult{ @@ -429,7 +429,7 @@ func (c *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASu } } -// getHeightFromHash returns the block height from the block hash + func (c *DataAvailabilityLayerClient) getHeightFromHash(hash availtypes.Hash) (uint64, error) { c.logger.Debug("Getting block height from hash", "hash", hash) header, err := c.client.GetHeader(hash) @@ -439,12 +439,12 @@ func (c *DataAvailabilityLayerClient) getHeightFromHash(hash availtypes.Hash) (u return uint64(header.Number), nil } -// GetMaxBlobSizeBytes returns the maximum allowed blob size in the DA, used to check the max batch size configured + func (d *DataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { return maxBlobSize } -// GetBalance returns the balance for a specific address + func (c *DataAvailabilityLayerClient) GetSignerBalance() (da.Balance, error) { return da.Balance{}, nil } diff --git a/da/celestia/celestia.go b/da/celestia/celestia.go index 80cd32f85..6eda30bf5 100644 --- a/da/celestia/celestia.go +++ b/da/celestia/celestia.go @@ -26,7 +26,7 @@ import ( uretry "github.com/dymensionxyz/dymint/utils/retry" ) -// DataAvailabilityLayerClient use celestia-node public API. 
+ type DataAvailabilityLayerClient struct { rpc celtypes.CelestiaRPCClient @@ -43,35 +43,35 @@ var ( _ da.BatchRetriever = &DataAvailabilityLayerClient{} ) -// WithRPCClient sets rpc client. + func WithRPCClient(rpc celtypes.CelestiaRPCClient) da.Option { return func(daLayerClient da.DataAvailabilityLayerClient) { daLayerClient.(*DataAvailabilityLayerClient).rpc = rpc } } -// WithRPCRetryDelay sets failed rpc calls retry delay. + func WithRPCRetryDelay(delay time.Duration) da.Option { return func(daLayerClient da.DataAvailabilityLayerClient) { daLayerClient.(*DataAvailabilityLayerClient).config.RetryDelay = delay } } -// WithRPCAttempts sets failed rpc calls retry attempts. + func WithRPCAttempts(attempts int) da.Option { return func(daLayerClient da.DataAvailabilityLayerClient) { daLayerClient.(*DataAvailabilityLayerClient).config.RetryAttempts = &attempts } } -// WithSubmitBackoff sets submit retry delay config. + func WithSubmitBackoff(c uretry.BackoffConfig) da.Option { return func(daLayerClient da.DataAvailabilityLayerClient) { daLayerClient.(*DataAvailabilityLayerClient).config.Backoff = c } } -// Init initializes DataAvailabilityLayerClient instance. + func (c *DataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.Server, _ store.KV, logger types.Logger, options ...da.Option) error { c.logger = logger c.synced = make(chan struct{}, 1) @@ -85,7 +85,7 @@ func (c *DataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.S c.pubsubServer = pubsubServer - // Apply options + for _, apply := range options { apply(c) } @@ -113,7 +113,7 @@ func createConfig(bz []byte) (c Config, err error) { return c, errors.New("gas prices must be set") } - // NOTE: 0 is valid value for RetryAttempts + if c.RetryDelay == 0 { c.RetryDelay = defaultRpcRetryDelay @@ -128,11 +128,11 @@ func createConfig(bz []byte) (c Config, err error) { return c, nil } -// Start prepares DataAvailabilityLayerClient to work. + func (c *DataAvailabilityLayerClient) Start() (err error) { c.logger.Info("Starting Celestia Data Availability Layer Client.") - // other client has already been set + if c.rpc != nil { c.logger.Info("Celestia-node client already set.") return nil @@ -150,7 +150,7 @@ func (c *DataAvailabilityLayerClient) Start() (err error) { return } -// Stop stops DataAvailabilityLayerClient. + func (c *DataAvailabilityLayerClient) Stop() error { c.logger.Info("Stopping Celestia Data Availability Layer Client.") err := c.pubsubServer.Stop() @@ -162,17 +162,17 @@ func (c *DataAvailabilityLayerClient) Stop() error { return nil } -// WaitForSyncing is used to check when the DA light client finished syncing + func (m *DataAvailabilityLayerClient) WaitForSyncing() { <-m.synced } -// GetClientType returns client type. + func (c *DataAvailabilityLayerClient) GetClientType() da.Client { return da.Celestia } -// SubmitBatch submits a batch to the DA layer. 
+ func (c *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultSubmitBatch { data, err := batch.MarshalBinary() if err != nil { @@ -204,10 +204,10 @@ func (c *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS return da.ResultSubmitBatch{} default: - // TODO(srene): Split batch in multiple blobs if necessary if supported + height, commitment, err := c.submit(data) if errors.Is(err, gerrc.ErrInternal) { - // no point retrying if it's because of our code being wrong + err = fmt.Errorf("submit: %w", err) return da.ResultSubmitBatch{ BaseResult: da.BaseResult{ @@ -273,7 +273,7 @@ func (c *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMet resultRetrieveBatch = c.retrieveBatches(daMetaData) return resultRetrieveBatch.Error }, - retry.Attempts(uint(*c.config.RetryAttempts)), //nolint:gosec // RetryAttempts should be always positive + retry.Attempts(uint(*c.config.RetryAttempts)), retry.DelayType(retry.FixedDelay), retry.Delay(c.config.RetryDelay), ) @@ -368,7 +368,7 @@ func (c *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASu return nil }, - retry.Attempts(uint(*c.config.RetryAttempts)), //nolint:gosec // RetryAttempts should be always positive + retry.Attempts(uint(*c.config.RetryAttempts)), retry.DelayType(retry.FixedDelay), retry.Delay(c.config.RetryDelay), ) @@ -392,7 +392,7 @@ func (c *DataAvailabilityLayerClient) checkBatchAvailability(daMetaData *da.DASu dah, err := c.getDataAvailabilityHeaders(daMetaData.Height) if err != nil { - // Returning Data Availability header Data Root for dispute validation + return da.ResultCheckBatch{ BaseResult: da.BaseResult{ Code: da.StatusError, @@ -407,10 +407,10 @@ func (c *DataAvailabilityLayerClient) checkBatchAvailability(daMetaData *da.DASu proof, err := c.getProof(daMetaData) if err != nil || proof == nil { - // TODO (srene): Not getting proof means there is no existing data for the namespace and the commitment (the commitment is wrong). - // Therefore we need to prove whether the commitment is wrong or the span does not exists. - // In case the span is correct it is necessary to return the data for the span and the proofs to the data root, so we can prove the data - // is the data for the span, and reproducing the commitment will generate a different one. + + + + return da.ResultCheckBatch{ BaseResult: da.BaseResult{ Code: da.StatusError, @@ -433,9 +433,9 @@ func (c *DataAvailabilityLayerClient) checkBatchAvailability(daMetaData *da.DASu if daMetaData.Index > 0 && daMetaData.Length > 0 { if index != daMetaData.Index || shares != daMetaData.Length { - // TODO (srene): In this case the commitment is correct but does not match the span. - // If the span is correct we have to repeat the previous step (sending data + proof of data) - // In case the span is not correct we need to send unavailable proof by sending proof of any row root to data root + + + return da.ResultCheckBatch{ CheckMetaData: DACheckMetaData, BaseResult: da.BaseResult{ @@ -449,9 +449,9 @@ func (c *DataAvailabilityLayerClient) checkBatchAvailability(daMetaData *da.DASu } included, err = c.validateProof(daMetaData, proof) - // The both cases below (there is an error validating the proof or the proof is wrong) should not happen - // if we consider correct functioning of the celestia light node. - // This will only happen in case the previous step the celestia light node returned wrong proofs.. 
+ + + if err != nil { return da.ResultCheckBatch{ BaseResult: da.BaseResult{ @@ -485,7 +485,7 @@ func (c *DataAvailabilityLayerClient) checkBatchAvailability(daMetaData *da.DASu } } -// Submit submits the Blobs to Data Availability layer. + func (c *DataAvailabilityLayerClient) submit(daBlob da.Blob) (uint64, da.Commitment, error) { blobs, commitments, err := c.blobsAndCommitments(daBlob) if err != nil { @@ -554,7 +554,7 @@ func (c *DataAvailabilityLayerClient) getDataAvailabilityHeaders(height uint64) return headers.DAH, nil } -// Celestia syncing in background + func (c *DataAvailabilityLayerClient) sync(rpc *openrpc.Client) { sync := func() error { done := make(chan error, 1) @@ -579,7 +579,7 @@ func (c *DataAvailabilityLayerClient) sync(rpc *openrpc.Client) { } err := retry.Do(sync, - retry.Attempts(0), // try forever + retry.Attempts(0), retry.Delay(10*time.Second), retry.LastErrorOnly(true), retry.DelayType(retry.FixedDelay), @@ -596,12 +596,12 @@ func (c *DataAvailabilityLayerClient) sync(rpc *openrpc.Client) { } } -// GetMaxBlobSizeBytes returns the maximum allowed blob size in the DA, used to check the max batch size configured + func (d *DataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { return maxBlobSizeBytes } -// GetSignerBalance returns the balance for a specific address + func (d *DataAvailabilityLayerClient) GetSignerBalance() (da.Balance, error) { ctx, cancel := context.WithTimeout(d.ctx, d.config.Timeout) defer cancel() diff --git a/da/celestia/config.go b/da/celestia/config.go index 025a42e33..a1f764d4d 100644 --- a/da/celestia/config.go +++ b/da/celestia/config.go @@ -24,7 +24,7 @@ var defaultSubmitBackoff = uretry.NewBackoffConfig( uretry.WithMaxDelay(time.Second*6), ) -// Config stores Celestia DALC configuration parameters. + type Config struct { BaseURL string `json:"base_url,omitempty"` AppNodeURL string `json:"app_node_url,omitempty"` @@ -60,13 +60,13 @@ func (c *Config) InitNamespaceID() error { if c.NamespaceIDStr == "" { c.NamespaceIDStr = generateRandNamespaceID() } - // Decode NamespaceID from string to byte array + namespaceBytes, err := hex.DecodeString(c.NamespaceIDStr) if err != nil { return fmt.Errorf("decode string: %w", err) } - // Check if NamespaceID is of correct length (10 bytes) + if len(namespaceBytes) != openrpcns.NamespaceVersionZeroIDSize { return fmt.Errorf("wrong length: got: %v: expect %v", len(namespaceBytes), openrpcns.NamespaceVersionZeroIDSize) } diff --git a/da/celestia/mock/messages.go b/da/celestia/mock/messages.go index cf97dd2c5..d0140a084 100644 --- a/da/celestia/mock/messages.go +++ b/da/celestia/mock/messages.go @@ -5,8 +5,8 @@ import ( "encoding/binary" ) -// This code is extracted from celestia-app. It's here to build shares from messages (serialized blocks). -// TODO(tzdybal): if we stop using `/namespaced_shares` we can get rid of this file. + + const ( shareSize = 256 @@ -14,8 +14,8 @@ const ( msgShareSize = shareSize - namespaceSize ) -// splitMessage breaks the data in a message into the minimum number of -// namespaced shares + + func splitMessage(rawData []byte, nid []byte) []NamespacedShare { shares := make([]NamespacedShare, 0) firstRawShare := append(append( @@ -40,10 +40,10 @@ func splitMessage(rawData []byte, nid []byte) []NamespacedShare { return shares } -// Share contains the raw share data without the corresponding namespace. + type Share []byte -// NamespacedShare extends a Share with the corresponding namespace. 
+ type NamespacedShare struct { Share ID []byte @@ -68,8 +68,8 @@ func zeroPadIfNecessary(share []byte, width int) []byte { return share } -// marshalDelimited marshals the raw data (excluding the namespace) of this -// message and prefixes it with the length of that encoding. + + func marshalDelimited(data []byte) ([]byte, error) { lenBuf := make([]byte, binary.MaxVarintLen64) length := uint64(len(data)) @@ -77,8 +77,8 @@ func marshalDelimited(data []byte) ([]byte, error) { return append(lenBuf[:n], data...), nil } -// appendToShares appends raw data as shares. -// Used to build shares from blocks/messages. + + func appendToShares(shares []NamespacedShare, nid []byte, rawData []byte) []NamespacedShare { if len(rawData) <= msgShareSize { rawShare := append(append( @@ -89,7 +89,7 @@ func appendToShares(shares []NamespacedShare, nid []byte, rawData []byte) []Name paddedShare := zeroPadIfNecessary(rawShare, shareSize) share := NamespacedShare{paddedShare, nid} shares = append(shares, share) - } else { // len(rawData) > msgShareSize + } else { shares = append(shares, splitMessage(rawData, nid)...) } return shares diff --git a/da/celestia/mock/server.go b/da/celestia/mock/server.go index 8b76d44fb..98434285a 100644 --- a/da/celestia/mock/server.go +++ b/da/celestia/mock/server.go @@ -20,7 +20,7 @@ import ( "github.com/dymensionxyz/dymint/types" ) -// Server mocks celestia-node HTTP API. + type Server struct { da *local.DataAvailabilityLayerClient blockTime time.Duration @@ -28,7 +28,7 @@ type Server struct { logger types.Logger } -// NewServer creates new instance of Server. + func NewServer(blockTime time.Duration, logger types.Logger) *Server { return &Server{ da: new(local.DataAvailabilityLayerClient), @@ -37,7 +37,7 @@ func NewServer(blockTime time.Duration, logger types.Logger) *Server { } } -// Start starts HTTP server with given listener. + func (s *Server) Start(listener net.Listener) error { err := s.da.Init([]byte(s.blockTime.String()), pubsub.NewServer(), store.NewDefaultInMemoryKVStore(), s.logger) if err != nil { @@ -56,7 +56,7 @@ func (s *Server) Start(listener net.Listener) error { return nil } -// Stop shuts down the Server. + func (s *Server) Stop() { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() diff --git a/da/celestia/rpc.go b/da/celestia/rpc.go index f4dac9d64..be0265f1f 100644 --- a/da/celestia/rpc.go +++ b/da/celestia/rpc.go @@ -14,49 +14,49 @@ import ( var _ types.CelestiaRPCClient = &OpenRPC{} -// OpenRPC is a wrapper around the openrpc client. + type OpenRPC struct { rpc *openrpc.Client } -// NewOpenRPC creates a new openrpc client. + func NewOpenRPC(rpc *openrpc.Client) *OpenRPC { return &OpenRPC{ rpc: rpc, } } -// GetAll gets all blobs. + func (c *OpenRPC) GetAll(ctx context.Context, height uint64, namespaces []share.Namespace) ([]*blob.Blob, error) { return c.rpc.Blob.GetAll(ctx, height, namespaces) } -// Submit blobs. + func (c *OpenRPC) Submit(ctx context.Context, blobs []*blob.Blob, options *blob.SubmitOptions) (uint64, error) { return c.rpc.Blob.Submit(ctx, blobs, options) } -// GetProof gets the proof for a specific share commitment. 
+ func (c *OpenRPC) GetProof(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Proof, error) { return c.rpc.Blob.GetProof(ctx, height, namespace, commitment) } -// Get blob for a specific share commitment + func (c *OpenRPC) Get(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Blob, error) { return c.rpc.Blob.Get(ctx, height, namespace, commitment) } -// GetByHeight gets the header by height + func (c *OpenRPC) GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { return c.rpc.Header.GetByHeight(ctx, height) } -// Included checks if a blob is included in the chain + func (c *OpenRPC) Included(ctx context.Context, height uint64, namespace share.Namespace, proof *blob.Proof, commitment blob.Commitment) (bool, error) { return c.rpc.Blob.Included(ctx, height, namespace, proof, commitment) } -// GetSignerBalance balance for a specific address + func (c *OpenRPC) GetSignerBalance(ctx context.Context) (*state.Balance, error) { return c.rpc.State.Balance(ctx) } diff --git a/da/celestia/types/rpc.go b/da/celestia/types/rpc.go index 2949f65ec..8fded2362 100644 --- a/da/celestia/types/rpc.go +++ b/da/celestia/types/rpc.go @@ -10,16 +10,16 @@ import ( ) type CelestiaRPCClient interface { - /* ---------------------------------- blob ---------------------------------- */ + Get(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Blob, error) GetAll(context.Context, uint64, []share.Namespace) ([]*blob.Blob, error) GetProof(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Proof, error) Included(ctx context.Context, height uint64, namespace share.Namespace, proof *blob.Proof, commitment blob.Commitment) (bool, error) Submit(ctx context.Context, blobs []*blob.Blob, options *blob.SubmitOptions) (uint64, error) - /* --------------------------------- header --------------------------------- */ + GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) - /* ---------------------------------- state --------------------------------- */ + GetSignerBalance(ctx context.Context) (*state.Balance, error) } diff --git a/da/celestia/types/types.go b/da/celestia/types/types.go index 9a10f3a0b..52be192a6 100644 --- a/da/celestia/types/types.go +++ b/da/celestia/types/types.go @@ -4,74 +4,74 @@ import ( "math" ) -// These constants were originally sourced from: -// https://github.com/celestiaorg/celestia-specs/blob/master/src/specs/consensus.md#constants -// -// They can not change throughout the lifetime of a network. + + + + const ( - // NamespaceVersionSize is the size of a namespace version in bytes. + NamespaceVersionSize = 1 - // NamespaceVersionMaxValue is the maximum value a namespace version can be. - // This const must be updated if NamespaceVersionSize is changed. + + NamespaceVersionMaxValue = math.MaxUint8 - // NamespaceIDSize is the size of a namespace ID in bytes. + NamespaceIDSize = 28 - // NamespaceSize is the size of a namespace (version + ID) in bytes. + NamespaceSize = NamespaceVersionSize + NamespaceIDSize - // ShareSize is the size of a share in bytes. + ShareSize = 512 - // ShareInfoBytes is the number of bytes reserved for information. The info - // byte contains the share version and a sequence start idicator. 
+ + ShareInfoBytes = 1 - // SequenceLenBytes is the number of bytes reserved for the sequence length - // that is present in the first share of a sequence. + + SequenceLenBytes = 4 - // ShareVersionZero is the first share version format. + ShareVersionZero = uint8(0) - // DefaultShareVersion is the defacto share version. Use this if you are - // unsure of which version to use. + + DefaultShareVersion = ShareVersionZero - // CompactShareReservedBytes is the number of bytes reserved for the location of - // the first unit (transaction, ISR) in a compact share. + + CompactShareReservedBytes = 4 - // FirstCompactShareContentSize is the number of bytes usable for data in - // the first compact share of a sequence. + + FirstCompactShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - SequenceLenBytes - CompactShareReservedBytes - // ContinuationCompactShareContentSize is the number of bytes usable for - // data in a continuation compact share of a sequence. + + ContinuationCompactShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - CompactShareReservedBytes - // FirstSparseShareContentSize is the number of bytes usable for data in the - // first sparse share of a sequence. + + FirstSparseShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - SequenceLenBytes - // ContinuationSparseShareContentSize is the number of bytes usable for data - // in a continuation sparse share of a sequence. + + ContinuationSparseShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - // MinSquareSize is the smallest original square width. + MinSquareSize = 1 - // MinshareCount is the minimum number of shares allowed in the original - // data square. + + MinShareCount = MinSquareSize * MinSquareSize - // MaxShareVersion is the maximum value a share version can be. + MaxShareVersion = 127 - // Celestia matrix size + DefaultGovMaxSquareSize = 64 - // Default maximum bytes per blob allowed + DefaultMaxBytes = DefaultGovMaxSquareSize * DefaultGovMaxSquareSize * ContinuationSparseShareContentSize ) diff --git a/da/da.go b/da/da.go index 3bde8023f..cd85c0e7d 100644 --- a/da/da.go +++ b/da/da.go @@ -15,30 +15,30 @@ import ( "github.com/dymensionxyz/dymint/types" ) -// StatusCode is a type for DA layer return status. -// TODO: define an enum of different non-happy-path cases -// that might need to be handled by Dymint independent of -// the underlying DA chain. Use int32 to match the protobuf -// enum representation. + + + + + type StatusCode int32 -// Commitment should contain serialized cryptographic commitment to Blob value. + type Commitment = []byte -// Blob is the data submitted/received from DA interface. + type Blob = []byte -// Data Availability return codes. + const ( StatusUnknown StatusCode = iota StatusSuccess StatusError ) -// Client defines all the possible da clients + type Client string -// Data availability clients + const ( Mock Client = "mock" Celestia Client = "celestia" @@ -46,34 +46,34 @@ const ( Grpc Client = "grpc" ) -// Option is a function that sets a parameter on the da layer. + type Option func(DataAvailabilityLayerClient) -// BaseResult contains basic information returned by DA layer. + type BaseResult struct { - // Code is to determine if the action succeeded. + Code StatusCode - // Message may contain DA layer specific information (like DA block height/hash, detailed error message, etc) + Message string - // Error is the error returned by the DA layer + Error error } -// DAMetaData contains meta data about a batch on the Data Availability Layer. 
+ type DASubmitMetaData struct { - // Height is the height of the block in the da layer + Height uint64 - // Namespace ID + Namespace []byte - // Client is the client to use to fetch data from the da layer + Client Client - // Share commitment, for each blob, used to obtain blobs and proofs + Commitment Commitment - // Initial position for each blob in the NMT + Index int - // Number of shares of each blob + Length int - // any NMT root for the specific height, necessary for non-inclusion proof + Root []byte } @@ -84,9 +84,9 @@ type Balance struct { const PathSeparator = "|" -// ToPath converts a DAMetaData to a path. + func (d *DASubmitMetaData) ToPath() string { - // convert uint64 to string + if d.Commitment != nil { commitment := hex.EncodeToString(d.Commitment) dataroot := hex.EncodeToString(d.Root) @@ -109,7 +109,7 @@ func (d *DASubmitMetaData) ToPath() string { } } -// FromPath parses a path to a DAMetaData. + func (d *DASubmitMetaData) FromPath(path string) (*DASubmitMetaData, error) { pathParts := strings.FieldsFunc(path, func(r rune) bool { return r == rune(PathSeparator[0]) }) if len(pathParts) < 2 { @@ -125,7 +125,7 @@ func (d *DASubmitMetaData) FromPath(path string) (*DASubmitMetaData, error) { Height: height, Client: Client(pathParts[0]), } - // TODO: check per DA and panic if not enough parts + if len(pathParts) == 7 { submitData.Index, err = strconv.Atoi(pathParts[2]) if err != nil { @@ -152,93 +152,93 @@ func (d *DASubmitMetaData) FromPath(path string) (*DASubmitMetaData, error) { return submitData, nil } -// DAMetaData contains meta data about a batch on the Data Availability Layer. + type DACheckMetaData struct { - // Height is the height of the block in the da layer + Height uint64 - // Client is the client to use to fetch data from the da layer + Client Client - // Submission index in the Hub + SLIndex uint64 - // Namespace ID + Namespace []byte - // Share commitment, for each blob, used to obtain blobs and proofs + Commitment Commitment - // Initial position for each blob in the NMT + Index int - // Number of shares of each blob + Length int - // Proofs necessary to validate blob inclusion in the specific height + Proofs []*blob.Proof - // NMT roots for each NMT Proof + NMTRoots []byte - // Proofs necessary to validate blob inclusion in the specific height + RowProofs []*merkle.Proof - // any NMT root for the specific height, necessary for non-inclusion proof + Root []byte } -// ResultSubmitBatch contains information returned from DA layer after block submission. + type ResultSubmitBatch struct { BaseResult - // DAHeight informs about a height on Data Availability Layer for given result. + SubmitMetaData *DASubmitMetaData } -// ResultCheckBatch contains information about block availability, returned from DA layer client. + type ResultCheckBatch struct { BaseResult - // DAHeight informs about a height on Data Availability Layer for given result. + CheckMetaData *DACheckMetaData } -// ResultRetrieveBatch contains batch of blocks returned from DA layer client. + type ResultRetrieveBatch struct { BaseResult - // Block is the full block retrieved from Data Availability Layer. - // If Code is not equal to StatusSuccess, it has to be nil. + + Batches []*types.Batch - // DAHeight informs about a height on Data Availability Layer for given result. + CheckMetaData *DACheckMetaData } -// DataAvailabilityLayerClient defines generic interface for DA layer block submission. -// It also contains life-cycle methods. 
+ + type DataAvailabilityLayerClient interface { - // Init is called once to allow DA client to read configuration and initialize resources. + Init(config []byte, pubsubServer *pubsub.Server, kvStore store.KV, logger types.Logger, options ...Option) error - // Start is called once, after Init. It's implementation should start operation of DataAvailabilityLayerClient. + Start() error - // Stop is called once, when DataAvailabilityLayerClient is no longer needed. + Stop() error - // SubmitBatch submits the passed in block to the DA layer. - // This should create a transaction which (potentially) - // triggers a state transition in the DA layer. + + + SubmitBatch(batch *types.Batch) ResultSubmitBatch GetClientType() Client - // CheckBatchAvailability checks the availability of the blob submitted getting proofs and validating them + CheckBatchAvailability(daMetaData *DASubmitMetaData) ResultCheckBatch - // Used to check when the DA light client finished syncing + WaitForSyncing() - // Returns the maximum allowed blob size in the DA, used to check the max batch size configured + GetMaxBlobSizeBytes() uint32 - // GetSignerBalance returns the balance for a specific address + GetSignerBalance() (Balance, error) } -// BatchRetriever is additional interface that can be implemented by Data Availability Layer Client that is able to retrieve -// block data from DA layer. This gives the ability to use it for block synchronization. + + type BatchRetriever interface { - // RetrieveBatches returns blocks at given data layer height from data availability layer. + RetrieveBatches(daMetaData *DASubmitMetaData) ResultRetrieveBatch - // CheckBatchAvailability checks the availability of the blob received getting proofs and validating them + CheckBatchAvailability(daMetaData *DASubmitMetaData) ResultCheckBatch } diff --git a/da/errors.go b/da/errors.go index ba02343a8..dca7871b7 100644 --- a/da/errors.go +++ b/da/errors.go @@ -7,26 +7,26 @@ import ( ) var ( - // ErrFailedTxBuild is returned when transaction build fails. + ErrTxBroadcastConfigError = errors.New("failed building tx") - // ErrFailedTxBroadcast is returned when transaction broadcast fails. + ErrTxBroadcastNetworkError = errors.New("failed broadcasting tx") - // ErrTxBroadcastTimeout is returned when transaction broadcast times out. + ErrTxBroadcastTimeout = errors.New("broadcast timeout error") - // ErrUnableToGetProof is returned when proof is not available. + ErrUnableToGetProof = errors.New("unable to get proof") - // ErrRetrieval is returned when retrieval rpc falls + ErrRetrieval = errors.New("retrieval failed") - // ErrBlobNotFound is returned when blob is not found. + ErrBlobNotFound = errors.New("blob not found") - // ErrBlobNotIncluded is returned when blob is not included. + ErrBlobNotIncluded = errors.New("blob not included") - // ErrBlobNotParsed is returned when blob cannot be parsed + ErrBlobNotParsed = errors.New("unable to parse blob to batch") - // ErrProofNotMatching is returned when proof does not match. 
+ ErrProofNotMatching = errors.New("proof not matching") - // ErrNameSpace is returned when wrong namespace used + ErrNameSpace = errors.New("namespace not matching") - // ErrDAMismatch is returned when the DA client used does not match the da client specified in the da path of the state update + ErrDAMismatch = gerrc.ErrInvalidArgument.Wrap("DA in config not matching DA path") ) diff --git a/da/grpc/grpc.go b/da/grpc/grpc.go index 8636cf583..7daa0c667 100644 --- a/da/grpc/grpc.go +++ b/da/grpc/grpc.go @@ -16,9 +16,9 @@ import ( "github.com/tendermint/tendermint/libs/pubsub" ) -const maxBlobSize = 2097152 // 2MB (equivalent to avail or celestia) +const maxBlobSize = 2097152 + -// DataAvailabilityLayerClient is a generic client that proxies all DA requests via gRPC. type DataAvailabilityLayerClient struct { config Config @@ -28,14 +28,14 @@ type DataAvailabilityLayerClient struct { logger types.Logger } -// Config contains configuration options for DataAvailabilityLayerClient. + type Config struct { - // TODO(tzdybal): add more options! + Host string `json:"host"` Port int `json:"port"` } -// DefaultConfig defines default values for DataAvailabilityLayerClient configuration. + var DefaultConfig = Config{ Host: "127.0.0.1", Port: 7980, @@ -46,7 +46,7 @@ var ( _ da.BatchRetriever = &DataAvailabilityLayerClient{} ) -// Init sets the configuration options. + func (d *DataAvailabilityLayerClient) Init(config []byte, _ *pubsub.Server, _ store.KV, logger types.Logger, options ...da.Option) error { d.logger = logger d.synced = make(chan struct{}, 1) @@ -57,14 +57,14 @@ func (d *DataAvailabilityLayerClient) Init(config []byte, _ *pubsub.Server, _ st return json.Unmarshal(config, &d.config) } -// Start creates connection to gRPC server and instantiates gRPC client. + func (d *DataAvailabilityLayerClient) Start() error { d.logger.Info("starting GRPC DALC", "host", d.config.Host, "port", d.config.Port) d.synced <- struct{}{} var err error var opts []grpc.DialOption - // TODO(tzdybal): add more options + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) d.conn, err = grpc.Dial(d.config.Host+":"+strconv.Itoa(d.config.Port), opts...) if err != nil { @@ -75,23 +75,23 @@ func (d *DataAvailabilityLayerClient) Start() error { return nil } -// Stop closes connection to gRPC server. + func (d *DataAvailabilityLayerClient) Stop() error { d.logger.Info("stopping GRPC DALC") return d.conn.Close() } -// WaitForSyncing is used to check when the DA light client finished syncing + func (m *DataAvailabilityLayerClient) WaitForSyncing() { <-m.synced } -// GetClientType returns client type. + func (d *DataAvailabilityLayerClient) GetClientType() da.Client { return da.Grpc } -// SubmitBatch proxies SubmitBatch request to gRPC server. + func (d *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultSubmitBatch { resp, err := d.client.SubmitBatch(context.TODO(), &dalc.SubmitBatchRequest{Batch: batch.ToProto()}) if err != nil { @@ -111,7 +111,7 @@ func (d *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS } } -// CheckBatchAvailability proxies CheckBatchAvailability request to gRPC server. 
+ func (d *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASubmitMetaData) da.ResultCheckBatch { resp, err := d.client.CheckBatchAvailability(context.TODO(), &dalc.CheckBatchAvailabilityRequest{DataLayerHeight: daMetaData.Height}) if err != nil { @@ -122,12 +122,12 @@ func (d *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASu } } -// GetMaxBlobSizeBytes returns the maximum allowed blob size in the DA, used to check the max batch size configured + func (d *DataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { return maxBlobSize } -// RetrieveBatches proxies RetrieveBlocks request to gRPC server. + func (d *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMetaData) da.ResultRetrieveBatch { resp, err := d.client.RetrieveBatches(context.TODO(), &dalc.RetrieveBatchesRequest{DataLayerHeight: daMetaData.Height}) if err != nil { diff --git a/da/grpc/mockserv/mockserv.go b/da/grpc/mockserv/mockserv.go index a8f21e508..e303e2901 100644 --- a/da/grpc/mockserv/mockserv.go +++ b/da/grpc/mockserv/mockserv.go @@ -17,7 +17,7 @@ import ( "github.com/tendermint/tendermint/libs/pubsub" ) -// GetServer creates and returns gRPC server instance. + func GetServer(kv store.KV, conf grpcda.Config, mockConfig []byte) *grpc.Server { logger := tmlog.NewTMLogger(os.Stdout) diff --git a/da/local/local.go b/da/local/local.go index 3852b2797..009beaab8 100644 --- a/da/local/local.go +++ b/da/local/local.go @@ -1,7 +1,7 @@ package local import ( - "crypto/sha1" //#nosec + "crypto/sha1" "encoding/binary" "math/rand" "sync/atomic" @@ -14,8 +14,8 @@ import ( "github.com/tendermint/tendermint/libs/pubsub" ) -// DataAvailabilityLayerClient is intended only for usage in tests. -// It does actually ensures DA - it stores data in-memory. + + type DataAvailabilityLayerClient struct { logger types.Logger dalcKV store.KV @@ -26,7 +26,7 @@ type DataAvailabilityLayerClient struct { const ( defaultBlockTime = 3 * time.Second - maxBlobSize = 2097152 // 2MB (equivalent to avail or celestia) + maxBlobSize = 2097152 ) type config struct { @@ -38,7 +38,7 @@ var ( _ da.BatchRetriever = &DataAvailabilityLayerClient{} ) -// Init is called once to allow DA client to read configuration and initialize resources. + func (m *DataAvailabilityLayerClient) Init(config []byte, _ *pubsub.Server, dalcKV store.KV, logger types.Logger, options ...da.Option) error { m.logger = logger m.dalcKV = dalcKV @@ -56,7 +56,7 @@ func (m *DataAvailabilityLayerClient) Init(config []byte, _ *pubsub.Server, dalc return nil } -// Start implements DataAvailabilityLayerClient interface. + func (m *DataAvailabilityLayerClient) Start() error { m.logger.Debug("Mock Data Availability Layer Client starting") m.synced <- struct{}{} @@ -70,26 +70,26 @@ func (m *DataAvailabilityLayerClient) Start() error { return nil } -// Stop implements DataAvailabilityLayerClient interface. + func (m *DataAvailabilityLayerClient) Stop() error { m.logger.Debug("Mock Data Availability Layer Client stopped") close(m.synced) return nil } -// WaitForSyncing is used to check when the DA light client finished syncing + func (m *DataAvailabilityLayerClient) WaitForSyncing() { <-m.synced } -// GetClientType returns client type. + func (m *DataAvailabilityLayerClient) GetClientType() da.Client { return da.Mock } -// SubmitBatch submits the passed in batch to the DA layer. -// This should create a transaction which (potentially) -// triggers a state transition in the DA layer. 
+ + + func (m *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultSubmitBatch { daHeight := m.daHeight.Load() @@ -99,7 +99,7 @@ func (m *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS if err != nil { return da.ResultSubmitBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: err.Error(), Error: err}} } - hash := sha1.Sum(uint64ToBinary(batch.EndHeight())) //#nosec + hash := sha1.Sum(uint64ToBinary(batch.EndHeight())) err = m.dalcKV.Set(getKey(daHeight, batch.StartHeight()), hash[:]) if err != nil { return da.ResultSubmitBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: err.Error(), Error: err}} @@ -109,7 +109,7 @@ func (m *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS return da.ResultSubmitBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: err.Error(), Error: err}} } - m.daHeight.Store(daHeight + 1) // guaranteed no ABA problem as submit batch is only called when the object is locked + m.daHeight.Store(daHeight + 1) return da.ResultSubmitBatch{ BaseResult: da.BaseResult{ @@ -123,13 +123,13 @@ func (m *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS } } -// CheckBatchAvailability queries DA layer to check data availability of block corresponding to given header. + func (m *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASubmitMetaData) da.ResultCheckBatch { batchesRes := m.RetrieveBatches(daMetaData) return da.ResultCheckBatch{BaseResult: da.BaseResult{Code: batchesRes.Code, Message: batchesRes.Message, Error: batchesRes.Error}, CheckMetaData: batchesRes.CheckMetaData} } -// RetrieveBatches returns block at given height from data availability layer. + func (m *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMetaData) da.ResultRetrieveBatch { if daMetaData.Height >= m.daHeight.Load() { return da.ResultRetrieveBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: "batch not found", Error: da.ErrBlobNotFound}} @@ -174,11 +174,11 @@ func getKey(daHeight uint64, height uint64) []byte { } func (m *DataAvailabilityLayerClient) updateDAHeight() { - blockStep := rand.Uint64()%10 + 1 //#nosec + blockStep := rand.Uint64()%10 + 1 m.daHeight.Add(blockStep) } -// GetMaxBlobSizeBytes returns the maximum allowed blob size in the DA, used to check the max batch size configured + func (d *DataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { return maxBlobSize } diff --git a/da/registry/registry.go b/da/registry/registry.go index b520c41c9..4779e2ba0 100644 --- a/da/registry/registry.go +++ b/da/registry/registry.go @@ -8,7 +8,7 @@ import ( "github.com/dymensionxyz/dymint/da/local" ) -// this is a central registry for all Data Availability Layer Clients + var clients = map[string]func() da.DataAvailabilityLayerClient{ "mock": func() da.DataAvailabilityLayerClient { return &local.DataAvailabilityLayerClient{} }, "grpc": func() da.DataAvailabilityLayerClient { return &grpc.DataAvailabilityLayerClient{} }, @@ -16,7 +16,7 @@ var clients = map[string]func() da.DataAvailabilityLayerClient{ "avail": func() da.DataAvailabilityLayerClient { return &avail.DataAvailabilityLayerClient{} }, } -// GetClient returns client identified by name. + func GetClient(name string) da.DataAvailabilityLayerClient { f, ok := clients[name] if !ok { @@ -25,7 +25,7 @@ func GetClient(name string) da.DataAvailabilityLayerClient { return f() } -// RegisteredClients returns names of all DA clients in registry. 
+ func RegisteredClients() []string { registered := make([]string, 0, len(clients)) for name := range clients { diff --git a/indexers/blockindexer/block.go b/indexers/blockindexer/block.go index 08d2f6d16..0ac87ba8f 100644 --- a/indexers/blockindexer/block.go +++ b/indexers/blockindexer/block.go @@ -8,19 +8,19 @@ import ( "github.com/tendermint/tendermint/types" ) -// BlockIndexer defines an interface contract for indexing block events. + type BlockIndexer interface { - // Has returns true if the given height has been indexed. An error is returned - // upon database query failure. + + Has(height int64) (bool, error) - // Index indexes BeginBlock and EndBlock events for a given block by its height. + Index(types.EventDataNewBlockHeader) error - // Search performs a query for block heights that match a given BeginBlock - // and Endblock event search criteria. + + Search(ctx context.Context, q *query.Query) ([]int64, error) - // Delete indexed block entries up to (but not including) a height. It returns number of entries pruned. + Prune(from, to uint64, logger log.Logger) (uint64, error) } diff --git a/indexers/blockindexer/kv/kv.go b/indexers/blockindexer/kv/kv.go index bb8ee295c..d2b1b813a 100644 --- a/indexers/blockindexer/kv/kv.go +++ b/indexers/blockindexer/kv/kv.go @@ -27,9 +27,9 @@ import ( var _ indexer.BlockIndexer = (*BlockerIndexer)(nil) -// BlockerIndexer implements a block indexer, indexing BeginBlock and EndBlock -// events with an underlying KV store. Block events are indexed by their height, -// such that matching search criteria returns the respective block height(s). + + + type BlockerIndexer struct { store store.KV } @@ -40,8 +40,8 @@ func New(store store.KV) *BlockerIndexer { } } -// Has returns true if the given height has been indexed. An error is returned -// upon database query failure. + + func (idx *BlockerIndexer) Has(height int64) (bool, error) { key, err := heightKey(height) if err != nil { @@ -55,18 +55,18 @@ func (idx *BlockerIndexer) Has(height int64) (bool, error) { return err == nil, err } -// Index indexes BeginBlock and EndBlock events for a given block by its height. -// The following is indexed: -// -// primary key: encode(block.height | height) => encode(height) -// BeginBlock events: encode(eventType.eventAttr|eventValue|height|begin_block) => encode(height) -// EndBlock events: encode(eventType.eventAttr|eventValue|height|end_block) => encode(height) + + + + + + func (idx *BlockerIndexer) Index(bh tmtypes.EventDataNewBlockHeader) error { batch := idx.store.NewBatch() defer batch.Discard() height := bh.Header.Height - // 1. index by height + key, err := heightKey(height) if err != nil { return fmt.Errorf("create block height index key: %w", err) @@ -75,18 +75,18 @@ func (idx *BlockerIndexer) Index(bh tmtypes.EventDataNewBlockHeader) error { return err } - // 2. index BeginBlock events + beginKeys, err := idx.indexEvents(batch, bh.ResultBeginBlock.Events, "begin_block", height) if err != nil { return fmt.Errorf("index BeginBlock events: %w", err) } - // 3. index EndBlock events + endKeys, err := idx.indexEvents(batch, bh.ResultEndBlock.Events, "end_block", height) if err != nil { return fmt.Errorf("index EndBlock events: %w", err) } - // 4. 
index all eventkeys by height key for easy pruning + err = idx.addEventKeys(height, &beginKeys, &endKeys, batch) if err != nil { return err @@ -94,11 +94,11 @@ func (idx *BlockerIndexer) Index(bh tmtypes.EventDataNewBlockHeader) error { return batch.Commit() } -// Search performs a query for block heights that match a given BeginBlock -// and Endblock event search criteria. The given query can match against zero, -// one or more block heights. In the case of height queries, i.e. block.height=H, -// if the height is indexed, that height alone will be returned. An error and -// nil slice is returned. Otherwise, a non-nil slice and nil error is returned. + + + + + func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, error) { results := make([]int64, 0) select { @@ -113,8 +113,8 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, return nil, fmt.Errorf("parse query conditions: %w", err) } - // If there is an exact height query, return the result immediately - // (if it exists). + + height, ok := lookForHeight(conditions) if ok { ok, err := idx.Has(height) @@ -132,11 +132,11 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, var heightsInitialized bool filteredHeights := make(map[string][]byte) - // conditions to skip because they're handled before "everything else" + skipIndexes := make([]int, 0) - // Extract ranges. If both upper and lower bounds exist, it's better to get - // them in order as to not iterate over kvs that are not within range. + + ranges, rangeIndexes := indexer.LookForRanges(conditions) if len(ranges) > 0 { skipIndexes = append(skipIndexes, rangeIndexes...) @@ -155,8 +155,8 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, heightsInitialized = true - // Ignore any remaining conditions if the first condition resulted in no - // matches (assuming implicit AND operand). + + if len(filteredHeights) == 0 { break } @@ -169,7 +169,7 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, } } - // for all other conditions + for i, c := range conditions { if intInSlice(i, skipIndexes) { continue @@ -188,8 +188,8 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, heightsInitialized = true - // Ignore any remaining conditions if the first condition resulted in no - // matches (assuming implicit AND operand). + + if len(filteredHeights) == 0 { break } @@ -201,7 +201,7 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, } } - // fetch matching heights + results = make([]int64, 0, len(filteredHeights)) for _, hBz := range filteredHeights { cont := true @@ -232,12 +232,12 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, return results, nil } -// matchRange returns all matching block heights that match a given QueryRange -// and start key. An already filtered result (filteredHeights) is provided such -// that any non-intersecting matches are removed. -// -// NOTE: The provided filteredHeights may be empty if no previous condition has -// matched. + + + + + + func (idx *BlockerIndexer) matchRange( ctx context.Context, qr indexer.QueryRange, @@ -245,8 +245,8 @@ func (idx *BlockerIndexer) matchRange( filteredHeights map[string][]byte, firstRun bool, ) (map[string][]byte, error) { - // A previous match was attempted but resulted in no matches, so we return - // no matches (assuming AND operand). 
+ + if !firstRun && len(filteredHeights) == 0 { return filteredHeights, nil } @@ -314,18 +314,18 @@ LOOP: } if len(tmpHeights) == 0 || firstRun { - // Either: - // - // 1. Regardless if a previous match was attempted, which may have had - // results, but no match was found for the current condition, then we - // return no matches (assuming AND operand). - // - // 2. A previous match was not attempted, so we return all results. + + + + + + + return tmpHeights, nil } - // Remove/reduce matches in filteredHashes that were not found in this - // match (tmpHashes). + + for k := range filteredHeights { cont := true @@ -348,12 +348,12 @@ LOOP: return filteredHeights, nil } -// match returns all matching heights that meet a given query condition and start -// key. An already filtered result (filteredHeights) is provided such that any -// non-intersecting matches are removed. -// -// NOTE: The provided filteredHeights may be empty if no previous condition has -// matched. + + + + + + func (idx *BlockerIndexer) match( ctx context.Context, c query.Condition, @@ -361,8 +361,8 @@ func (idx *BlockerIndexer) match( filteredHeights map[string][]byte, firstRun bool, ) (map[string][]byte, error) { - // A previous match was attempted but resulted in no matches, so we return - // no matches (assuming AND operand). + + if !firstRun && len(filteredHeights) == 0 { return filteredHeights, nil } @@ -457,18 +457,18 @@ func (idx *BlockerIndexer) match( } if len(tmpHeights) == 0 || firstRun { - // Either: - // - // 1. Regardless if a previous match was attempted, which may have had - // results, but no match was found for the current condition, then we - // return no matches (assuming AND operand). - // - // 2. A previous match was not attempted, so we return all results. + + + + + + + return tmpHeights, nil } - // Remove/reduce matches in filteredHeights that were not found in this - // match (tmpHeights). 
+ + for k := range filteredHeights { cont := true @@ -495,7 +495,7 @@ func (idx *BlockerIndexer) indexEvents(batch store.KVBatch, events []abci.Event, heightBz := int64ToBytes(height) keys := dmtypes.EventKeys{} for _, event := range events { - // only index events with a non-empty type + if len(event.Type) == 0 { continue } @@ -505,7 +505,7 @@ func (idx *BlockerIndexer) indexEvents(batch store.KVBatch, events []abci.Event, continue } - // index iff the event specified index:true and it's not a reserved event + compositeKey := fmt.Sprintf("%s.%s", event.Type, string(attr.Key)) if compositeKey == tmtypes.BlockHeightKey { return dmtypes.EventKeys{}, fmt.Errorf("event type and attribute key \"%s\" is reserved; please use a different key", compositeKey) @@ -546,9 +546,9 @@ func (idx *BlockerIndexer) pruneBlocks(from, to uint64, logger log.Logger) (uint return nil } - for h := int64(from); h < int64(to); h++ { //nolint:gosec // heights (from and to) are always positive and fall in int64 + for h := int64(from); h < int64(to); h++ { - // flush every 1000 blocks to avoid batches becoming too large + if toFlush > 1000 { err := flush(batch, h) if err != nil { @@ -592,7 +592,7 @@ func (idx *BlockerIndexer) pruneBlocks(from, to uint64, logger log.Logger) (uint } - err := flush(batch, int64(to)) //nolint:gosec // height is non-negative and falls in int64 + err := flush(batch, int64(to)) if err != nil { return 0, err } diff --git a/indexers/blockindexer/null/null.go b/indexers/blockindexer/null/null.go index e6ee3335f..ab80fa5a9 100644 --- a/indexers/blockindexer/null/null.go +++ b/indexers/blockindexer/null/null.go @@ -13,7 +13,7 @@ import ( var _ indexer.BlockIndexer = (*BlockerIndexer)(nil) -// TxIndex implements a no-op block indexer. + type BlockerIndexer struct{} func (idx *BlockerIndexer) Has(height int64) (bool, error) { diff --git a/indexers/blockindexer/query_range.go b/indexers/blockindexer/query_range.go index b4edf53c5..9b2798524 100644 --- a/indexers/blockindexer/query_range.go +++ b/indexers/blockindexer/query_range.go @@ -6,21 +6,21 @@ import ( "github.com/tendermint/tendermint/libs/pubsub/query" ) -// QueryRanges defines a mapping between a composite event key and a QueryRange. -// -// e.g.account.number => queryRange{lowerBound: 1, upperBound: 5} + + + type QueryRanges map[string]QueryRange -// QueryRange defines a range within a query condition. + type QueryRange struct { - LowerBound interface{} // int || time.Time - UpperBound interface{} // int || time.Time + LowerBound interface{} + UpperBound interface{} Key string IncludeLowerBound bool IncludeUpperBound bool } -// AnyBound returns either the lower bound if non-nil, otherwise the upper bound. + func (qr QueryRange) AnyBound() interface{} { if qr.LowerBound != nil { return qr.LowerBound @@ -29,8 +29,8 @@ func (qr QueryRange) AnyBound() interface{} { return qr.UpperBound } -// LowerBoundValue returns the value for the lower bound. If the lower bound is -// nil, nil will be returned. + + func (qr QueryRange) LowerBoundValue() interface{} { if qr.LowerBound == nil { return nil @@ -52,8 +52,8 @@ func (qr QueryRange) LowerBoundValue() interface{} { } } -// UpperBoundValue returns the value for the upper bound. If the upper bound is -// nil, nil will be returned. 
+ + func (qr QueryRange) UpperBoundValue() interface{} { if qr.UpperBound == nil { return nil @@ -75,8 +75,8 @@ func (qr QueryRange) UpperBoundValue() interface{} { } } -// LookForRanges returns a mapping of QueryRanges and the matching indexes in -// the provided query conditions. + + func LookForRanges(conditions []query.Condition) (ranges QueryRanges, indexes []int) { ranges = make(QueryRanges) for i, c := range conditions { @@ -110,8 +110,8 @@ func LookForRanges(conditions []query.Condition) (ranges QueryRanges, indexes [] return ranges, indexes } -// IsRangeOperation returns a boolean signifying if a query Operator is a range -// operation or not. + + func IsRangeOperation(op query.Operator) bool { switch op { case query.OpGreater, query.OpGreaterEqual, query.OpLess, query.OpLessEqual: diff --git a/indexers/txindex/indexer.go b/indexers/txindex/indexer.go index 281c1dccc..6e275a021 100644 --- a/indexers/txindex/indexer.go +++ b/indexers/txindex/indexer.go @@ -10,33 +10,33 @@ import ( "github.com/tendermint/tendermint/libs/pubsub/query" ) -// TxIndexer interface defines methods to index and search transactions. + type TxIndexer interface { - // AddBatch analyzes, indexes and stores a batch of transactions. + AddBatch(b *Batch) error - // Index analyzes, indexes and stores a single transaction. + Index(result *abci.TxResult) error - // Get returns the transaction specified by hash or nil if the transaction is not indexed - // or stored. + + Get(hash []byte) (*abci.TxResult, error) - // Search allows you to query for transactions. + Search(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) - // Delete index entries for the heights between from (included) and to (not included). It returns heights pruned + Prune(from, to uint64, logger log.Logger) (uint64, error) } -// Batch groups together multiple Index operations to be performed at the same time. -// NOTE: Batch is NOT thread-safe and must not be modified after starting its execution. + + type Batch struct { Height int64 Ops []*abci.TxResult } -// NewBatch creates a new Batch. + func NewBatch(n int64, height int64) *Batch { return &Batch{ Height: height, @@ -44,16 +44,16 @@ func NewBatch(n int64, height int64) *Batch { } } -// Add or update an entry for the given result.Index. + func (b *Batch) Add(result *abci.TxResult) error { b.Ops[result.Index] = result return nil } -// Size returns the total number of operations inside the batch. + func (b *Batch) Size() int { return len(b.Ops) } -// ErrorEmptyHash indicates empty hash + var ErrorEmptyHash = errors.New("transaction hash cannot be empty") diff --git a/indexers/txindex/indexer_service.go b/indexers/txindex/indexer_service.go index e5ec76696..16e022f92 100644 --- a/indexers/txindex/indexer_service.go +++ b/indexers/txindex/indexer_service.go @@ -11,14 +11,14 @@ import ( "github.com/tendermint/tendermint/types" ) -// XXX/TODO: These types should be moved to the indexer package. + const ( subscriber = "IndexerService" ) -// IndexerService connects event bus, transaction and block indexers together in -// order to index transactions and blocks coming from the event bus. + + type IndexerService struct { service.BaseService @@ -27,7 +27,7 @@ type IndexerService struct { eventBus *types.EventBus } -// NewIndexerService returns a new service instance. 
+ func NewIndexerService( txIdxr TxIndexer, blockIdxr indexer.BlockIndexer, @@ -38,12 +38,12 @@ func NewIndexerService( return is } -// OnStart implements service.Service by subscribing for all transactions -// and indexing them by events. + + func (is *IndexerService) OnStart() error { - // Use SubscribeUnbuffered here to ensure both subscriptions does not get - // cancelled due to not pulling messages fast enough. Cause this might - // sometimes happen when there are no other subscribers. + + + blockHeadersSub, err := is.eventBus.Subscribe( context.Background(), subscriber, @@ -94,16 +94,16 @@ func (is *IndexerService) OnStart() error { return nil } -// OnStop implements service.Service by unsubscribing from all transactions. + func (is *IndexerService) OnStop() { if is.eventBus.IsRunning() { _ = is.eventBus.UnsubscribeAll(context.Background(), subscriber) } } -// Prune removes tx and blocks indexed up to (but not including) a height. + func (is *IndexerService) Prune(to uint64, s store.Store) (uint64, error) { - // load indexer base height + indexerBaseHeight, err := s.LoadIndexerBaseHeight() if errors.Is(err, gerrc.ErrNotFound) { @@ -112,19 +112,19 @@ func (is *IndexerService) Prune(to uint64, s store.Store) (uint64, error) { return 0, err } - // prune indexed blocks + blockPruned, err := is.blockIdxr.Prune(indexerBaseHeight, to, is.Logger) if err != nil { return blockPruned, err } - // prune indexes txs + txPruned, err := is.txIdxr.Prune(indexerBaseHeight, to, is.Logger) if err != nil { return txPruned, err } - // store indexer base height + err = s.SaveIndexerBaseHeight(to) if err != nil { is.Logger.Error("saving indexer base height", "err", err) diff --git a/indexers/txindex/kv/kv.go b/indexers/txindex/kv/kv.go index e1ea88910..485ba01ea 100644 --- a/indexers/txindex/kv/kv.go +++ b/indexers/txindex/kv/kv.go @@ -29,20 +29,20 @@ const ( var _ txindex.TxIndexer = (*TxIndex)(nil) -// TxIndex is the simplest possible indexer, backed by key-value storage (levelDB). + type TxIndex struct { store store.KV } -// NewTxIndex creates new KV indexer. + func NewTxIndex(store store.KV) *TxIndex { return &TxIndex{ store: store, } } -// Get gets transaction from the TxIndex storage and returns it or nil if the -// transaction is not found. + + func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) { if len(hash) == 0 { return nil, txindex.ErrorEmptyHash @@ -65,10 +65,10 @@ func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) { return txResult, nil } -// AddBatch indexes a batch of transactions using the given list of events. Each -// key that indexed from the tx's events is a composite of the event type and -// the respective attribute's key delimited by a "." (eg. "account.number"). -// Any event with an empty type is not indexed. + + + + func (txi *TxIndex) AddBatch(b *txindex.Batch) error { storeBatch := txi.store.NewBatch() defer storeBatch.Discard() @@ -77,13 +77,13 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { for _, result := range b.Ops { hash := types.Tx(result.Tx).Hash() - // index tx by events + eventKeys, err := txi.indexEvents(result, hash, storeBatch) if err != nil { return err } eventKeysBatch.Keys = append(eventKeysBatch.Keys, eventKeys.Keys...) 
- // index by height (always) + err = storeBatch.Set(keyForHeight(result), hash) if err != nil { return err @@ -93,7 +93,7 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { if err != nil { return err } - // index by hash (always) + err = storeBatch.Set(hash, rawBytes) if err != nil { return err @@ -108,29 +108,29 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { return storeBatch.Commit() } -// Index indexes a single transaction using the given list of events. Each key -// that indexed from the tx's events is a composite of the event type and the -// respective attribute's key delimited by a "." (eg. "account.number"). -// Any event with an empty type is not indexed. + + + + func (txi *TxIndex) Index(result *abci.TxResult) error { b := txi.store.NewBatch() defer b.Discard() hash := types.Tx(result.Tx).Hash() - // index tx by events + eventKeys, err := txi.indexEvents(result, hash, b) if err != nil { return err } - // add event keys height index + err = txi.addEventKeys(result.Height, &eventKeys, b) if err != nil { return nil } - // index by height (always) + err = b.Set(keyForHeight(result), hash) if err != nil { return err @@ -140,7 +140,7 @@ func (txi *TxIndex) Index(result *abci.TxResult) error { if err != nil { return err } - // index by hash (always) + err = b.Set(hash, rawBytes) if err != nil { return err @@ -152,7 +152,7 @@ func (txi *TxIndex) Index(result *abci.TxResult) error { func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store store.KVBatch) (dmtypes.EventKeys, error) { eventKeys := dmtypes.EventKeys{} for _, event := range result.Result.Events { - // only index events with a non-empty type + if len(event.Type) == 0 { continue } @@ -162,7 +162,7 @@ func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store store. continue } - // index if `index: true` is set + compositeTag := fmt.Sprintf("%s.%s", event.Type, string(attr.Key)) if attr.GetIndex() { err := store.Set(keyForEvent(compositeTag, attr.Value, result), hash) @@ -177,17 +177,17 @@ func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store store. return eventKeys, nil } -// Search performs a search using the given query. -// -// It breaks the query into conditions (like "tx.height > 5"). For each -// condition, it queries the DB index. One special use cases here: (1) if -// "tx.hash" is found, it returns tx result for it (2) for range queries it is -// better for the client to provide both lower and upper bounds, so we are not -// performing a full scan. Results from querying indexes are then intersected -// and returned to the caller, in no particular order. -// -// Search will exit early and return any result fetched so far, -// when a message is received on the context chan. 
+ + + + + + + + + + + func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) { select { case <-ctx.Done(): @@ -199,13 +199,13 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul var hashesInitialized bool filteredHashes := make(map[string][]byte) - // get a list of conditions (like "tx.height > 5") + conditions, err := q.Conditions() if err != nil { return nil, fmt.Errorf("during parsing conditions from query: %w", err) } - // if there is a hash condition, return the result immediately + hash, ok, err := lookForHash(conditions) if err != nil { return nil, fmt.Errorf("during searching for a hash in the query: %w", err) @@ -221,12 +221,12 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul } } - // conditions to skip because they're handled before "everything else" + skipIndexes := make([]int, 0) - // extract ranges - // if both upper and lower bounds exist, it's better to get them in order not - // no iterate over kvs that are not within range. + + + ranges, rangeIndexes := indexer.LookForRanges(conditions) if len(ranges) > 0 { skipIndexes = append(skipIndexes, rangeIndexes...) @@ -236,8 +236,8 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul filteredHashes = txi.matchRange(ctx, qr, startKey(qr.Key), filteredHashes, true) hashesInitialized = true - // Ignore any remaining conditions if the first condition resulted - // in no matches (assuming implicit AND operand). + + if len(filteredHashes) == 0 { break } @@ -247,10 +247,10 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul } } - // if there is a height condition ("tx.height=3"), extract it + height := lookForHeight(conditions) - // for all other conditions + for i, c := range conditions { if intInSlice(i, skipIndexes) { continue @@ -260,8 +260,8 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul filteredHashes = txi.match(ctx, c, startKeyForCondition(c, height), filteredHashes, true) hashesInitialized = true - // Ignore any remaining conditions if the first condition resulted - // in no matches (assuming implicit AND operand). + + if len(filteredHashes) == 0 { break } @@ -283,7 +283,7 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul } results = append(results, res) - // Potentially exit early. + select { case <-ctx.Done(): cont = false @@ -308,7 +308,7 @@ func lookForHash(conditions []query.Condition) (hash []byte, ok bool, err error) return } -// lookForHeight returns a height if there is an "height=X" condition. + func lookForHeight(conditions []query.Condition) (height int64) { for _, c := range conditions { if c.CompositeKey == tmtypes.TxHeightKey && c.Op == query.OpEqual { @@ -318,11 +318,11 @@ func lookForHeight(conditions []query.Condition) (height int64) { return 0 } -// match returns all matching txs by hash that meet a given condition and start -// key. An already filtered result (filteredHashes) is provided such that any -// non-intersecting matches are removed. -// -// NOTE: filteredHashes may be empty if no previous condition has matched. + + + + + func (txi *TxIndex) match( ctx context.Context, c query.Condition, @@ -330,8 +330,8 @@ func (txi *TxIndex) match( filteredHashes map[string][]byte, firstRun bool, ) map[string][]byte { - // A previous match was attempted but resulted in no matches, so we return - // no matches (assuming AND operand). 
+ + if !firstRun && len(filteredHashes) == 0 { return filteredHashes } @@ -348,7 +348,7 @@ func (txi *TxIndex) match( tmpHashes[string(it.Value())] = it.Value() - // Potentially exit early. + select { case <-ctx.Done(): cont = false @@ -364,8 +364,8 @@ func (txi *TxIndex) match( } case c.Op == query.OpExists: - // XXX: can't use startKeyBz here because c.Operand is nil - // (e.g. "account.owner//" won't match w/ a single row) + + it := txi.store.PrefixIterator(startKey(c.CompositeKey)) defer it.Discard() @@ -374,7 +374,7 @@ func (txi *TxIndex) match( tmpHashes[string(it.Value())] = it.Value() - // Potentially exit early. + select { case <-ctx.Done(): cont = false @@ -390,9 +390,9 @@ func (txi *TxIndex) match( } case c.Op == query.OpContains: - // XXX: startKey does not apply here. - // For example, if startKey = "account.owner/an/" and search query = "account.owner CONTAINS an" - // we can't iterate with prefix "account.owner/an/" because we might miss keys like "account.owner/Ulan/" + + + it := txi.store.PrefixIterator(startKey(c.CompositeKey)) defer it.Discard() @@ -407,7 +407,7 @@ func (txi *TxIndex) match( tmpHashes[string(it.Value())] = it.Value() } - // Potentially exit early. + select { case <-ctx.Done(): cont = false @@ -426,25 +426,25 @@ func (txi *TxIndex) match( } if len(tmpHashes) == 0 || firstRun { - // Either: - // - // 1. Regardless if a previous match was attempted, which may have had - // results, but no match was found for the current condition, then we - // return no matches (assuming AND operand). - // - // 2. A previous match was not attempted, so we return all results. + + + + + + + return tmpHashes } - // Remove/reduce matches in filteredHashes that were not found in this - // match (tmpHashes). + + for k := range filteredHashes { cont := true if tmpHashes[k] == nil { delete(filteredHashes, k) - // Potentially exit early. + select { case <-ctx.Done(): cont = false @@ -460,11 +460,11 @@ func (txi *TxIndex) match( return filteredHashes } -// matchRange returns all matching txs by hash that meet a given queryRange and -// start key. An already filtered result (filteredHashes) is provided such that -// any non-intersecting matches are removed. -// -// NOTE: filteredHashes may be empty if no previous condition has matched. + + + + + func (txi *TxIndex) matchRange( ctx context.Context, qr indexer.QueryRange, @@ -472,8 +472,8 @@ func (txi *TxIndex) matchRange( filteredHashes map[string][]byte, firstRun bool, ) map[string][]byte { - // A previous match was attempted but resulted in no matches, so we return - // no matches (assuming AND operand). + + if !firstRun && len(filteredHashes) == 0 { return filteredHashes } @@ -512,15 +512,15 @@ LOOP: tmpHashes[string(it.Value())] = it.Value() } - // XXX: passing time in a ABCI Events is not yet implemented - // case time.Time: - // v := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64) - // if v == r.upperBound { - // break - // } + + + + + + } - // Potentially exit early. + select { case <-ctx.Done(): cont = false @@ -536,25 +536,25 @@ LOOP: } if len(tmpHashes) == 0 || firstRun { - // Either: - // - // 1. Regardless if a previous match was attempted, which may have had - // results, but no match was found for the current condition, then we - // return no matches (assuming AND operand). - // - // 2. A previous match was not attempted, so we return all results. + + + + + + + return tmpHashes } - // Remove/reduce matches in filteredHashes that were not found in this - // match (tmpHashes). 
+ + for k := range filteredHashes { cont := true if tmpHashes[k] == nil { delete(filteredHashes, k) - // Potentially exit early. + select { case <-ctx.Done(): cont = false @@ -592,9 +592,9 @@ func (txi *TxIndex) pruneTxsAndEvents(from, to uint64, logger log.Logger) (uint6 return nil } - for h := int64(from); h < int64(to); h++ { //nolint:gosec // heights (from and to) are always positive and fall in int64 + for h := int64(from); h < int64(to); h++ { - // flush every 1000 txs to avoid batches becoming too large + if toFlush > 1000 { err := flush(batch, h) if err != nil { @@ -605,7 +605,7 @@ func (txi *TxIndex) pruneTxsAndEvents(from, to uint64, logger log.Logger) (uint6 toFlush = 0 } - // first all events are pruned associated to the same height + prunedEvents, err := txi.pruneEvents(h, batch) pruned += prunedEvents toFlush += prunedEvents @@ -614,10 +614,10 @@ func (txi *TxIndex) pruneTxsAndEvents(from, to uint64, logger log.Logger) (uint6 continue } - // then all txs indexed are iterated by height + it := txi.store.PrefixIterator(prefixForHeight(h)) - // and deleted all indexed (by hash and by keyheight) + for ; it.Valid(); it.Next() { toFlush++ if err := batch.Delete(it.Key()); err != nil { @@ -635,7 +635,7 @@ func (txi *TxIndex) pruneTxsAndEvents(from, to uint64, logger log.Logger) (uint6 } - err := flush(batch, int64(to)) //nolint:gosec // height is non-negative and falls in int64 + err := flush(batch, int64(to)) if err != nil { return 0, err } @@ -669,7 +669,7 @@ func (txi *TxIndex) pruneEvents(height int64, batch store.KVBatch) (uint64, erro } func (txi *TxIndex) addEventKeys(height int64, eventKeys *dymint.EventKeys, batch store.KVBatch) error { - // index event keys by height + eventKeyHeight, err := eventHeightKey(height) if err != nil { return err @@ -684,7 +684,7 @@ func (txi *TxIndex) addEventKeys(height int64, eventKeys *dymint.EventKeys, batc return nil } -// Keys + func isTagKey(key []byte) bool { return strings.Count(string(key), tagKeySeparator) == 3 diff --git a/indexers/txindex/kv/utils.go b/indexers/txindex/kv/utils.go index 73cb223f2..05cb12c90 100644 --- a/indexers/txindex/kv/utils.go +++ b/indexers/txindex/kv/utils.go @@ -4,7 +4,7 @@ import "github.com/google/orderedcode" const TxEventHeightKey = "txevent.height" -// IntInSlice returns true if a is found in the list. + func intInSlice(a int, list []int) bool { for _, b := range list { if b == a { diff --git a/indexers/txindex/null/null.go b/indexers/txindex/null/null.go index 426b08099..7d2167389 100644 --- a/indexers/txindex/null/null.go +++ b/indexers/txindex/null/null.go @@ -13,20 +13,20 @@ import ( var _ txindex.TxIndexer = (*TxIndex)(nil) -// TxIndex acts as a /dev/null. + type TxIndex struct{} -// Get on a TxIndex is disabled and panics when invoked. + func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) { return nil, errors.New(`indexing is disabled (set 'tx_index = "kv"' in config)`) } -// AddBatch is a noop and always returns nil. + func (txi *TxIndex) AddBatch(batch *txindex.Batch) error { return nil } -// Index is a noop and always returns nil. + func (txi *TxIndex) Index(result *abci.TxResult) error { return nil } diff --git a/mempool/cache.go b/mempool/cache.go index 78aefa3c4..fdb11ea5b 100644 --- a/mempool/cache.go +++ b/mempool/cache.go @@ -7,31 +7,31 @@ import ( "github.com/tendermint/tendermint/types" ) -// TxCache defines an interface for raw transaction caching in a mempool. -// Currently, a TxCache does not allow direct reading or getting of transaction -// values. 
A TxCache is used primarily to push transactions and removing -// transactions. Pushing via Push returns a boolean telling the caller if the -// transaction already exists in the cache or not. + + + + + type TxCache interface { - // Reset resets the cache to an empty state. + Reset() - // Push adds the given raw transaction to the cache and returns true if it was - // newly added. Otherwise, it returns false. + + Push(tx types.Tx) bool - // Remove removes the given raw transaction from the cache. + Remove(tx types.Tx) - // Has reports whether tx is present in the cache. Checking for presence is - // not treated as an access of the value. + + Has(tx types.Tx) bool } var _ TxCache = (*LRUTxCache)(nil) -// LRUTxCache maintains a thread-safe LRU cache of raw transactions. The cache -// only stores the hash of the raw transaction. + + type LRUTxCache struct { mtx sync.Mutex size int @@ -47,8 +47,8 @@ func NewLRUTxCache(cacheSize int) *LRUTxCache { } } -// GetList returns the underlying linked-list that backs the LRU cache. Note, -// this should be used for testing purposes only! + + func (c *LRUTxCache) GetList() *list.List { return c.list } @@ -109,7 +109,7 @@ func (c *LRUTxCache) Has(tx types.Tx) bool { return ok } -// NopTxCache defines a no-op raw transaction cache. + type NopTxCache struct{} var _ TxCache = (*NopTxCache)(nil) diff --git a/mempool/clist/clist.go b/mempool/clist/clist.go index 2e4171b1c..ff94a4b49 100644 --- a/mempool/clist/clist.go +++ b/mempool/clist/clist.go @@ -1,15 +1,6 @@ package clist -/* -The purpose of CList is to provide a goroutine-safe linked-list. -This list can be traversed concurrently by any number of goroutines. -However, removed CElements cannot be added back. -NOTE: Not all methods of container/list are (yet) implemented. -NOTE: Removed elements need to DetachPrev or DetachNext consistently -to ensure garbage collection of removed elements. - -*/ import ( "fmt" @@ -18,29 +9,12 @@ import ( tmsync "github.com/tendermint/tendermint/libs/sync" ) -// MaxLength is the max allowed number of elements a linked list is -// allowed to contain. -// If more elements are pushed to the list it will panic. + + + const MaxLength = int(^uint(0) >> 1) -/* -CElement is an element of a linked-list -Traversal from a CElement is goroutine-safe. - -We can't avoid using WaitGroups or for-loops given the documentation -spec without re-implementing the primitives that already exist in -golang/sync. Notice that WaitGroup allows many go-routines to be -simultaneously released, which is what we want. Mutex doesn't do -this. RWMutex does this, but it's clumsy to use in the way that a -WaitGroup would be used -- and we'd end up having two RWMutex's for -prev/next each, which is doubly confusing. - -sync.Cond would be sort-of useful, but we don't need a write-lock in -the for-loop. Use sync.Cond when you need serial access to the -"condition". In our case our condition is if `next != nil || removed`, -and there's no reason to serialize that condition for goroutines -waiting on NextWait() (since it's just a read operation). -*/ + type CElement struct { mtx tmsync.RWMutex prev *CElement @@ -51,11 +25,11 @@ type CElement struct { nextWaitCh chan struct{} removed bool - Value interface{} // immutable + Value interface{} } -// Blocking implementation of Next(). -// May return nil iff CElement was tail and got removed. 
+ + func (e *CElement) NextWait() *CElement { for { e.mtx.RLock() @@ -69,13 +43,13 @@ func (e *CElement) NextWait() *CElement { } nextWg.Wait() - // e.next doesn't necessarily exist here. - // That's why we need to continue a for-loop. + + } } -// Blocking implementation of Prev(). -// May return nil iff CElement was head and got removed. + + func (e *CElement) PrevWait() *CElement { for { e.mtx.RLock() @@ -92,8 +66,8 @@ func (e *CElement) PrevWait() *CElement { } } -// PrevWaitChan can be used to wait until Prev becomes not nil. Once it does, -// channel will be closed. + + func (e *CElement) PrevWaitChan() <-chan struct{} { e.mtx.RLock() defer e.mtx.RUnlock() @@ -101,8 +75,8 @@ func (e *CElement) PrevWaitChan() <-chan struct{} { return e.prevWaitCh } -// NextWaitChan can be used to wait until Next becomes not nil. Once it does, -// channel will be closed. + + func (e *CElement) NextWaitChan() <-chan struct{} { e.mtx.RLock() defer e.mtx.RUnlock() @@ -110,7 +84,7 @@ func (e *CElement) NextWaitChan() <-chan struct{} { return e.nextWaitCh } -// Nonblocking, may return nil if at the end. + func (e *CElement) Next() *CElement { e.mtx.RLock() val := e.next @@ -118,7 +92,7 @@ func (e *CElement) Next() *CElement { return val } -// Nonblocking, may return nil if at the end. + func (e *CElement) Prev() *CElement { e.mtx.RLock() prev := e.prev @@ -153,20 +127,20 @@ func (e *CElement) DetachPrev() { e.mtx.Unlock() } -// NOTE: This function needs to be safe for -// concurrent goroutines waiting on nextWg. + + func (e *CElement) SetNext(newNext *CElement) { e.mtx.Lock() oldNext := e.next e.next = newNext if oldNext != nil && newNext == nil { - // See https://golang.org/pkg/sync/: - // - // If a WaitGroup is reused to wait for several independent sets of - // events, new Add calls must happen after all previous Wait calls have - // returned. - e.nextWg = waitGroup1() // WaitGroups are difficult to re-use. + + + + + + e.nextWg = waitGroup1() e.nextWaitCh = make(chan struct{}) } if oldNext == nil && newNext != nil { @@ -176,15 +150,15 @@ func (e *CElement) SetNext(newNext *CElement) { e.mtx.Unlock() } -// NOTE: This function needs to be safe for -// concurrent goroutines waiting on prevWg + + func (e *CElement) SetPrev(newPrev *CElement) { e.mtx.Lock() oldPrev := e.prev e.prev = newPrev if oldPrev != nil && newPrev == nil { - e.prevWg = waitGroup1() // WaitGroups are difficult to re-use. + e.prevWg = waitGroup1() e.prevWaitCh = make(chan struct{}) } if oldPrev == nil && newPrev != nil { @@ -199,7 +173,7 @@ func (e *CElement) SetRemoved() { e.removed = true - // This wakes up anyone waiting in either direction. + if e.prev == nil { e.prevWg.Done() close(e.prevWaitCh) @@ -211,20 +185,20 @@ func (e *CElement) SetRemoved() { e.mtx.Unlock() } -//-------------------------------------------------------------------------------- -// CList represents a linked list. -// The zero value for CList is an empty list ready to use. -// Operations are goroutine-safe. -// Panics if length grows beyond the max. + + + + + type CList struct { mtx tmsync.RWMutex wg *sync.WaitGroup waitCh chan struct{} - head *CElement // first element - tail *CElement // last element - len int // list length - maxLen int // max list length + head *CElement + tail *CElement + len int + maxLen int } func (l *CList) Init() *CList { @@ -239,11 +213,11 @@ func (l *CList) Init() *CList { return l } -// Return CList with MaxLength. CList will panic if it goes beyond MaxLength. 
+ func New() *CList { return newWithMax(MaxLength) } -// Return CList with given maxLength. -// Will panic if list exceeds given maxLength. + + func newWithMax(maxLength int) *CList { l := new(CList) l.maxLen = maxLength @@ -265,7 +239,7 @@ func (l *CList) Front() *CElement { } func (l *CList) FrontWait() *CElement { - // Loop until the head is non-nil else wait and try again + for { l.mtx.RLock() head := l.head @@ -276,7 +250,7 @@ func (l *CList) FrontWait() *CElement { return head } wg.Wait() - // NOTE: If you think l.head exists here, think harder. + } } @@ -298,13 +272,13 @@ func (l *CList) BackWait() *CElement { return tail } wg.Wait() - // l.tail doesn't necessarily exist here. - // That's why we need to continue a for-loop. + + } } -// WaitChan can be used to wait until Front or Back becomes not nil. Once it -// does, channel will be closed. + + func (l *CList) WaitChan() <-chan struct{} { l.mtx.Lock() defer l.mtx.Unlock() @@ -312,11 +286,11 @@ func (l *CList) WaitChan() <-chan struct{} { return l.waitCh } -// Panics if list grows beyond its max length. + func (l *CList) PushBack(v interface{}) *CElement { l.mtx.Lock() - // Construct a new element + e := &CElement{ prev: nil, prevWg: waitGroup1(), @@ -328,7 +302,7 @@ func (l *CList) PushBack(v interface{}) *CElement { Value: v, } - // Release waiters on FrontWait/BackWait maybe + if l.len == 0 { l.wg.Done() close(l.waitCh) @@ -338,21 +312,21 @@ func (l *CList) PushBack(v interface{}) *CElement { } l.len++ - // Modify the tail + if l.tail == nil { l.head = e l.tail = e } else { - e.SetPrev(l.tail) // We must init e first. - l.tail.SetNext(e) // This will make e accessible. - l.tail = e // Update the list. + e.SetPrev(l.tail) + l.tail.SetNext(e) + l.tail = e } l.mtx.Unlock() return e } -// CONTRACT: Caller must call e.DetachPrev() and/or e.DetachNext() to avoid memory leaks. -// NOTE: As per the contract of CList, removed elements cannot be added back. + + func (l *CList) Remove(e *CElement) interface{} { l.mtx.Lock() @@ -372,16 +346,16 @@ func (l *CList) Remove(e *CElement) interface{} { panic("Remove(e) with false tail") } - // If we're removing the only item, make CList FrontWait/BackWait wait. + if l.len == 1 { - l.wg = waitGroup1() // WaitGroups are difficult to re-use. + l.wg = waitGroup1() l.waitCh = make(chan struct{}) } - // Update l.len + l.len-- - // Connect next/prev and set head/tail + if prev == nil { l.head = next } else { @@ -393,7 +367,7 @@ func (l *CList) Remove(e *CElement) interface{} { next.SetPrev(prev) } - // Set .Done() on e, otherwise waiters will wait forever. + e.SetRemoved() l.mtx.Unlock() diff --git a/mempool/ids.go b/mempool/ids.go index d64a07bda..5afb3bc92 100644 --- a/mempool/ids.go +++ b/mempool/ids.go @@ -1,3 +1,3 @@ package mempool -// These functions were moved into v0/reactor.go and v1/reactor.go + diff --git a/mempool/mempool.go b/mempool/mempool.go index 48aa380f4..dbbec0e02 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -13,107 +13,107 @@ import ( const ( MempoolChannel = byte(0x30) - // PeerCatchupSleepIntervalMS defines how much time to sleep if a peer is behind + PeerCatchupSleepIntervalMS = 100 - // UnknownPeerID is the peer ID to use when running CheckTx when there is - // no peer (e.g. RPC) + + UnknownPeerID uint16 = 0 MaxActiveIDs = math.MaxUint16 ) -// Mempool defines the mempool interface. -// -// Updates to the mempool need to be synchronized with committing a block so -// applications can reset their transient state on Commit. 
+ + + + type Mempool interface { - // CheckTx executes a new transaction against the application to determine - // its validity and whether it should be added to the mempool. + + CheckTx(tx types.Tx, callback func(*abci.Response), txInfo TxInfo) error - // RemoveTxByKey removes a transaction, identified by its key, - // from the mempool. + + RemoveTxByKey(txKey types.TxKey) error - // ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes - // bytes total with the condition that the total gasWanted must be less than - // maxGas. - // - // If both maxes are negative, there is no cap on the size of all returned - // transactions (~ all available transactions). + + + + + + ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs - // ReapMaxTxs reaps up to max transactions from the mempool. If max is - // negative, there is no cap on the size of all returned transactions - // (~ all available transactions). + + + ReapMaxTxs(max int) types.Txs - // Lock locks the mempool. The consensus must be able to hold lock to safely - // update. + + Lock() - // Unlock unlocks the mempool. + Unlock() - // Update informs the mempool that the given txs were committed and can be - // discarded. - // - // NOTE: - // 1. This should be called *after* block is committed by consensus. - // 2. Lock/Unlock must be managed by the caller. + + + + + + Update( blockHeight int64, blockTxs types.Txs, deliverTxResponses []*abci.ResponseDeliverTx, ) error - // SetPreCheckFn sets the pre-check function. + SetPreCheckFn(fn PreCheckFunc) - // SetPostCheckFn sets the post-check function. + SetPostCheckFn(fn PostCheckFunc) - // FlushAppConn flushes the mempool connection to ensure async callback calls - // are done, e.g. from CheckTx. - // - // NOTE: - // 1. Lock/Unlock must be managed by caller. + + + + + FlushAppConn() error - // Flush removes all transactions from the mempool and caches. + Flush() - // TxsAvailable returns a channel which fires once for every height, and only - // when transactions are available in the mempool. - // - // NOTE: - // 1. The returned channel may be nil if EnableTxsAvailable was not called. + + + + + TxsAvailable() <-chan struct{} - // EnableTxsAvailable initializes the TxsAvailable channel, ensuring it will - // trigger once every height when transactions are available. + + EnableTxsAvailable() - // Size returns the number of transactions in the mempool. + Size() int - // SizeBytes returns the total size of all txs in the mempool. + SizeBytes() int64 } -// PreCheckFunc is an optional filter executed before CheckTx and rejects -// transaction if false is returned. An example would be to ensure that a -// transaction doesn't exceeded the block size. + + + type PreCheckFunc func(types.Tx) error -// PostCheckFunc is an optional filter executed after CheckTx and rejects -// transaction if false is returned. An example would be to ensure a -// transaction doesn't require more gas than available for the block. + + + type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error -// PreCheckMaxBytes checks that the size of the transaction is smaller or equal -// to the expected maxBytes. + + func PreCheckMaxBytes(maxBytes int64) PreCheckFunc { return func(tx types.Tx) error { txSize := types.ComputeProtoSizeForTxs([]types.Tx{tx}) @@ -126,8 +126,8 @@ func PreCheckMaxBytes(maxBytes int64) PreCheckFunc { } } -// PostCheckMaxGas checks that the wanted gas is smaller or equal to the passed -// maxGas. Returns nil if maxGas is -1. 
+ + func PostCheckMaxGas(maxGas int64) PostCheckFunc { return func(tx types.Tx, res *abci.ResponseCheckTx) error { if maxGas == -1 { @@ -146,14 +146,14 @@ func PostCheckMaxGas(maxGas int64) PostCheckFunc { } } -// ErrTxInCache is returned to the client if we saw tx earlier + var ErrTxInCache = errors.New("tx already exists in cache") -// TxKey is the fixed length array key used as an index. + type TxKey [sha256.Size]byte -// ErrTxTooLarge defines an error when a transaction is too big to be sent in a -// message to other peers. + + type ErrTxTooLarge struct { Max int Actual int @@ -163,8 +163,8 @@ func (e ErrTxTooLarge) Error() string { return fmt.Sprintf("Tx too large. Max size is %d, but got %d", e.Max, e.Actual) } -// ErrMempoolIsFull defines an error where Tendermint and the application cannot -// handle that much load. + + type ErrMempoolIsFull struct { NumTxs int MaxTxs int @@ -182,7 +182,7 @@ func (e ErrMempoolIsFull) Error() string { ) } -// ErrPreCheck defines an error where a transaction fails a pre-check. + type ErrPreCheck struct { Reason error } @@ -191,7 +191,7 @@ func (e ErrPreCheck) Error() string { return e.Reason.Error() } -// IsPreCheckError returns true if err is due to pre check failure. + func IsPreCheckError(err error) bool { return errors.As(err, &ErrPreCheck{}) } diff --git a/mempool/metrics.go b/mempool/metrics.go index 5d3022e80..613715038 100644 --- a/mempool/metrics.go +++ b/mempool/metrics.go @@ -8,42 +8,42 @@ import ( ) const ( - // MetricsSubsystem is a subsystem shared by all metrics exposed by this - // package. + + MetricsSubsystem = "mempool" ) -// Metrics contains metrics exposed by this package. -// see MetricsProvider for descriptions. + + type Metrics struct { - // Size of the mempool. + Size metrics.Gauge - // Histogram of transaction sizes, in bytes. + TxSizeBytes metrics.Histogram - // Number of failed transactions. + FailedTxs metrics.Counter - // RejectedTxs defines the number of rejected transactions. These are - // transactions that passed CheckTx but failed to make it into the mempool - // due to resource limits, e.g. mempool is full and no lower priority - // transactions exist in the mempool. + + + + RejectedTxs metrics.Counter - // EvictedTxs defines the number of evicted transactions. These are valid - // transactions that passed CheckTx and existed in the mempool but were later - // evicted to make room for higher priority valid transactions that passed - // CheckTx. + + + + EvictedTxs metrics.Counter - // Number of times transactions are rechecked in the mempool. + RecheckTimes metrics.Counter } -// PrometheusMetrics returns Metrics build using Prometheus client library. -// Optionally, labels can be provided along with their values ("foo", -// "fooValue"). + + + func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { labels := []string{} for i := 0; i < len(labelsAndValues); i += 2 { @@ -95,7 +95,7 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { } } -// NopMetrics returns no-op Metrics. + func NopMetrics() *Metrics { return &Metrics{ Size: discard.NewGauge(), diff --git a/mempool/mock/mempool.go b/mempool/mock/mempool.go index 3f293381f..014816a9e 100644 --- a/mempool/mock/mempool.go +++ b/mempool/mock/mempool.go @@ -7,7 +7,7 @@ import ( "github.com/tendermint/tendermint/types" ) -// Mempool is an empty implementation of a Mempool, useful for testing. 
+ type Mempool struct{} var _ mempool.Mempool = Mempool{} diff --git a/mempool/tx.go b/mempool/tx.go index d13f3d6b8..191f1cbc0 100644 --- a/mempool/tx.go +++ b/mempool/tx.go @@ -4,14 +4,14 @@ import ( "github.com/tendermint/tendermint/p2p" ) -// TxInfo are parameters that get passed when attempting to add a tx to the -// mempool. + + type TxInfo struct { - // SenderID is the internal peer ID used in the mempool to identify the - // sender, storing two bytes with each transaction instead of 20 bytes for - // the types.NodeID. + + + SenderID uint16 - // SenderP2PID is the actual p2p.ID of the sender, used e.g. for logging. + SenderP2PID p2p.ID } diff --git a/mempool/v1/mempool.go b/mempool/v1/mempool.go index 1d0eb3a9b..a543b64ab 100644 --- a/mempool/v1/mempool.go +++ b/mempool/v1/mempool.go @@ -20,45 +20,45 @@ import ( var _ mempool.Mempool = (*TxMempool)(nil) -// TxMempoolOption sets an optional parameter on the TxMempool. + type TxMempoolOption func(*TxMempool) -// TxMempool implemements the Mempool interface and allows the application to -// set priority values on transactions in the CheckTx response. When selecting -// transactions to include in a block, higher-priority transactions are chosen -// first. When evicting transactions from the mempool for size constraints, -// lower-priority transactions are evicted sooner. -// -// Within the mempool, transactions are ordered by time of arrival, and are -// gossiped to the rest of the network based on that order (gossip order does -// not take priority into account). + + + + + + + + + type TxMempool struct { - // Immutable fields + logger log.Logger config *config.MempoolConfig proxyAppConn proxy.AppConnMempool metrics *mempool.Metrics - cache mempool.TxCache // seen transactions + cache mempool.TxCache - // Atomically-updated fields - txsBytes int64 // atomic: the total size of all transactions in the mempool, in bytes - txRecheck int64 // atomic: the number of pending recheck calls + + txsBytes int64 + txRecheck int64 - // Synchronized fields, protected by mtx. + mtx *sync.RWMutex notifiedTxsAvailable bool - txsAvailable chan struct{} // one value sent per height when mempool is not empty + txsAvailable chan struct{} preCheck mempool.PreCheckFunc postCheck mempool.PostCheckFunc - height int64 // the latest height passed to Update + height int64 - txs *clist.CList // valid transactions (passed CheckTx) + txs *clist.CList txByKey map[types.TxKey]*clist.CElement - txBySender map[string]*clist.CElement // for sender != "" + txBySender map[string]*clist.CElement } -// NewTxMempool constructs a new, empty priority mempool at the specified -// initial height and using the given config and options. + + func NewTxMempool( logger log.Logger, cfg *config.MempoolConfig, @@ -91,59 +91,59 @@ func NewTxMempool( return txmp } -// WithPreCheck sets a filter for the mempool to reject a transaction if f(tx) -// returns an error. This is executed before CheckTx. It only applies to the -// first created block. After that, Update() overwrites the existing value. + + + func WithPreCheck(f mempool.PreCheckFunc) TxMempoolOption { return func(txmp *TxMempool) { txmp.preCheck = f } } -// WithPostCheck sets a filter for the mempool to reject a transaction if -// f(tx, resp) returns an error. This is executed after CheckTx. It only applies -// to the first created block. After that, Update overwrites the existing value. 
+ + + func WithPostCheck(f mempool.PostCheckFunc) TxMempoolOption { return func(txmp *TxMempool) { txmp.postCheck = f } } -// WithMetrics sets the mempool's metrics collector. + func WithMetrics(metrics *mempool.Metrics) TxMempoolOption { return func(txmp *TxMempool) { txmp.metrics = metrics } } -// Lock obtains a write-lock on the mempool. A caller must be sure to explicitly -// release the lock when finished. + + func (txmp *TxMempool) Lock() { txmp.mtx.Lock() } -// Unlock releases a write-lock on the mempool. + func (txmp *TxMempool) Unlock() { txmp.mtx.Unlock() } -// Size returns the number of valid transactions in the mempool. It is -// thread-safe. + + func (txmp *TxMempool) Size() int { return txmp.txs.Len() } -// SizeBytes return the total sum in bytes of all the valid transactions in the -// mempool. It is thread-safe. + + func (txmp *TxMempool) SizeBytes() int64 { return atomic.LoadInt64(&txmp.txsBytes) } -// FlushAppConn executes FlushSync on the mempool's proxyAppConn. -// -// The caller must hold an exclusive mempool lock (by calling txmp.Lock) before -// calling FlushAppConn. + + + + func (txmp *TxMempool) FlushAppConn() error { - // N.B.: We have to issue the call outside the lock so that its callback can - // fire. It's safe to do this, the flush will block until complete. - // - // We could just not require the caller to hold the lock at all, but the - // semantics of the Mempool interface require the caller to hold it, and we - // can't change that without disrupting existing use. + + + + + + txmp.mtx.Unlock() defer txmp.mtx.Lock() return txmp.proxyAppConn.FlushSync() } -// EnableTxsAvailable enables the mempool to trigger events when transactions -// are available on a block by block basis. + + func (txmp *TxMempool) EnableTxsAvailable() { txmp.mtx.Lock() defer txmp.mtx.Unlock() @@ -151,60 +151,60 @@ func (txmp *TxMempool) EnableTxsAvailable() { txmp.txsAvailable = make(chan struct{}, 1) } -// TxsAvailable returns a channel which fires once for every height, and only -// when transactions are available in the mempool. It is thread-safe. + + func (txmp *TxMempool) TxsAvailable() <-chan struct{} { return txmp.txsAvailable } -// CheckTx adds the given transaction to the mempool if it fits and passes the -// application's ABCI CheckTx method. -// -// CheckTx reports an error without adding tx if: -// -// - The size of tx exceeds the configured maximum transaction size. -// - The pre-check hook is defined and reports an error for tx. -// - The transaction already exists in the cache. -// - The proxy connection to the application fails. -// -// If tx passes all of the above conditions, it is passed (asynchronously) to -// the application's ABCI CheckTx method and this CheckTx method returns nil. -// If cb != nil, it is called when the ABCI request completes to report the -// application response. -// -// If the application accepts the transaction and the mempool is full, the -// mempool evicts one or more of the lowest-priority transaction whose priority -// is (strictly) lower than the priority of tx and whose size together exceeds -// the size of tx, and adds tx instead. If no such transactions exist, tx is -// discarded. + + + + + + + + + + + + + + + + + + + + func (txmp *TxMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo mempool.TxInfo) error { - // During the initial phase of CheckTx, we do not need to modify any state. 
- // A transaction will not actually be added to the mempool until it survives - // a call to the ABCI CheckTx method and size constraint checks. + + + height, err := func() (int64, error) { txmp.mtx.RLock() defer txmp.mtx.RUnlock() - // Reject transactions in excess of the configured maximum transaction size. + if len(tx) > txmp.config.MaxTxBytes { return 0, mempool.ErrTxTooLarge{Max: txmp.config.MaxTxBytes, Actual: len(tx)} } - // If a precheck hook is defined, call it before invoking the application. + if txmp.preCheck != nil { if err := txmp.preCheck(tx); err != nil { return 0, mempool.ErrPreCheck{Reason: err} } } - // Early exit if the proxy connection has an error. + if err := txmp.proxyAppConn.Error(); err != nil { return 0, err } txKey := tx.Key() - // Check for the transaction in the cache. + if !txmp.cache.Push(tx) { - // If the cached transaction is also in the pool, record its sender. + if elt, ok := txmp.txByKey[txKey]; ok { w, _ := elt.Value.(*WrappedTx) w.SetPeer(txInfo.SenderID) @@ -217,13 +217,13 @@ func (txmp *TxMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo memp return err } - // Initiate an ABCI CheckTx for this transaction. The callback is - // responsible for adding the transaction to the pool if it survives. - // - // N.B.: We have to issue the call outside the lock. In a local client, - // even an "async" call invokes its callback immediately which will make - // the callback deadlock trying to acquire the same lock. This isn't a - // problem with out-of-process calls, but this has to work for both. + + + + + + + reqRes := txmp.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{Tx: tx}) if err := txmp.proxyAppConn.FlushSync(); err != nil { return err @@ -244,17 +244,17 @@ func (txmp *TxMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo memp return nil } -// RemoveTxByKey removes the transaction with the specified key from the -// mempool. It reports an error if no such transaction exists. This operation -// does not remove the transaction from the cache. + + + func (txmp *TxMempool) RemoveTxByKey(txKey types.TxKey) error { txmp.mtx.Lock() defer txmp.mtx.Unlock() return txmp.removeTxByKey(txKey) } -// removeTxByKey removes the specified transaction key from the mempool. -// The caller must hold txmp.mtx excluxively. + + func (txmp *TxMempool) removeTxByKey(key types.TxKey) error { if elt, ok := txmp.txByKey[key]; ok { w, _ := elt.Value.(*WrappedTx) @@ -269,8 +269,8 @@ func (txmp *TxMempool) removeTxByKey(key types.TxKey) error { return fmt.Errorf("transaction %x not found", key) } -// removeTxByElement removes the specified transaction element from the mempool. -// The caller must hold txmp.mtx exclusively. + + func (txmp *TxMempool) removeTxByElement(elt *clist.CElement) { w, _ := elt.Value.(*WrappedTx) delete(txmp.txByKey, w.tx.Key()) @@ -281,14 +281,14 @@ func (txmp *TxMempool) removeTxByElement(elt *clist.CElement) { atomic.AddInt64(&txmp.txsBytes, -w.Size()) } -// Flush purges the contents of the mempool and the cache, leaving both empty. -// The current height is not modified by this operation. + + func (txmp *TxMempool) Flush() { txmp.mtx.Lock() defer txmp.mtx.Unlock() - // Remove all the transactions in the list explicitly, so that the sizes - // and indexes get updated properly. + + cur := txmp.txs.Front() for cur != nil { next := cur.Next() @@ -297,14 +297,14 @@ func (txmp *TxMempool) Flush() { } txmp.cache.Reset() - // Discard any pending recheck calls that may be in flight. 
The calls will - // still complete, but will have no effect on the mempool. + + atomic.StoreInt64(&txmp.txRecheck, 0) } -// allEntriesSorted returns a slice of all the transactions currently in the -// mempool, sorted in nonincreasing order by priority with ties broken by -// increasing order of arrival time. + + + func (txmp *TxMempool) allEntriesSorted() []*WrappedTx { txmp.mtx.RLock() defer txmp.mtx.RUnlock() @@ -317,28 +317,28 @@ func (txmp *TxMempool) allEntriesSorted() []*WrappedTx { if all[i].priority == all[j].priority { return all[i].timestamp.Before(all[j].timestamp) } - return all[i].priority > all[j].priority // N.B. higher priorities first + return all[i].priority > all[j].priority }) return all } -// ReapMaxBytesMaxGas returns a slice of valid transactions that fit within the -// size and gas constraints. The results are ordered by nonincreasing priority, -// with ties broken by increasing order of arrival. Reaping transactions does -// not remove them from the mempool. -// -// If maxBytes < 0, no limit is set on the total size in bytes. -// If maxGas < 0, no limit is set on the total gas cost. -// -// If the mempool is empty or has no transactions fitting within the given -// constraints, the result will also be empty. + + + + + + + + + + func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { var totalGas, totalBytes int64 - var keep []types.Tx //nolint:prealloc + var keep []types.Tx for _, w := range txmp.allEntriesSorted() { - // N.B. When computing byte size, we need to include the overhead for - // encoding as protobuf to send to the application. + + totalGas += w.gasWanted totalBytes += types.ComputeProtoSizeForTxs([]types.Tx{w.tx}) if (maxGas >= 0 && totalGas > maxGas) || (maxBytes >= 0 && totalBytes > maxBytes) { @@ -349,24 +349,24 @@ func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { return keep } -// TxsWaitChan returns a channel that is closed when there is at least one -// transaction available to be gossiped. + + func (txmp *TxMempool) TxsWaitChan() <-chan struct{} { return txmp.txs.WaitChan() } -// TxsFront returns the frontmost element of the pending transaction list. -// It will be nil if the mempool is empty. + + func (txmp *TxMempool) TxsFront() *clist.CElement { return txmp.txs.Front() } -// ReapMaxTxs returns up to max transactions from the mempool. The results are -// ordered by nonincreasing priority with ties broken by increasing order of -// arrival. Reaping transactions does not remove them from the mempool. -// -// If max < 0, all transactions in the mempool are reaped. -// -// The result may have fewer than max elements (possibly zero) if the mempool -// does not have that many transactions available. + + + + + + + + func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs { - var keep []types.Tx //nolint:prealloc + var keep []types.Tx for _, w := range txmp.allEntriesSorted() { if max >= 0 && len(keep) >= max { @@ -377,28 +377,28 @@ func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs { return keep } -// Update removes all the given transactions from the mempool and the cache, -// and updates the current block height. The blockTxs and deliverTxResponses -// must have the same length with each response corresponding to the tx at the -// same offset. -// -// If the configuration enables recheck, Update sends each remaining -// transaction after removing blockTxs to the ABCI CheckTx method. Any -// transactions marked as invalid during recheck are also removed. 
-// -// The caller must hold an exclusive mempool lock (by calling txmp.Lock) before -// calling Update. + + + + + + + + + + + func (txmp *TxMempool) Update( blockHeight int64, blockTxs types.Txs, deliverTxResponses []*abci.ResponseDeliverTx, ) error { - // Safety sanity check: The caller is required to hold the lock. + if txmp.mtx.TryLock() { txmp.mtx.Unlock() panic("mempool: Update caller does not hold the lock") } - // Safety check: Transactions and responses must match in number. + if len(blockTxs) != len(deliverTxResponses) { panic(fmt.Sprintf("mempool: got %d transactions but %d DeliverTx responses", len(blockTxs), len(deliverTxResponses))) @@ -408,24 +408,24 @@ func (txmp *TxMempool) Update( txmp.notifiedTxsAvailable = false for i, tx := range blockTxs { - // Add successful committed transactions to the cache (if they are not - // already present). Transactions that failed to commit are removed from - // the cache unless the operator has explicitly requested we keep them. + + + if deliverTxResponses[i].Code == abci.CodeTypeOK { _ = txmp.cache.Push(tx) } else if !txmp.config.KeepInvalidTxsInCache { txmp.cache.Remove(tx) } - // Regardless of success, remove the transaction from the mempool. + _ = txmp.removeTxByKey(tx.Key()) } txmp.purgeExpiredTxs(blockHeight) - // If there are any uncommitted transactions left in the mempool, we either - // initiate re-CheckTx per remaining transaction or notify that remaining - // transactions are left. + + + size := txmp.Size() txmp.metrics.Size.Set(float64(size)) if size > 0 { @@ -446,19 +446,19 @@ func (txmp *TxMempool) SetPostCheckFn(fn mempool.PostCheckFunc) { txmp.postCheck = fn } -// initialTxCallback handles the ABCI CheckTx response for the first time a -// transaction is added to the mempool. A recheck after a block is committed -// goes to the default callback (see recheckTxCallback). -// -// If either the application rejected the transaction or a post-check hook is -// defined and rejects the transaction, it is discarded. -// -// Otherwise, if the mempool is full, check for lower-priority transactions -// that can be evicted to make room for the new one. If no such transactions -// exist, this transaction is logged and dropped; otherwise the selected -// transactions are evicted. -// -// Finally, the new transaction is added and size stats updated. + + + + + + + + + + + + + func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { checkTxRes, ok := res.Value.(*abci.Response_CheckTx) if !ok { @@ -490,14 +490,14 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { txmp.metrics.FailedTxs.Add(1) - // Remove the invalid transaction from the cache, unless the operator has - // instructed us to keep invalid transactions. + + if !txmp.config.KeepInvalidTxsInCache { txmp.cache.Remove(wtx.tx) } - // If there was a post-check error, record its text in the result for - // debugging purposes. + + if err != nil { checkTxRes.CheckTx.MempoolError = err.Error() } @@ -507,9 +507,9 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { priority := checkTxRes.CheckTx.Priority sender := checkTxRes.CheckTx.Sender - // Disallow multiple concurrent transactions from the same sender assigned - // by the ABCI application. As a special case, an empty sender is not - // restricted. 
+ + + if sender != "" { elt, ok := txmp.txBySender[sender] if ok { @@ -526,15 +526,15 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { } } - // At this point the application has ruled the transaction valid, but the - // mempool might be full. If so, find the lowest-priority items with lower - // priority than the application assigned to this new one, and evict as many - // of them as necessary to make room for tx. If no such items exist, we - // discard tx. + + + + + if err := txmp.canAddTx(wtx); err != nil { - var victims []*clist.CElement // eligible transactions for eviction - var victimBytes int64 // total size of victims + var victims []*clist.CElement + var victimBytes int64 for cur := txmp.txs.Front(); cur != nil; cur = cur.Next() { cw := cur.Value.(*WrappedTx) if cw.priority < priority { @@ -543,9 +543,9 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { } } - // If there are no suitable eviction candidates, or the total size of - // those candidates is not enough to make room for the new transaction, - // drop the new one. + + + if len(victims) == 0 || victimBytes < wtx.Size() { txmp.cache.Remove(wtx.tx) txmp.logger.Error( @@ -564,8 +564,8 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { "new_priority", priority, ) - // Sort lowest priority items first so they will be evicted first. Break - // ties in favor of newer items (to maintain FIFO semantics in a group). + + sort.Slice(victims, func(i, j int) bool { iw := victims[i].Value.(*WrappedTx) jw := victims[j].Value.(*WrappedTx) @@ -575,7 +575,7 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { return iw.Priority() < jw.Priority() }) - // Evict as many of the victims as necessary to make room. + var evictedBytes int64 for _, vic := range victims { w := vic.Value.(*WrappedTx) @@ -589,8 +589,8 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { txmp.cache.Remove(w.tx) txmp.metrics.EvictedTxs.Add(1) - // We may not need to evict all the eligible transactions. Bail out - // early if we have made enough room. + + evictedBytes += w.Size() if evictedBytes >= wtx.Size() { break @@ -625,26 +625,26 @@ func (txmp *TxMempool) insertTx(wtx *WrappedTx) { atomic.AddInt64(&txmp.txsBytes, wtx.Size()) } -// recheckTxCallback handles the responses from ABCI CheckTx calls issued -// during the recheck phase of a block Update. It updates the recheck counter -// and removes any transactions invalidated by the application. -// -// This callback is NOT executed for the initial CheckTx on a new transaction; -// that case is handled by initialTxCallback instead. + + + + + + func (txmp *TxMempool) recheckTxCallback(req *abci.Request, res *abci.Response) { checkTxRes, ok := res.Value.(*abci.Response_CheckTx) if !ok { - // Don't log this; this is the default callback and other response types - // can safely be ignored. + + return } - // Check whether we are expecting recheck responses at this point. - // If not, we will ignore the response, this usually means the mempool was Flushed. - // If this is the "last" pending recheck, trigger a notification when it's been processed. 
+ + + numLeft := atomic.AddInt64(&txmp.txRecheck, -1) if numLeft == 0 { - defer txmp.notifyTxsAvailable() // notify waiters on return, if mempool is non-empty + defer txmp.notifyTxsAvailable() } else if numLeft < 0 { return } @@ -655,16 +655,16 @@ func (txmp *TxMempool) recheckTxCallback(req *abci.Request, res *abci.Response) txmp.mtx.Lock() defer txmp.mtx.Unlock() - // Find the transaction reported by the ABCI callback. It is possible the - // transaction was evicted during the recheck, in which case the transaction - // will be gone. + + + elt, ok := txmp.txByKey[tx.Key()] if !ok { return } wtx := elt.Value.(*WrappedTx) - // If a postcheck hook is defined, call it before checking the result. + var err error if txmp.postCheck != nil { err = txmp.postCheck(tx, checkTxRes.CheckTx) @@ -672,7 +672,7 @@ func (txmp *TxMempool) recheckTxCallback(req *abci.Request, res *abci.Response) if checkTxRes.CheckTx.Code == abci.CodeTypeOK && err == nil { wtx.SetPriority(checkTxRes.CheckTx.Priority) - return // N.B. Size of mempool did not change + return } txmp.logger.Debug( @@ -690,12 +690,12 @@ func (txmp *TxMempool) recheckTxCallback(req *abci.Request, res *abci.Response) txmp.metrics.Size.Set(float64(txmp.Size())) } -// recheckTransactions initiates re-CheckTx ABCI calls for all the transactions -// currently in the mempool. It reports the number of recheck calls that were -// successfully initiated. -// -// Precondition: The mempool is not empty. -// The caller must hold txmp.mtx exclusively. + + + + + + func (txmp *TxMempool) recheckTransactions() { if txmp.Size() == 0 { panic("mempool: cannot run recheck on an empty mempool") @@ -705,10 +705,10 @@ func (txmp *TxMempool) recheckTransactions() { "num_txs", txmp.Size(), "height", txmp.height, ) - // N.B.: We have to issue the calls outside the lock. In a local client, - // even an "async" call invokes its callback immediately which will make the - // callback deadlock trying to acquire the same lock. This isn't a problem - // with out-of-process calls, but this has to work for both. + + + + txmp.mtx.Unlock() defer txmp.mtx.Lock() @@ -716,7 +716,7 @@ func (txmp *TxMempool) recheckTransactions() { for e := txmp.txs.Front(); e != nil; e = e.Next() { wtx := e.Value.(*WrappedTx) - // The response for this CheckTx is handled by the default recheckTxCallback. + _ = txmp.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{ Tx: wtx.tx, Type: abci.CheckTxType_Recheck, @@ -730,9 +730,9 @@ func (txmp *TxMempool) recheckTransactions() { txmp.proxyAppConn.FlushAsync() } -// canAddTx returns an error if we cannot insert the provided *WrappedTx into -// the mempool due to mempool configured constraints. Otherwise, nil is -// returned and the transaction can be inserted into the mempool. + + + func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error { numTxs := txmp.Size() txBytes := txmp.SizeBytes() @@ -749,21 +749,21 @@ func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error { return nil } -// purgeExpiredTxs removes all transactions from the mempool that have exceeded -// their respective height or time-based limits as of the given blockHeight. -// Transactions removed by this operation are not removed from the cache. -// -// The caller must hold txmp.mtx exclusively. + + + + + func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) { if txmp.config.TTLNumBlocks == 0 && txmp.config.TTLDuration == 0 { - return // nothing to do + return } now := time.Now() cur := txmp.txs.Front() for cur != nil { - // N.B. 
Grab the next element first, since if we remove cur its successor - // will be invalidated. + + next := cur.Next() w := cur.Value.(*WrappedTx) @@ -782,11 +782,11 @@ func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) { func (txmp *TxMempool) notifyTxsAvailable() { if txmp.Size() == 0 { - return // nothing to do + return } if txmp.txsAvailable != nil && !txmp.notifiedTxsAvailable { - // channel cap is 1, so this will send once + txmp.notifiedTxsAvailable = true select { diff --git a/mempool/v1/tx.go b/mempool/v1/tx.go index 88522a8a7..88134c052 100644 --- a/mempool/v1/tx.go +++ b/mempool/v1/tx.go @@ -7,25 +7,25 @@ import ( "github.com/tendermint/tendermint/types" ) -// WrappedTx defines a wrapper around a raw transaction with additional metadata -// that is used for indexing. + + type WrappedTx struct { - tx types.Tx // the original transaction data - hash types.TxKey // the transaction hash - height int64 // height when this transaction was initially checked (for expiry) - timestamp time.Time // time when transaction was entered (for TTL) + tx types.Tx + hash types.TxKey + height int64 + timestamp time.Time mtx sync.Mutex - gasWanted int64 // app: gas required to execute this transaction - priority int64 // app: priority value for this transaction - sender string // app: assigned sender label - peers map[uint16]bool // peer IDs who have sent us this transaction + gasWanted int64 + priority int64 + sender string + peers map[uint16]bool } -// Size reports the size of the raw transaction in bytes. + func (w *WrappedTx) Size() int64 { return int64(len(w.tx)) } -// SetPeer adds the specified peer ID as a sender of w. + func (w *WrappedTx) SetPeer(id uint16) { w.mtx.Lock() defer w.mtx.Unlock() @@ -36,7 +36,7 @@ func (w *WrappedTx) SetPeer(id uint16) { } } -// HasPeer reports whether the specified peer ID is a sender of w. + func (w *WrappedTx) HasPeer(id uint16) bool { w.mtx.Lock() defer w.mtx.Unlock() @@ -44,42 +44,42 @@ func (w *WrappedTx) HasPeer(id uint16) bool { return ok } -// SetGasWanted sets the application-assigned gas requirement of w. + func (w *WrappedTx) SetGasWanted(gas int64) { w.mtx.Lock() defer w.mtx.Unlock() w.gasWanted = gas } -// GasWanted reports the application-assigned gas requirement of w. + func (w *WrappedTx) GasWanted() int64 { w.mtx.Lock() defer w.mtx.Unlock() return w.gasWanted } -// SetSender sets the application-assigned sender of w. + func (w *WrappedTx) SetSender(sender string) { w.mtx.Lock() defer w.mtx.Unlock() w.sender = sender } -// Sender reports the application-assigned sender of w. + func (w *WrappedTx) Sender() string { w.mtx.Lock() defer w.mtx.Unlock() return w.sender } -// SetPriority sets the application-assigned priority of w. + func (w *WrappedTx) SetPriority(p int64) { w.mtx.Lock() defer w.mtx.Unlock() w.priority = p } -// Priority reports the application-assigned priority of w. + func (w *WrappedTx) Priority() int64 { w.mtx.Lock() defer w.mtx.Unlock() diff --git a/mocks/github.com/dymensionxyz/dymint/block/mock_ExecutorI.go b/mocks/github.com/dymensionxyz/dymint/block/mock_ExecutorI.go index 2ba9eee27..6098f6c98 100644 --- a/mocks/github.com/dymensionxyz/dymint/block/mock_ExecutorI.go +++ b/mocks/github.com/dymensionxyz/dymint/block/mock_ExecutorI.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. 
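Relating to the WrappedTx accessors in mempool/v1/tx.go above: a minimal sketch of how the app-assigned metadata named in the removed field comments (gasWanted, priority, sender) and the peer set are typically filled in from a CheckTx response; annotateFromCheckTxSketch and peerID are placeholders, and the abci types import is assumed:

func annotateFromCheckTxSketch(w *WrappedTx, res *abci.ResponseCheckTx, peerID uint16) {
	// Each setter takes the WrappedTx mutex, so this is safe to call while the
	// transaction is already shared across goroutines.
	w.SetGasWanted(res.GasWanted)
	w.SetPriority(res.Priority)
	w.SetSender(res.Sender)
	w.SetPeer(peerID) // records that peerID has sent us this transaction
}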
+ package block @@ -16,7 +16,7 @@ import ( types "github.com/dymensionxyz/dymint/types" ) -// MockExecutorI is an autogenerated mock type for the ExecutorI type + type MockExecutorI struct { mock.Mock } @@ -29,7 +29,7 @@ func (_m *MockExecutorI) EXPECT() *MockExecutorI_Expecter { return &MockExecutorI_Expecter{mock: &_m.Mock} } -// AddConsensusMsgs provides a mock function with given fields: _a0 + func (_m *MockExecutorI) AddConsensusMsgs(_a0 ...proto.Message) { _va := make([]interface{}, len(_a0)) for _i := range _a0 { @@ -40,13 +40,13 @@ func (_m *MockExecutorI) AddConsensusMsgs(_a0 ...proto.Message) { _m.Called(_ca...) } -// MockExecutorI_AddConsensusMsgs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddConsensusMsgs' + type MockExecutorI_AddConsensusMsgs_Call struct { *mock.Call } -// AddConsensusMsgs is a helper method to define mock.On call -// - _a0 ...proto.Message + + func (_e *MockExecutorI_Expecter) AddConsensusMsgs(_a0 ...interface{}) *MockExecutorI_AddConsensusMsgs_Call { return &MockExecutorI_AddConsensusMsgs_Call{Call: _e.mock.On("AddConsensusMsgs", append([]interface{}{}, _a0...)...)} @@ -75,7 +75,7 @@ func (_c *MockExecutorI_AddConsensusMsgs_Call) RunAndReturn(run func(...proto.Me return _c } -// Commit provides a mock function with given fields: _a0, _a1, resp + func (_m *MockExecutorI) Commit(_a0 *types.State, _a1 *types.Block, resp *state.ABCIResponses) ([]byte, int64, error) { ret := _m.Called(_a0, _a1, resp) @@ -112,15 +112,15 @@ func (_m *MockExecutorI) Commit(_a0 *types.State, _a1 *types.Block, resp *state. return r0, r1, r2 } -// MockExecutorI_Commit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Commit' + type MockExecutorI_Commit_Call struct { *mock.Call } -// Commit is a helper method to define mock.On call -// - _a0 *types.State -// - _a1 *types.Block -// - resp *state.ABCIResponses + + + + func (_e *MockExecutorI_Expecter) Commit(_a0 interface{}, _a1 interface{}, resp interface{}) *MockExecutorI_Commit_Call { return &MockExecutorI_Commit_Call{Call: _e.mock.On("Commit", _a0, _a1, resp)} } @@ -142,7 +142,7 @@ func (_c *MockExecutorI_Commit_Call) RunAndReturn(run func(*types.State, *types. 
return _c } -// CreateBlock provides a mock function with given fields: height, lastCommit, lastHeaderHash, nextSeqHash, _a4, maxBlockDataSizeBytes + func (_m *MockExecutorI) CreateBlock(height uint64, lastCommit *types.Commit, lastHeaderHash [32]byte, nextSeqHash [32]byte, _a4 *types.State, maxBlockDataSizeBytes uint64) *types.Block { ret := _m.Called(height, lastCommit, lastHeaderHash, nextSeqHash, _a4, maxBlockDataSizeBytes) @@ -162,18 +162,18 @@ func (_m *MockExecutorI) CreateBlock(height uint64, lastCommit *types.Commit, la return r0 } -// MockExecutorI_CreateBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateBlock' + type MockExecutorI_CreateBlock_Call struct { *mock.Call } -// CreateBlock is a helper method to define mock.On call -// - height uint64 -// - lastCommit *types.Commit -// - lastHeaderHash [32]byte -// - nextSeqHash [32]byte -// - _a4 *types.State -// - maxBlockDataSizeBytes uint64 + + + + + + + func (_e *MockExecutorI_Expecter) CreateBlock(height interface{}, lastCommit interface{}, lastHeaderHash interface{}, nextSeqHash interface{}, _a4 interface{}, maxBlockDataSizeBytes interface{}) *MockExecutorI_CreateBlock_Call { return &MockExecutorI_CreateBlock_Call{Call: _e.mock.On("CreateBlock", height, lastCommit, lastHeaderHash, nextSeqHash, _a4, maxBlockDataSizeBytes)} } @@ -195,7 +195,7 @@ func (_c *MockExecutorI_CreateBlock_Call) RunAndReturn(run func(uint64, *types.C return _c } -// ExecuteBlock provides a mock function with given fields: _a0 + func (_m *MockExecutorI) ExecuteBlock(_a0 *types.Block) (*state.ABCIResponses, error) { ret := _m.Called(_a0) @@ -225,13 +225,13 @@ func (_m *MockExecutorI) ExecuteBlock(_a0 *types.Block) (*state.ABCIResponses, e return r0, r1 } -// MockExecutorI_ExecuteBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExecuteBlock' + type MockExecutorI_ExecuteBlock_Call struct { *mock.Call } -// ExecuteBlock is a helper method to define mock.On call -// - _a0 *types.Block + + func (_e *MockExecutorI_Expecter) ExecuteBlock(_a0 interface{}) *MockExecutorI_ExecuteBlock_Call { return &MockExecutorI_ExecuteBlock_Call{Call: _e.mock.On("ExecuteBlock", _a0)} } @@ -253,7 +253,7 @@ func (_c *MockExecutorI_ExecuteBlock_Call) RunAndReturn(run func(*types.Block) ( return _c } -// GetAppInfo provides a mock function with given fields: + func (_m *MockExecutorI) GetAppInfo() (*abcitypes.ResponseInfo, error) { ret := _m.Called() @@ -283,12 +283,12 @@ func (_m *MockExecutorI) GetAppInfo() (*abcitypes.ResponseInfo, error) { return r0, r1 } -// MockExecutorI_GetAppInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAppInfo' + type MockExecutorI_GetAppInfo_Call struct { *mock.Call } -// GetAppInfo is a helper method to define mock.On call + func (_e *MockExecutorI_Expecter) GetAppInfo() *MockExecutorI_GetAppInfo_Call { return &MockExecutorI_GetAppInfo_Call{Call: _e.mock.On("GetAppInfo")} } @@ -310,7 +310,7 @@ func (_c *MockExecutorI_GetAppInfo_Call) RunAndReturn(run func() (*abcitypes.Res return _c } -// GetConsensusMsgs provides a mock function with given fields: + func (_m *MockExecutorI) GetConsensusMsgs() []proto.Message { ret := _m.Called() @@ -330,12 +330,12 @@ func (_m *MockExecutorI) GetConsensusMsgs() []proto.Message { return r0 } -// MockExecutorI_GetConsensusMsgs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetConsensusMsgs' + type 
MockExecutorI_GetConsensusMsgs_Call struct { *mock.Call } -// GetConsensusMsgs is a helper method to define mock.On call + func (_e *MockExecutorI_Expecter) GetConsensusMsgs() *MockExecutorI_GetConsensusMsgs_Call { return &MockExecutorI_GetConsensusMsgs_Call{Call: _e.mock.On("GetConsensusMsgs")} } @@ -357,7 +357,7 @@ func (_c *MockExecutorI_GetConsensusMsgs_Call) RunAndReturn(run func() []proto.M return _c } -// InitChain provides a mock function with given fields: genesis, genesisChecksum, valset + func (_m *MockExecutorI) InitChain(genesis *tenderminttypes.GenesisDoc, genesisChecksum string, valset []*tenderminttypes.Validator) (*abcitypes.ResponseInitChain, error) { ret := _m.Called(genesis, genesisChecksum, valset) @@ -387,15 +387,15 @@ func (_m *MockExecutorI) InitChain(genesis *tenderminttypes.GenesisDoc, genesisC return r0, r1 } -// MockExecutorI_InitChain_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InitChain' + type MockExecutorI_InitChain_Call struct { *mock.Call } -// InitChain is a helper method to define mock.On call -// - genesis *tenderminttypes.GenesisDoc -// - genesisChecksum string -// - valset []*tenderminttypes.Validator + + + + func (_e *MockExecutorI_Expecter) InitChain(genesis interface{}, genesisChecksum interface{}, valset interface{}) *MockExecutorI_InitChain_Call { return &MockExecutorI_InitChain_Call{Call: _e.mock.On("InitChain", genesis, genesisChecksum, valset)} } @@ -417,18 +417,18 @@ func (_c *MockExecutorI_InitChain_Call) RunAndReturn(run func(*tenderminttypes.G return _c } -// UpdateMempoolAfterInitChain provides a mock function with given fields: s + func (_m *MockExecutorI) UpdateMempoolAfterInitChain(s *types.State) { _m.Called(s) } -// MockExecutorI_UpdateMempoolAfterInitChain_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateMempoolAfterInitChain' + type MockExecutorI_UpdateMempoolAfterInitChain_Call struct { *mock.Call } -// UpdateMempoolAfterInitChain is a helper method to define mock.On call -// - s *types.State + + func (_e *MockExecutorI_Expecter) UpdateMempoolAfterInitChain(s interface{}) *MockExecutorI_UpdateMempoolAfterInitChain_Call { return &MockExecutorI_UpdateMempoolAfterInitChain_Call{Call: _e.mock.On("UpdateMempoolAfterInitChain", s)} } @@ -450,7 +450,7 @@ func (_c *MockExecutorI_UpdateMempoolAfterInitChain_Call) RunAndReturn(run func( return _c } -// UpdateProposerFromBlock provides a mock function with given fields: s, seqSet, _a2 + func (_m *MockExecutorI) UpdateProposerFromBlock(s *types.State, seqSet *types.SequencerSet, _a2 *types.Block) bool { ret := _m.Called(s, seqSet, _a2) @@ -468,15 +468,15 @@ func (_m *MockExecutorI) UpdateProposerFromBlock(s *types.State, seqSet *types.S return r0 } -// MockExecutorI_UpdateProposerFromBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateProposerFromBlock' + type MockExecutorI_UpdateProposerFromBlock_Call struct { *mock.Call } -// UpdateProposerFromBlock is a helper method to define mock.On call -// - s *types.State -// - seqSet *types.SequencerSet -// - _a2 *types.Block + + + + func (_e *MockExecutorI_Expecter) UpdateProposerFromBlock(s interface{}, seqSet interface{}, _a2 interface{}) *MockExecutorI_UpdateProposerFromBlock_Call { return &MockExecutorI_UpdateProposerFromBlock_Call{Call: _e.mock.On("UpdateProposerFromBlock", s, seqSet, _a2)} } @@ -498,22 +498,22 @@ func (_c *MockExecutorI_UpdateProposerFromBlock_Call) RunAndReturn(run func(*typ 
return _c } -// UpdateStateAfterCommit provides a mock function with given fields: s, resp, appHash, height, lastHeaderHash + func (_m *MockExecutorI) UpdateStateAfterCommit(s *types.State, resp *state.ABCIResponses, appHash []byte, height uint64, lastHeaderHash [32]byte) { _m.Called(s, resp, appHash, height, lastHeaderHash) } -// MockExecutorI_UpdateStateAfterCommit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateStateAfterCommit' + type MockExecutorI_UpdateStateAfterCommit_Call struct { *mock.Call } -// UpdateStateAfterCommit is a helper method to define mock.On call -// - s *types.State -// - resp *state.ABCIResponses -// - appHash []byte -// - height uint64 -// - lastHeaderHash [32]byte + + + + + + func (_e *MockExecutorI_Expecter) UpdateStateAfterCommit(s interface{}, resp interface{}, appHash interface{}, height interface{}, lastHeaderHash interface{}) *MockExecutorI_UpdateStateAfterCommit_Call { return &MockExecutorI_UpdateStateAfterCommit_Call{Call: _e.mock.On("UpdateStateAfterCommit", s, resp, appHash, height, lastHeaderHash)} } @@ -535,19 +535,19 @@ func (_c *MockExecutorI_UpdateStateAfterCommit_Call) RunAndReturn(run func(*type return _c } -// UpdateStateAfterInitChain provides a mock function with given fields: s, res + func (_m *MockExecutorI) UpdateStateAfterInitChain(s *types.State, res *abcitypes.ResponseInitChain) { _m.Called(s, res) } -// MockExecutorI_UpdateStateAfterInitChain_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateStateAfterInitChain' + type MockExecutorI_UpdateStateAfterInitChain_Call struct { *mock.Call } -// UpdateStateAfterInitChain is a helper method to define mock.On call -// - s *types.State -// - res *abcitypes.ResponseInitChain + + + func (_e *MockExecutorI_Expecter) UpdateStateAfterInitChain(s interface{}, res interface{}) *MockExecutorI_UpdateStateAfterInitChain_Call { return &MockExecutorI_UpdateStateAfterInitChain_Call{Call: _e.mock.On("UpdateStateAfterInitChain", s, res)} } @@ -569,8 +569,8 @@ func (_c *MockExecutorI_UpdateStateAfterInitChain_Call) RunAndReturn(run func(*t return _c } -// NewMockExecutorI creates a new instance of MockExecutorI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockExecutorI(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/block/mock_FraudHandler.go b/mocks/github.com/dymensionxyz/dymint/block/mock_FraudHandler.go index 932c51a2e..54b9098d2 100644 --- a/mocks/github.com/dymensionxyz/dymint/block/mock_FraudHandler.go +++ b/mocks/github.com/dymensionxyz/dymint/block/mock_FraudHandler.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. 
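A minimal sketch of how these generated expecter mocks are typically driven from a test; the test name and the stubbed ResponseInfo value are placeholders, and the testing and abcitypes imports are assumed:

func TestExecutorMockSketch(t *testing.T) {
	m := NewMockExecutorI(t) // cleanup and expectation assertions are registered on t
	m.EXPECT().GetAppInfo().Return(&abcitypes.ResponseInfo{LastBlockHeight: 7}, nil)

	info, err := m.GetAppInfo()
	if err != nil || info.LastBlockHeight != 7 {
		t.Fatalf("unexpected GetAppInfo result: %+v, %v", info, err)
	}
}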
+ package block @@ -8,7 +8,7 @@ import ( mock "github.com/stretchr/testify/mock" ) -// MockFraudHandler is an autogenerated mock type for the FraudHandler type + type MockFraudHandler struct { mock.Mock } @@ -21,19 +21,19 @@ func (_m *MockFraudHandler) EXPECT() *MockFraudHandler_Expecter { return &MockFraudHandler_Expecter{mock: &_m.Mock} } -// HandleFault provides a mock function with given fields: ctx, fault + func (_m *MockFraudHandler) HandleFault(ctx context.Context, fault error) { _m.Called(ctx, fault) } -// MockFraudHandler_HandleFault_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HandleFault' + type MockFraudHandler_HandleFault_Call struct { *mock.Call } -// HandleFault is a helper method to define mock.On call -// - ctx context.Context -// - fault error + + + func (_e *MockFraudHandler_Expecter) HandleFault(ctx interface{}, fault interface{}) *MockFraudHandler_HandleFault_Call { return &MockFraudHandler_HandleFault_Call{Call: _e.mock.On("HandleFault", ctx, fault)} } @@ -55,8 +55,8 @@ func (_c *MockFraudHandler_HandleFault_Call) RunAndReturn(run func(context.Conte return _c } -// NewMockFraudHandler creates a new instance of MockFraudHandler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockFraudHandler(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/da/avail/mock_SubstrateApiI.go b/mocks/github.com/dymensionxyz/dymint/da/avail/mock_SubstrateApiI.go index 6a52c1df8..b591d3572 100644 --- a/mocks/github.com/dymensionxyz/dymint/da/avail/mock_SubstrateApiI.go +++ b/mocks/github.com/dymensionxyz/dymint/da/avail/mock_SubstrateApiI.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. 
+ package avail @@ -14,7 +14,7 @@ import ( types "github.com/centrifuge/go-substrate-rpc-client/v4/types" ) -// MockSubstrateApiI is an autogenerated mock type for the SubstrateApiI type + type MockSubstrateApiI struct { mock.Mock } @@ -27,7 +27,7 @@ func (_m *MockSubstrateApiI) EXPECT() *MockSubstrateApiI_Expecter { return &MockSubstrateApiI_Expecter{mock: &_m.Mock} } -// GetBlock provides a mock function with given fields: blockHash + func (_m *MockSubstrateApiI) GetBlock(blockHash types.Hash) (*types.SignedBlock, error) { ret := _m.Called(blockHash) @@ -57,13 +57,13 @@ func (_m *MockSubstrateApiI) GetBlock(blockHash types.Hash) (*types.SignedBlock, return r0, r1 } -// MockSubstrateApiI_GetBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlock' + type MockSubstrateApiI_GetBlock_Call struct { *mock.Call } -// GetBlock is a helper method to define mock.On call -// - blockHash types.Hash + + func (_e *MockSubstrateApiI_Expecter) GetBlock(blockHash interface{}) *MockSubstrateApiI_GetBlock_Call { return &MockSubstrateApiI_GetBlock_Call{Call: _e.mock.On("GetBlock", blockHash)} } @@ -85,7 +85,7 @@ func (_c *MockSubstrateApiI_GetBlock_Call) RunAndReturn(run func(types.Hash) (*t return _c } -// GetBlockHash provides a mock function with given fields: blockNumber + func (_m *MockSubstrateApiI) GetBlockHash(blockNumber uint64) (types.Hash, error) { ret := _m.Called(blockNumber) @@ -115,13 +115,13 @@ func (_m *MockSubstrateApiI) GetBlockHash(blockNumber uint64) (types.Hash, error return r0, r1 } -// MockSubstrateApiI_GetBlockHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockHash' + type MockSubstrateApiI_GetBlockHash_Call struct { *mock.Call } -// GetBlockHash is a helper method to define mock.On call -// - blockNumber uint64 + + func (_e *MockSubstrateApiI_Expecter) GetBlockHash(blockNumber interface{}) *MockSubstrateApiI_GetBlockHash_Call { return &MockSubstrateApiI_GetBlockHash_Call{Call: _e.mock.On("GetBlockHash", blockNumber)} } @@ -143,7 +143,7 @@ func (_c *MockSubstrateApiI_GetBlockHash_Call) RunAndReturn(run func(uint64) (ty return _c } -// GetBlockHashLatest provides a mock function with given fields: + func (_m *MockSubstrateApiI) GetBlockHashLatest() (types.Hash, error) { ret := _m.Called() @@ -173,12 +173,12 @@ func (_m *MockSubstrateApiI) GetBlockHashLatest() (types.Hash, error) { return r0, r1 } -// MockSubstrateApiI_GetBlockHashLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockHashLatest' + type MockSubstrateApiI_GetBlockHashLatest_Call struct { *mock.Call } -// GetBlockHashLatest is a helper method to define mock.On call + func (_e *MockSubstrateApiI_Expecter) GetBlockHashLatest() *MockSubstrateApiI_GetBlockHashLatest_Call { return &MockSubstrateApiI_GetBlockHashLatest_Call{Call: _e.mock.On("GetBlockHashLatest")} } @@ -200,7 +200,7 @@ func (_c *MockSubstrateApiI_GetBlockHashLatest_Call) RunAndReturn(run func() (ty return _c } -// GetBlockLatest provides a mock function with given fields: + func (_m *MockSubstrateApiI) GetBlockLatest() (*types.SignedBlock, error) { ret := _m.Called() @@ -230,12 +230,12 @@ func (_m *MockSubstrateApiI) GetBlockLatest() (*types.SignedBlock, error) { return r0, r1 } -// MockSubstrateApiI_GetBlockLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockLatest' + type MockSubstrateApiI_GetBlockLatest_Call struct { *mock.Call } -// GetBlockLatest 
is a helper method to define mock.On call + func (_e *MockSubstrateApiI_Expecter) GetBlockLatest() *MockSubstrateApiI_GetBlockLatest_Call { return &MockSubstrateApiI_GetBlockLatest_Call{Call: _e.mock.On("GetBlockLatest")} } @@ -257,7 +257,7 @@ func (_c *MockSubstrateApiI_GetBlockLatest_Call) RunAndReturn(run func() (*types return _c } -// GetChildKeys provides a mock function with given fields: childStorageKey, prefix, blockHash + func (_m *MockSubstrateApiI) GetChildKeys(childStorageKey types.StorageKey, prefix types.StorageKey, blockHash types.Hash) ([]types.StorageKey, error) { ret := _m.Called(childStorageKey, prefix, blockHash) @@ -287,15 +287,15 @@ func (_m *MockSubstrateApiI) GetChildKeys(childStorageKey types.StorageKey, pref return r0, r1 } -// MockSubstrateApiI_GetChildKeys_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildKeys' + type MockSubstrateApiI_GetChildKeys_Call struct { *mock.Call } -// GetChildKeys is a helper method to define mock.On call -// - childStorageKey types.StorageKey -// - prefix types.StorageKey -// - blockHash types.Hash + + + + func (_e *MockSubstrateApiI_Expecter) GetChildKeys(childStorageKey interface{}, prefix interface{}, blockHash interface{}) *MockSubstrateApiI_GetChildKeys_Call { return &MockSubstrateApiI_GetChildKeys_Call{Call: _e.mock.On("GetChildKeys", childStorageKey, prefix, blockHash)} } @@ -317,7 +317,7 @@ func (_c *MockSubstrateApiI_GetChildKeys_Call) RunAndReturn(run func(types.Stora return _c } -// GetChildKeysLatest provides a mock function with given fields: childStorageKey, prefix + func (_m *MockSubstrateApiI) GetChildKeysLatest(childStorageKey types.StorageKey, prefix types.StorageKey) ([]types.StorageKey, error) { ret := _m.Called(childStorageKey, prefix) @@ -347,14 +347,14 @@ func (_m *MockSubstrateApiI) GetChildKeysLatest(childStorageKey types.StorageKey return r0, r1 } -// MockSubstrateApiI_GetChildKeysLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildKeysLatest' + type MockSubstrateApiI_GetChildKeysLatest_Call struct { *mock.Call } -// GetChildKeysLatest is a helper method to define mock.On call -// - childStorageKey types.StorageKey -// - prefix types.StorageKey + + + func (_e *MockSubstrateApiI_Expecter) GetChildKeysLatest(childStorageKey interface{}, prefix interface{}) *MockSubstrateApiI_GetChildKeysLatest_Call { return &MockSubstrateApiI_GetChildKeysLatest_Call{Call: _e.mock.On("GetChildKeysLatest", childStorageKey, prefix)} } @@ -376,7 +376,7 @@ func (_c *MockSubstrateApiI_GetChildKeysLatest_Call) RunAndReturn(run func(types return _c } -// GetChildStorage provides a mock function with given fields: childStorageKey, key, target, blockHash + func (_m *MockSubstrateApiI) GetChildStorage(childStorageKey types.StorageKey, key types.StorageKey, target interface{}, blockHash types.Hash) (bool, error) { ret := _m.Called(childStorageKey, key, target, blockHash) @@ -404,16 +404,16 @@ func (_m *MockSubstrateApiI) GetChildStorage(childStorageKey types.StorageKey, k return r0, r1 } -// MockSubstrateApiI_GetChildStorage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorage' + type MockSubstrateApiI_GetChildStorage_Call struct { *mock.Call } -// GetChildStorage is a helper method to define mock.On call -// - childStorageKey types.StorageKey -// - key types.StorageKey -// - target interface{} -// - blockHash types.Hash + + + + + func (_e *MockSubstrateApiI_Expecter) 
GetChildStorage(childStorageKey interface{}, key interface{}, target interface{}, blockHash interface{}) *MockSubstrateApiI_GetChildStorage_Call { return &MockSubstrateApiI_GetChildStorage_Call{Call: _e.mock.On("GetChildStorage", childStorageKey, key, target, blockHash)} } @@ -435,7 +435,7 @@ func (_c *MockSubstrateApiI_GetChildStorage_Call) RunAndReturn(run func(types.St return _c } -// GetChildStorageHash provides a mock function with given fields: childStorageKey, key, blockHash + func (_m *MockSubstrateApiI) GetChildStorageHash(childStorageKey types.StorageKey, key types.StorageKey, blockHash types.Hash) (types.Hash, error) { ret := _m.Called(childStorageKey, key, blockHash) @@ -465,15 +465,15 @@ func (_m *MockSubstrateApiI) GetChildStorageHash(childStorageKey types.StorageKe return r0, r1 } -// MockSubstrateApiI_GetChildStorageHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorageHash' + type MockSubstrateApiI_GetChildStorageHash_Call struct { *mock.Call } -// GetChildStorageHash is a helper method to define mock.On call -// - childStorageKey types.StorageKey -// - key types.StorageKey -// - blockHash types.Hash + + + + func (_e *MockSubstrateApiI_Expecter) GetChildStorageHash(childStorageKey interface{}, key interface{}, blockHash interface{}) *MockSubstrateApiI_GetChildStorageHash_Call { return &MockSubstrateApiI_GetChildStorageHash_Call{Call: _e.mock.On("GetChildStorageHash", childStorageKey, key, blockHash)} } @@ -495,7 +495,7 @@ func (_c *MockSubstrateApiI_GetChildStorageHash_Call) RunAndReturn(run func(type return _c } -// GetChildStorageHashLatest provides a mock function with given fields: childStorageKey, key + func (_m *MockSubstrateApiI) GetChildStorageHashLatest(childStorageKey types.StorageKey, key types.StorageKey) (types.Hash, error) { ret := _m.Called(childStorageKey, key) @@ -525,14 +525,14 @@ func (_m *MockSubstrateApiI) GetChildStorageHashLatest(childStorageKey types.Sto return r0, r1 } -// MockSubstrateApiI_GetChildStorageHashLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorageHashLatest' + type MockSubstrateApiI_GetChildStorageHashLatest_Call struct { *mock.Call } -// GetChildStorageHashLatest is a helper method to define mock.On call -// - childStorageKey types.StorageKey -// - key types.StorageKey + + + func (_e *MockSubstrateApiI_Expecter) GetChildStorageHashLatest(childStorageKey interface{}, key interface{}) *MockSubstrateApiI_GetChildStorageHashLatest_Call { return &MockSubstrateApiI_GetChildStorageHashLatest_Call{Call: _e.mock.On("GetChildStorageHashLatest", childStorageKey, key)} } @@ -554,7 +554,7 @@ func (_c *MockSubstrateApiI_GetChildStorageHashLatest_Call) RunAndReturn(run fun return _c } -// GetChildStorageLatest provides a mock function with given fields: childStorageKey, key, target + func (_m *MockSubstrateApiI) GetChildStorageLatest(childStorageKey types.StorageKey, key types.StorageKey, target interface{}) (bool, error) { ret := _m.Called(childStorageKey, key, target) @@ -582,15 +582,15 @@ func (_m *MockSubstrateApiI) GetChildStorageLatest(childStorageKey types.Storage return r0, r1 } -// MockSubstrateApiI_GetChildStorageLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorageLatest' + type MockSubstrateApiI_GetChildStorageLatest_Call struct { *mock.Call } -// GetChildStorageLatest is a helper method to define mock.On call -// - childStorageKey types.StorageKey -// - key 
types.StorageKey -// - target interface{} + + + + func (_e *MockSubstrateApiI_Expecter) GetChildStorageLatest(childStorageKey interface{}, key interface{}, target interface{}) *MockSubstrateApiI_GetChildStorageLatest_Call { return &MockSubstrateApiI_GetChildStorageLatest_Call{Call: _e.mock.On("GetChildStorageLatest", childStorageKey, key, target)} } @@ -612,7 +612,7 @@ func (_c *MockSubstrateApiI_GetChildStorageLatest_Call) RunAndReturn(run func(ty return _c } -// GetChildStorageRaw provides a mock function with given fields: childStorageKey, key, blockHash + func (_m *MockSubstrateApiI) GetChildStorageRaw(childStorageKey types.StorageKey, key types.StorageKey, blockHash types.Hash) (*types.StorageDataRaw, error) { ret := _m.Called(childStorageKey, key, blockHash) @@ -642,15 +642,15 @@ func (_m *MockSubstrateApiI) GetChildStorageRaw(childStorageKey types.StorageKey return r0, r1 } -// MockSubstrateApiI_GetChildStorageRaw_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorageRaw' + type MockSubstrateApiI_GetChildStorageRaw_Call struct { *mock.Call } -// GetChildStorageRaw is a helper method to define mock.On call -// - childStorageKey types.StorageKey -// - key types.StorageKey -// - blockHash types.Hash + + + + func (_e *MockSubstrateApiI_Expecter) GetChildStorageRaw(childStorageKey interface{}, key interface{}, blockHash interface{}) *MockSubstrateApiI_GetChildStorageRaw_Call { return &MockSubstrateApiI_GetChildStorageRaw_Call{Call: _e.mock.On("GetChildStorageRaw", childStorageKey, key, blockHash)} } @@ -672,7 +672,7 @@ func (_c *MockSubstrateApiI_GetChildStorageRaw_Call) RunAndReturn(run func(types return _c } -// GetChildStorageRawLatest provides a mock function with given fields: childStorageKey, key + func (_m *MockSubstrateApiI) GetChildStorageRawLatest(childStorageKey types.StorageKey, key types.StorageKey) (*types.StorageDataRaw, error) { ret := _m.Called(childStorageKey, key) @@ -702,14 +702,14 @@ func (_m *MockSubstrateApiI) GetChildStorageRawLatest(childStorageKey types.Stor return r0, r1 } -// MockSubstrateApiI_GetChildStorageRawLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorageRawLatest' + type MockSubstrateApiI_GetChildStorageRawLatest_Call struct { *mock.Call } -// GetChildStorageRawLatest is a helper method to define mock.On call -// - childStorageKey types.StorageKey -// - key types.StorageKey + + + func (_e *MockSubstrateApiI_Expecter) GetChildStorageRawLatest(childStorageKey interface{}, key interface{}) *MockSubstrateApiI_GetChildStorageRawLatest_Call { return &MockSubstrateApiI_GetChildStorageRawLatest_Call{Call: _e.mock.On("GetChildStorageRawLatest", childStorageKey, key)} } @@ -731,7 +731,7 @@ func (_c *MockSubstrateApiI_GetChildStorageRawLatest_Call) RunAndReturn(run func return _c } -// GetChildStorageSize provides a mock function with given fields: childStorageKey, key, blockHash + func (_m *MockSubstrateApiI) GetChildStorageSize(childStorageKey types.StorageKey, key types.StorageKey, blockHash types.Hash) (types.U64, error) { ret := _m.Called(childStorageKey, key, blockHash) @@ -759,15 +759,15 @@ func (_m *MockSubstrateApiI) GetChildStorageSize(childStorageKey types.StorageKe return r0, r1 } -// MockSubstrateApiI_GetChildStorageSize_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorageSize' + type MockSubstrateApiI_GetChildStorageSize_Call struct { *mock.Call } -// GetChildStorageSize is a 
helper method to define mock.On call -// - childStorageKey types.StorageKey -// - key types.StorageKey -// - blockHash types.Hash + + + + func (_e *MockSubstrateApiI_Expecter) GetChildStorageSize(childStorageKey interface{}, key interface{}, blockHash interface{}) *MockSubstrateApiI_GetChildStorageSize_Call { return &MockSubstrateApiI_GetChildStorageSize_Call{Call: _e.mock.On("GetChildStorageSize", childStorageKey, key, blockHash)} } @@ -789,7 +789,7 @@ func (_c *MockSubstrateApiI_GetChildStorageSize_Call) RunAndReturn(run func(type return _c } -// GetChildStorageSizeLatest provides a mock function with given fields: childStorageKey, key + func (_m *MockSubstrateApiI) GetChildStorageSizeLatest(childStorageKey types.StorageKey, key types.StorageKey) (types.U64, error) { ret := _m.Called(childStorageKey, key) @@ -817,14 +817,14 @@ func (_m *MockSubstrateApiI) GetChildStorageSizeLatest(childStorageKey types.Sto return r0, r1 } -// MockSubstrateApiI_GetChildStorageSizeLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorageSizeLatest' + type MockSubstrateApiI_GetChildStorageSizeLatest_Call struct { *mock.Call } -// GetChildStorageSizeLatest is a helper method to define mock.On call -// - childStorageKey types.StorageKey -// - key types.StorageKey + + + func (_e *MockSubstrateApiI_Expecter) GetChildStorageSizeLatest(childStorageKey interface{}, key interface{}) *MockSubstrateApiI_GetChildStorageSizeLatest_Call { return &MockSubstrateApiI_GetChildStorageSizeLatest_Call{Call: _e.mock.On("GetChildStorageSizeLatest", childStorageKey, key)} } @@ -846,7 +846,7 @@ func (_c *MockSubstrateApiI_GetChildStorageSizeLatest_Call) RunAndReturn(run fun return _c } -// GetFinalizedHead provides a mock function with given fields: + func (_m *MockSubstrateApiI) GetFinalizedHead() (types.Hash, error) { ret := _m.Called() @@ -876,12 +876,12 @@ func (_m *MockSubstrateApiI) GetFinalizedHead() (types.Hash, error) { return r0, r1 } -// MockSubstrateApiI_GetFinalizedHead_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFinalizedHead' + type MockSubstrateApiI_GetFinalizedHead_Call struct { *mock.Call } -// GetFinalizedHead is a helper method to define mock.On call + func (_e *MockSubstrateApiI_Expecter) GetFinalizedHead() *MockSubstrateApiI_GetFinalizedHead_Call { return &MockSubstrateApiI_GetFinalizedHead_Call{Call: _e.mock.On("GetFinalizedHead")} } @@ -903,7 +903,7 @@ func (_c *MockSubstrateApiI_GetFinalizedHead_Call) RunAndReturn(run func() (type return _c } -// GetHeader provides a mock function with given fields: blockHash + func (_m *MockSubstrateApiI) GetHeader(blockHash types.Hash) (*types.Header, error) { ret := _m.Called(blockHash) @@ -933,13 +933,13 @@ func (_m *MockSubstrateApiI) GetHeader(blockHash types.Hash) (*types.Header, err return r0, r1 } -// MockSubstrateApiI_GetHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetHeader' + type MockSubstrateApiI_GetHeader_Call struct { *mock.Call } -// GetHeader is a helper method to define mock.On call -// - blockHash types.Hash + + func (_e *MockSubstrateApiI_Expecter) GetHeader(blockHash interface{}) *MockSubstrateApiI_GetHeader_Call { return &MockSubstrateApiI_GetHeader_Call{Call: _e.mock.On("GetHeader", blockHash)} } @@ -961,7 +961,7 @@ func (_c *MockSubstrateApiI_GetHeader_Call) RunAndReturn(run func(types.Hash) (* return _c } -// GetHeaderLatest provides a mock function with given fields: + func (_m 
*MockSubstrateApiI) GetHeaderLatest() (*types.Header, error) { ret := _m.Called() @@ -991,12 +991,12 @@ func (_m *MockSubstrateApiI) GetHeaderLatest() (*types.Header, error) { return r0, r1 } -// MockSubstrateApiI_GetHeaderLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetHeaderLatest' + type MockSubstrateApiI_GetHeaderLatest_Call struct { *mock.Call } -// GetHeaderLatest is a helper method to define mock.On call + func (_e *MockSubstrateApiI_Expecter) GetHeaderLatest() *MockSubstrateApiI_GetHeaderLatest_Call { return &MockSubstrateApiI_GetHeaderLatest_Call{Call: _e.mock.On("GetHeaderLatest")} } @@ -1018,7 +1018,7 @@ func (_c *MockSubstrateApiI_GetHeaderLatest_Call) RunAndReturn(run func() (*type return _c } -// GetKeys provides a mock function with given fields: prefix, blockHash + func (_m *MockSubstrateApiI) GetKeys(prefix types.StorageKey, blockHash types.Hash) ([]types.StorageKey, error) { ret := _m.Called(prefix, blockHash) @@ -1048,14 +1048,14 @@ func (_m *MockSubstrateApiI) GetKeys(prefix types.StorageKey, blockHash types.Ha return r0, r1 } -// MockSubstrateApiI_GetKeys_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetKeys' + type MockSubstrateApiI_GetKeys_Call struct { *mock.Call } -// GetKeys is a helper method to define mock.On call -// - prefix types.StorageKey -// - blockHash types.Hash + + + func (_e *MockSubstrateApiI_Expecter) GetKeys(prefix interface{}, blockHash interface{}) *MockSubstrateApiI_GetKeys_Call { return &MockSubstrateApiI_GetKeys_Call{Call: _e.mock.On("GetKeys", prefix, blockHash)} } @@ -1077,7 +1077,7 @@ func (_c *MockSubstrateApiI_GetKeys_Call) RunAndReturn(run func(types.StorageKey return _c } -// GetKeysLatest provides a mock function with given fields: prefix + func (_m *MockSubstrateApiI) GetKeysLatest(prefix types.StorageKey) ([]types.StorageKey, error) { ret := _m.Called(prefix) @@ -1107,13 +1107,13 @@ func (_m *MockSubstrateApiI) GetKeysLatest(prefix types.StorageKey) ([]types.Sto return r0, r1 } -// MockSubstrateApiI_GetKeysLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetKeysLatest' + type MockSubstrateApiI_GetKeysLatest_Call struct { *mock.Call } -// GetKeysLatest is a helper method to define mock.On call -// - prefix types.StorageKey + + func (_e *MockSubstrateApiI_Expecter) GetKeysLatest(prefix interface{}) *MockSubstrateApiI_GetKeysLatest_Call { return &MockSubstrateApiI_GetKeysLatest_Call{Call: _e.mock.On("GetKeysLatest", prefix)} } @@ -1135,7 +1135,7 @@ func (_c *MockSubstrateApiI_GetKeysLatest_Call) RunAndReturn(run func(types.Stor return _c } -// GetMetadata provides a mock function with given fields: blockHash + func (_m *MockSubstrateApiI) GetMetadata(blockHash types.Hash) (*types.Metadata, error) { ret := _m.Called(blockHash) @@ -1165,13 +1165,13 @@ func (_m *MockSubstrateApiI) GetMetadata(blockHash types.Hash) (*types.Metadata, return r0, r1 } -// MockSubstrateApiI_GetMetadata_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetMetadata' + type MockSubstrateApiI_GetMetadata_Call struct { *mock.Call } -// GetMetadata is a helper method to define mock.On call -// - blockHash types.Hash + + func (_e *MockSubstrateApiI_Expecter) GetMetadata(blockHash interface{}) *MockSubstrateApiI_GetMetadata_Call { return &MockSubstrateApiI_GetMetadata_Call{Call: _e.mock.On("GetMetadata", blockHash)} } @@ -1193,7 +1193,7 @@ func (_c 
*MockSubstrateApiI_GetMetadata_Call) RunAndReturn(run func(types.Hash) return _c } -// GetMetadataLatest provides a mock function with given fields: + func (_m *MockSubstrateApiI) GetMetadataLatest() (*types.Metadata, error) { ret := _m.Called() @@ -1223,12 +1223,12 @@ func (_m *MockSubstrateApiI) GetMetadataLatest() (*types.Metadata, error) { return r0, r1 } -// MockSubstrateApiI_GetMetadataLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetMetadataLatest' + type MockSubstrateApiI_GetMetadataLatest_Call struct { *mock.Call } -// GetMetadataLatest is a helper method to define mock.On call + func (_e *MockSubstrateApiI_Expecter) GetMetadataLatest() *MockSubstrateApiI_GetMetadataLatest_Call { return &MockSubstrateApiI_GetMetadataLatest_Call{Call: _e.mock.On("GetMetadataLatest")} } @@ -1250,7 +1250,7 @@ func (_c *MockSubstrateApiI_GetMetadataLatest_Call) RunAndReturn(run func() (*ty return _c } -// GetRuntimeVersion provides a mock function with given fields: blockHash + func (_m *MockSubstrateApiI) GetRuntimeVersion(blockHash types.Hash) (*types.RuntimeVersion, error) { ret := _m.Called(blockHash) @@ -1280,13 +1280,13 @@ func (_m *MockSubstrateApiI) GetRuntimeVersion(blockHash types.Hash) (*types.Run return r0, r1 } -// MockSubstrateApiI_GetRuntimeVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRuntimeVersion' + type MockSubstrateApiI_GetRuntimeVersion_Call struct { *mock.Call } -// GetRuntimeVersion is a helper method to define mock.On call -// - blockHash types.Hash + + func (_e *MockSubstrateApiI_Expecter) GetRuntimeVersion(blockHash interface{}) *MockSubstrateApiI_GetRuntimeVersion_Call { return &MockSubstrateApiI_GetRuntimeVersion_Call{Call: _e.mock.On("GetRuntimeVersion", blockHash)} } @@ -1308,7 +1308,7 @@ func (_c *MockSubstrateApiI_GetRuntimeVersion_Call) RunAndReturn(run func(types. 
return _c } -// GetRuntimeVersionLatest provides a mock function with given fields: + func (_m *MockSubstrateApiI) GetRuntimeVersionLatest() (*types.RuntimeVersion, error) { ret := _m.Called() @@ -1338,12 +1338,12 @@ func (_m *MockSubstrateApiI) GetRuntimeVersionLatest() (*types.RuntimeVersion, e return r0, r1 } -// MockSubstrateApiI_GetRuntimeVersionLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRuntimeVersionLatest' + type MockSubstrateApiI_GetRuntimeVersionLatest_Call struct { *mock.Call } -// GetRuntimeVersionLatest is a helper method to define mock.On call + func (_e *MockSubstrateApiI_Expecter) GetRuntimeVersionLatest() *MockSubstrateApiI_GetRuntimeVersionLatest_Call { return &MockSubstrateApiI_GetRuntimeVersionLatest_Call{Call: _e.mock.On("GetRuntimeVersionLatest")} } @@ -1365,7 +1365,7 @@ func (_c *MockSubstrateApiI_GetRuntimeVersionLatest_Call) RunAndReturn(run func( return _c } -// GetStorage provides a mock function with given fields: key, target, blockHash + func (_m *MockSubstrateApiI) GetStorage(key types.StorageKey, target interface{}, blockHash types.Hash) (bool, error) { ret := _m.Called(key, target, blockHash) @@ -1393,15 +1393,15 @@ func (_m *MockSubstrateApiI) GetStorage(key types.StorageKey, target interface{} return r0, r1 } -// MockSubstrateApiI_GetStorage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorage' + type MockSubstrateApiI_GetStorage_Call struct { *mock.Call } -// GetStorage is a helper method to define mock.On call -// - key types.StorageKey -// - target interface{} -// - blockHash types.Hash + + + + func (_e *MockSubstrateApiI_Expecter) GetStorage(key interface{}, target interface{}, blockHash interface{}) *MockSubstrateApiI_GetStorage_Call { return &MockSubstrateApiI_GetStorage_Call{Call: _e.mock.On("GetStorage", key, target, blockHash)} } @@ -1423,7 +1423,7 @@ func (_c *MockSubstrateApiI_GetStorage_Call) RunAndReturn(run func(types.Storage return _c } -// GetStorageHash provides a mock function with given fields: key, blockHash + func (_m *MockSubstrateApiI) GetStorageHash(key types.StorageKey, blockHash types.Hash) (types.Hash, error) { ret := _m.Called(key, blockHash) @@ -1453,14 +1453,14 @@ func (_m *MockSubstrateApiI) GetStorageHash(key types.StorageKey, blockHash type return r0, r1 } -// MockSubstrateApiI_GetStorageHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorageHash' + type MockSubstrateApiI_GetStorageHash_Call struct { *mock.Call } -// GetStorageHash is a helper method to define mock.On call -// - key types.StorageKey -// - blockHash types.Hash + + + func (_e *MockSubstrateApiI_Expecter) GetStorageHash(key interface{}, blockHash interface{}) *MockSubstrateApiI_GetStorageHash_Call { return &MockSubstrateApiI_GetStorageHash_Call{Call: _e.mock.On("GetStorageHash", key, blockHash)} } @@ -1482,7 +1482,7 @@ func (_c *MockSubstrateApiI_GetStorageHash_Call) RunAndReturn(run func(types.Sto return _c } -// GetStorageHashLatest provides a mock function with given fields: key + func (_m *MockSubstrateApiI) GetStorageHashLatest(key types.StorageKey) (types.Hash, error) { ret := _m.Called(key) @@ -1512,13 +1512,13 @@ func (_m *MockSubstrateApiI) GetStorageHashLatest(key types.StorageKey) (types.H return r0, r1 } -// MockSubstrateApiI_GetStorageHashLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorageHashLatest' + type 
MockSubstrateApiI_GetStorageHashLatest_Call struct { *mock.Call } -// GetStorageHashLatest is a helper method to define mock.On call -// - key types.StorageKey + + func (_e *MockSubstrateApiI_Expecter) GetStorageHashLatest(key interface{}) *MockSubstrateApiI_GetStorageHashLatest_Call { return &MockSubstrateApiI_GetStorageHashLatest_Call{Call: _e.mock.On("GetStorageHashLatest", key)} } @@ -1540,7 +1540,7 @@ func (_c *MockSubstrateApiI_GetStorageHashLatest_Call) RunAndReturn(run func(typ return _c } -// GetStorageLatest provides a mock function with given fields: key, target + func (_m *MockSubstrateApiI) GetStorageLatest(key types.StorageKey, target interface{}) (bool, error) { ret := _m.Called(key, target) @@ -1568,14 +1568,14 @@ func (_m *MockSubstrateApiI) GetStorageLatest(key types.StorageKey, target inter return r0, r1 } -// MockSubstrateApiI_GetStorageLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorageLatest' + type MockSubstrateApiI_GetStorageLatest_Call struct { *mock.Call } -// GetStorageLatest is a helper method to define mock.On call -// - key types.StorageKey -// - target interface{} + + + func (_e *MockSubstrateApiI_Expecter) GetStorageLatest(key interface{}, target interface{}) *MockSubstrateApiI_GetStorageLatest_Call { return &MockSubstrateApiI_GetStorageLatest_Call{Call: _e.mock.On("GetStorageLatest", key, target)} } @@ -1597,7 +1597,7 @@ func (_c *MockSubstrateApiI_GetStorageLatest_Call) RunAndReturn(run func(types.S return _c } -// GetStorageRaw provides a mock function with given fields: key, blockHash + func (_m *MockSubstrateApiI) GetStorageRaw(key types.StorageKey, blockHash types.Hash) (*types.StorageDataRaw, error) { ret := _m.Called(key, blockHash) @@ -1627,14 +1627,14 @@ func (_m *MockSubstrateApiI) GetStorageRaw(key types.StorageKey, blockHash types return r0, r1 } -// MockSubstrateApiI_GetStorageRaw_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorageRaw' + type MockSubstrateApiI_GetStorageRaw_Call struct { *mock.Call } -// GetStorageRaw is a helper method to define mock.On call -// - key types.StorageKey -// - blockHash types.Hash + + + func (_e *MockSubstrateApiI_Expecter) GetStorageRaw(key interface{}, blockHash interface{}) *MockSubstrateApiI_GetStorageRaw_Call { return &MockSubstrateApiI_GetStorageRaw_Call{Call: _e.mock.On("GetStorageRaw", key, blockHash)} } @@ -1656,7 +1656,7 @@ func (_c *MockSubstrateApiI_GetStorageRaw_Call) RunAndReturn(run func(types.Stor return _c } -// GetStorageRawLatest provides a mock function with given fields: key + func (_m *MockSubstrateApiI) GetStorageRawLatest(key types.StorageKey) (*types.StorageDataRaw, error) { ret := _m.Called(key) @@ -1686,13 +1686,13 @@ func (_m *MockSubstrateApiI) GetStorageRawLatest(key types.StorageKey) (*types.S return r0, r1 } -// MockSubstrateApiI_GetStorageRawLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorageRawLatest' + type MockSubstrateApiI_GetStorageRawLatest_Call struct { *mock.Call } -// GetStorageRawLatest is a helper method to define mock.On call -// - key types.StorageKey + + func (_e *MockSubstrateApiI_Expecter) GetStorageRawLatest(key interface{}) *MockSubstrateApiI_GetStorageRawLatest_Call { return &MockSubstrateApiI_GetStorageRawLatest_Call{Call: _e.mock.On("GetStorageRawLatest", key)} } @@ -1714,7 +1714,7 @@ func (_c *MockSubstrateApiI_GetStorageRawLatest_Call) RunAndReturn(run func(type return _c } -// GetStorageSize 
provides a mock function with given fields: key, blockHash + func (_m *MockSubstrateApiI) GetStorageSize(key types.StorageKey, blockHash types.Hash) (types.U64, error) { ret := _m.Called(key, blockHash) @@ -1742,14 +1742,14 @@ func (_m *MockSubstrateApiI) GetStorageSize(key types.StorageKey, blockHash type return r0, r1 } -// MockSubstrateApiI_GetStorageSize_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorageSize' + type MockSubstrateApiI_GetStorageSize_Call struct { *mock.Call } -// GetStorageSize is a helper method to define mock.On call -// - key types.StorageKey -// - blockHash types.Hash + + + func (_e *MockSubstrateApiI_Expecter) GetStorageSize(key interface{}, blockHash interface{}) *MockSubstrateApiI_GetStorageSize_Call { return &MockSubstrateApiI_GetStorageSize_Call{Call: _e.mock.On("GetStorageSize", key, blockHash)} } @@ -1771,7 +1771,7 @@ func (_c *MockSubstrateApiI_GetStorageSize_Call) RunAndReturn(run func(types.Sto return _c } -// GetStorageSizeLatest provides a mock function with given fields: key + func (_m *MockSubstrateApiI) GetStorageSizeLatest(key types.StorageKey) (types.U64, error) { ret := _m.Called(key) @@ -1799,13 +1799,13 @@ func (_m *MockSubstrateApiI) GetStorageSizeLatest(key types.StorageKey) (types.U return r0, r1 } -// MockSubstrateApiI_GetStorageSizeLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorageSizeLatest' + type MockSubstrateApiI_GetStorageSizeLatest_Call struct { *mock.Call } -// GetStorageSizeLatest is a helper method to define mock.On call -// - key types.StorageKey + + func (_e *MockSubstrateApiI_Expecter) GetStorageSizeLatest(key interface{}) *MockSubstrateApiI_GetStorageSizeLatest_Call { return &MockSubstrateApiI_GetStorageSizeLatest_Call{Call: _e.mock.On("GetStorageSizeLatest", key)} } @@ -1827,7 +1827,7 @@ func (_c *MockSubstrateApiI_GetStorageSizeLatest_Call) RunAndReturn(run func(typ return _c } -// PendingExtrinsics provides a mock function with given fields: + func (_m *MockSubstrateApiI) PendingExtrinsics() ([]types.Extrinsic, error) { ret := _m.Called() @@ -1857,12 +1857,12 @@ func (_m *MockSubstrateApiI) PendingExtrinsics() ([]types.Extrinsic, error) { return r0, r1 } -// MockSubstrateApiI_PendingExtrinsics_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PendingExtrinsics' + type MockSubstrateApiI_PendingExtrinsics_Call struct { *mock.Call } -// PendingExtrinsics is a helper method to define mock.On call + func (_e *MockSubstrateApiI_Expecter) PendingExtrinsics() *MockSubstrateApiI_PendingExtrinsics_Call { return &MockSubstrateApiI_PendingExtrinsics_Call{Call: _e.mock.On("PendingExtrinsics")} } @@ -1884,7 +1884,7 @@ func (_c *MockSubstrateApiI_PendingExtrinsics_Call) RunAndReturn(run func() ([]t return _c } -// QueryStorage provides a mock function with given fields: keys, startBlock, block + func (_m *MockSubstrateApiI) QueryStorage(keys []types.StorageKey, startBlock types.Hash, block types.Hash) ([]types.StorageChangeSet, error) { ret := _m.Called(keys, startBlock, block) @@ -1914,15 +1914,15 @@ func (_m *MockSubstrateApiI) QueryStorage(keys []types.StorageKey, startBlock ty return r0, r1 } -// MockSubstrateApiI_QueryStorage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'QueryStorage' + type MockSubstrateApiI_QueryStorage_Call struct { *mock.Call } -// QueryStorage is a helper method to define mock.On call -// - keys []types.StorageKey -// 
- startBlock types.Hash -// - block types.Hash + + + + func (_e *MockSubstrateApiI_Expecter) QueryStorage(keys interface{}, startBlock interface{}, block interface{}) *MockSubstrateApiI_QueryStorage_Call { return &MockSubstrateApiI_QueryStorage_Call{Call: _e.mock.On("QueryStorage", keys, startBlock, block)} } @@ -1944,7 +1944,7 @@ func (_c *MockSubstrateApiI_QueryStorage_Call) RunAndReturn(run func([]types.Sto return _c } -// QueryStorageAt provides a mock function with given fields: keys, block + func (_m *MockSubstrateApiI) QueryStorageAt(keys []types.StorageKey, block types.Hash) ([]types.StorageChangeSet, error) { ret := _m.Called(keys, block) @@ -1974,14 +1974,14 @@ func (_m *MockSubstrateApiI) QueryStorageAt(keys []types.StorageKey, block types return r0, r1 } -// MockSubstrateApiI_QueryStorageAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'QueryStorageAt' + type MockSubstrateApiI_QueryStorageAt_Call struct { *mock.Call } -// QueryStorageAt is a helper method to define mock.On call -// - keys []types.StorageKey -// - block types.Hash + + + func (_e *MockSubstrateApiI_Expecter) QueryStorageAt(keys interface{}, block interface{}) *MockSubstrateApiI_QueryStorageAt_Call { return &MockSubstrateApiI_QueryStorageAt_Call{Call: _e.mock.On("QueryStorageAt", keys, block)} } @@ -2003,7 +2003,7 @@ func (_c *MockSubstrateApiI_QueryStorageAt_Call) RunAndReturn(run func([]types.S return _c } -// QueryStorageAtLatest provides a mock function with given fields: keys + func (_m *MockSubstrateApiI) QueryStorageAtLatest(keys []types.StorageKey) ([]types.StorageChangeSet, error) { ret := _m.Called(keys) @@ -2033,13 +2033,13 @@ func (_m *MockSubstrateApiI) QueryStorageAtLatest(keys []types.StorageKey) ([]ty return r0, r1 } -// MockSubstrateApiI_QueryStorageAtLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'QueryStorageAtLatest' + type MockSubstrateApiI_QueryStorageAtLatest_Call struct { *mock.Call } -// QueryStorageAtLatest is a helper method to define mock.On call -// - keys []types.StorageKey + + func (_e *MockSubstrateApiI_Expecter) QueryStorageAtLatest(keys interface{}) *MockSubstrateApiI_QueryStorageAtLatest_Call { return &MockSubstrateApiI_QueryStorageAtLatest_Call{Call: _e.mock.On("QueryStorageAtLatest", keys)} } @@ -2061,7 +2061,7 @@ func (_c *MockSubstrateApiI_QueryStorageAtLatest_Call) RunAndReturn(run func([]t return _c } -// QueryStorageLatest provides a mock function with given fields: keys, startBlock + func (_m *MockSubstrateApiI) QueryStorageLatest(keys []types.StorageKey, startBlock types.Hash) ([]types.StorageChangeSet, error) { ret := _m.Called(keys, startBlock) @@ -2091,14 +2091,14 @@ func (_m *MockSubstrateApiI) QueryStorageLatest(keys []types.StorageKey, startBl return r0, r1 } -// MockSubstrateApiI_QueryStorageLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'QueryStorageLatest' + type MockSubstrateApiI_QueryStorageLatest_Call struct { *mock.Call } -// QueryStorageLatest is a helper method to define mock.On call -// - keys []types.StorageKey -// - startBlock types.Hash + + + func (_e *MockSubstrateApiI_Expecter) QueryStorageLatest(keys interface{}, startBlock interface{}) *MockSubstrateApiI_QueryStorageLatest_Call { return &MockSubstrateApiI_QueryStorageLatest_Call{Call: _e.mock.On("QueryStorageLatest", keys, startBlock)} } @@ -2120,7 +2120,7 @@ func (_c *MockSubstrateApiI_QueryStorageLatest_Call) RunAndReturn(run func([]typ return _c } -// 
SubmitAndWatchExtrinsic provides a mock function with given fields: xt + func (_m *MockSubstrateApiI) SubmitAndWatchExtrinsic(xt types.Extrinsic) (*author.ExtrinsicStatusSubscription, error) { ret := _m.Called(xt) @@ -2150,13 +2150,13 @@ func (_m *MockSubstrateApiI) SubmitAndWatchExtrinsic(xt types.Extrinsic) (*autho return r0, r1 } -// MockSubstrateApiI_SubmitAndWatchExtrinsic_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubmitAndWatchExtrinsic' + type MockSubstrateApiI_SubmitAndWatchExtrinsic_Call struct { *mock.Call } -// SubmitAndWatchExtrinsic is a helper method to define mock.On call -// - xt types.Extrinsic + + func (_e *MockSubstrateApiI_Expecter) SubmitAndWatchExtrinsic(xt interface{}) *MockSubstrateApiI_SubmitAndWatchExtrinsic_Call { return &MockSubstrateApiI_SubmitAndWatchExtrinsic_Call{Call: _e.mock.On("SubmitAndWatchExtrinsic", xt)} } @@ -2178,7 +2178,7 @@ func (_c *MockSubstrateApiI_SubmitAndWatchExtrinsic_Call) RunAndReturn(run func( return _c } -// SubmitExtrinsic provides a mock function with given fields: xt + func (_m *MockSubstrateApiI) SubmitExtrinsic(xt types.Extrinsic) (types.Hash, error) { ret := _m.Called(xt) @@ -2208,13 +2208,13 @@ func (_m *MockSubstrateApiI) SubmitExtrinsic(xt types.Extrinsic) (types.Hash, er return r0, r1 } -// MockSubstrateApiI_SubmitExtrinsic_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubmitExtrinsic' + type MockSubstrateApiI_SubmitExtrinsic_Call struct { *mock.Call } -// SubmitExtrinsic is a helper method to define mock.On call -// - xt types.Extrinsic + + func (_e *MockSubstrateApiI_Expecter) SubmitExtrinsic(xt interface{}) *MockSubstrateApiI_SubmitExtrinsic_Call { return &MockSubstrateApiI_SubmitExtrinsic_Call{Call: _e.mock.On("SubmitExtrinsic", xt)} } @@ -2236,7 +2236,7 @@ func (_c *MockSubstrateApiI_SubmitExtrinsic_Call) RunAndReturn(run func(types.Ex return _c } -// SubscribeFinalizedHeads provides a mock function with given fields: + func (_m *MockSubstrateApiI) SubscribeFinalizedHeads() (*chain.FinalizedHeadsSubscription, error) { ret := _m.Called() @@ -2266,12 +2266,12 @@ func (_m *MockSubstrateApiI) SubscribeFinalizedHeads() (*chain.FinalizedHeadsSub return r0, r1 } -// MockSubstrateApiI_SubscribeFinalizedHeads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeFinalizedHeads' + type MockSubstrateApiI_SubscribeFinalizedHeads_Call struct { *mock.Call } -// SubscribeFinalizedHeads is a helper method to define mock.On call + func (_e *MockSubstrateApiI_Expecter) SubscribeFinalizedHeads() *MockSubstrateApiI_SubscribeFinalizedHeads_Call { return &MockSubstrateApiI_SubscribeFinalizedHeads_Call{Call: _e.mock.On("SubscribeFinalizedHeads")} } @@ -2293,7 +2293,7 @@ func (_c *MockSubstrateApiI_SubscribeFinalizedHeads_Call) RunAndReturn(run func( return _c } -// SubscribeNewHeads provides a mock function with given fields: + func (_m *MockSubstrateApiI) SubscribeNewHeads() (*chain.NewHeadsSubscription, error) { ret := _m.Called() @@ -2323,12 +2323,12 @@ func (_m *MockSubstrateApiI) SubscribeNewHeads() (*chain.NewHeadsSubscription, e return r0, r1 } -// MockSubstrateApiI_SubscribeNewHeads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeNewHeads' + type MockSubstrateApiI_SubscribeNewHeads_Call struct { *mock.Call } -// SubscribeNewHeads is a helper method to define mock.On call + func (_e *MockSubstrateApiI_Expecter) SubscribeNewHeads() 
*MockSubstrateApiI_SubscribeNewHeads_Call { return &MockSubstrateApiI_SubscribeNewHeads_Call{Call: _e.mock.On("SubscribeNewHeads")} } @@ -2350,7 +2350,7 @@ func (_c *MockSubstrateApiI_SubscribeNewHeads_Call) RunAndReturn(run func() (*ch return _c } -// SubscribeRuntimeVersion provides a mock function with given fields: + func (_m *MockSubstrateApiI) SubscribeRuntimeVersion() (*state.RuntimeVersionSubscription, error) { ret := _m.Called() @@ -2380,12 +2380,12 @@ func (_m *MockSubstrateApiI) SubscribeRuntimeVersion() (*state.RuntimeVersionSub return r0, r1 } -// MockSubstrateApiI_SubscribeRuntimeVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeRuntimeVersion' + type MockSubstrateApiI_SubscribeRuntimeVersion_Call struct { *mock.Call } -// SubscribeRuntimeVersion is a helper method to define mock.On call + func (_e *MockSubstrateApiI_Expecter) SubscribeRuntimeVersion() *MockSubstrateApiI_SubscribeRuntimeVersion_Call { return &MockSubstrateApiI_SubscribeRuntimeVersion_Call{Call: _e.mock.On("SubscribeRuntimeVersion")} } @@ -2407,7 +2407,7 @@ func (_c *MockSubstrateApiI_SubscribeRuntimeVersion_Call) RunAndReturn(run func( return _c } -// SubscribeStorageRaw provides a mock function with given fields: keys + func (_m *MockSubstrateApiI) SubscribeStorageRaw(keys []types.StorageKey) (*state.StorageSubscription, error) { ret := _m.Called(keys) @@ -2437,13 +2437,13 @@ func (_m *MockSubstrateApiI) SubscribeStorageRaw(keys []types.StorageKey) (*stat return r0, r1 } -// MockSubstrateApiI_SubscribeStorageRaw_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeStorageRaw' + type MockSubstrateApiI_SubscribeStorageRaw_Call struct { *mock.Call } -// SubscribeStorageRaw is a helper method to define mock.On call -// - keys []types.StorageKey + + func (_e *MockSubstrateApiI_Expecter) SubscribeStorageRaw(keys interface{}) *MockSubstrateApiI_SubscribeStorageRaw_Call { return &MockSubstrateApiI_SubscribeStorageRaw_Call{Call: _e.mock.On("SubscribeStorageRaw", keys)} } @@ -2465,8 +2465,8 @@ func (_c *MockSubstrateApiI_SubscribeStorageRaw_Call) RunAndReturn(run func([]ty return _c } -// NewMockSubstrateApiI creates a new instance of MockSubstrateApiI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockSubstrateApiI(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/da/celestia/types/mock_CelestiaRPCClient.go b/mocks/github.com/dymensionxyz/dymint/da/celestia/types/mock_CelestiaRPCClient.go index f80184e4f..cb248d62a 100644 --- a/mocks/github.com/dymensionxyz/dymint/da/celestia/types/mock_CelestiaRPCClient.go +++ b/mocks/github.com/dymensionxyz/dymint/da/celestia/types/mock_CelestiaRPCClient.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. 
+ package types @@ -16,7 +16,7 @@ import ( share "github.com/celestiaorg/celestia-openrpc/types/share" ) -// MockCelestiaRPCClient is an autogenerated mock type for the CelestiaRPCClient type + type MockCelestiaRPCClient struct { mock.Mock } @@ -29,7 +29,7 @@ func (_m *MockCelestiaRPCClient) EXPECT() *MockCelestiaRPCClient_Expecter { return &MockCelestiaRPCClient_Expecter{mock: &_m.Mock} } -// Get provides a mock function with given fields: ctx, height, namespace, commitment + func (_m *MockCelestiaRPCClient) Get(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Blob, error) { ret := _m.Called(ctx, height, namespace, commitment) @@ -59,16 +59,16 @@ func (_m *MockCelestiaRPCClient) Get(ctx context.Context, height uint64, namespa return r0, r1 } -// MockCelestiaRPCClient_Get_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Get' + type MockCelestiaRPCClient_Get_Call struct { *mock.Call } -// Get is a helper method to define mock.On call -// - ctx context.Context -// - height uint64 -// - namespace share.Namespace -// - commitment blob.Commitment + + + + + func (_e *MockCelestiaRPCClient_Expecter) Get(ctx interface{}, height interface{}, namespace interface{}, commitment interface{}) *MockCelestiaRPCClient_Get_Call { return &MockCelestiaRPCClient_Get_Call{Call: _e.mock.On("Get", ctx, height, namespace, commitment)} } @@ -90,7 +90,7 @@ func (_c *MockCelestiaRPCClient_Get_Call) RunAndReturn(run func(context.Context, return _c } -// GetAll provides a mock function with given fields: _a0, _a1, _a2 + func (_m *MockCelestiaRPCClient) GetAll(_a0 context.Context, _a1 uint64, _a2 []share.Namespace) ([]*blob.Blob, error) { ret := _m.Called(_a0, _a1, _a2) @@ -120,15 +120,15 @@ func (_m *MockCelestiaRPCClient) GetAll(_a0 context.Context, _a1 uint64, _a2 []s return r0, r1 } -// MockCelestiaRPCClient_GetAll_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAll' + type MockCelestiaRPCClient_GetAll_Call struct { *mock.Call } -// GetAll is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 uint64 -// - _a2 []share.Namespace + + + + func (_e *MockCelestiaRPCClient_Expecter) GetAll(_a0 interface{}, _a1 interface{}, _a2 interface{}) *MockCelestiaRPCClient_GetAll_Call { return &MockCelestiaRPCClient_GetAll_Call{Call: _e.mock.On("GetAll", _a0, _a1, _a2)} } @@ -150,7 +150,7 @@ func (_c *MockCelestiaRPCClient_GetAll_Call) RunAndReturn(run func(context.Conte return _c } -// GetByHeight provides a mock function with given fields: ctx, height + func (_m *MockCelestiaRPCClient) GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { ret := _m.Called(ctx, height) @@ -180,14 +180,14 @@ func (_m *MockCelestiaRPCClient) GetByHeight(ctx context.Context, height uint64) return r0, r1 } -// MockCelestiaRPCClient_GetByHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetByHeight' + type MockCelestiaRPCClient_GetByHeight_Call struct { *mock.Call } -// GetByHeight is a helper method to define mock.On call -// - ctx context.Context -// - height uint64 + + + func (_e *MockCelestiaRPCClient_Expecter) GetByHeight(ctx interface{}, height interface{}) *MockCelestiaRPCClient_GetByHeight_Call { return &MockCelestiaRPCClient_GetByHeight_Call{Call: _e.mock.On("GetByHeight", ctx, height)} } @@ -209,7 +209,7 @@ func (_c *MockCelestiaRPCClient_GetByHeight_Call) RunAndReturn(run func(context. 
return _c } -// GetProof provides a mock function with given fields: ctx, height, namespace, commitment + func (_m *MockCelestiaRPCClient) GetProof(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Proof, error) { ret := _m.Called(ctx, height, namespace, commitment) @@ -239,16 +239,16 @@ func (_m *MockCelestiaRPCClient) GetProof(ctx context.Context, height uint64, na return r0, r1 } -// MockCelestiaRPCClient_GetProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProof' + type MockCelestiaRPCClient_GetProof_Call struct { *mock.Call } -// GetProof is a helper method to define mock.On call -// - ctx context.Context -// - height uint64 -// - namespace share.Namespace -// - commitment blob.Commitment + + + + + func (_e *MockCelestiaRPCClient_Expecter) GetProof(ctx interface{}, height interface{}, namespace interface{}, commitment interface{}) *MockCelestiaRPCClient_GetProof_Call { return &MockCelestiaRPCClient_GetProof_Call{Call: _e.mock.On("GetProof", ctx, height, namespace, commitment)} } @@ -270,7 +270,7 @@ func (_c *MockCelestiaRPCClient_GetProof_Call) RunAndReturn(run func(context.Con return _c } -// GetSignerBalance provides a mock function with given fields: ctx + func (_m *MockCelestiaRPCClient) GetSignerBalance(ctx context.Context) (*sdk.Coin, error) { ret := _m.Called(ctx) @@ -300,13 +300,13 @@ func (_m *MockCelestiaRPCClient) GetSignerBalance(ctx context.Context) (*sdk.Coi return r0, r1 } -// MockCelestiaRPCClient_GetSignerBalance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSignerBalance' + type MockCelestiaRPCClient_GetSignerBalance_Call struct { *mock.Call } -// GetSignerBalance is a helper method to define mock.On call -// - ctx context.Context + + func (_e *MockCelestiaRPCClient_Expecter) GetSignerBalance(ctx interface{}) *MockCelestiaRPCClient_GetSignerBalance_Call { return &MockCelestiaRPCClient_GetSignerBalance_Call{Call: _e.mock.On("GetSignerBalance", ctx)} } @@ -328,7 +328,7 @@ func (_c *MockCelestiaRPCClient_GetSignerBalance_Call) RunAndReturn(run func(con return _c } -// Included provides a mock function with given fields: ctx, height, namespace, proof, commitment + func (_m *MockCelestiaRPCClient) Included(ctx context.Context, height uint64, namespace share.Namespace, proof *blob.Proof, commitment blob.Commitment) (bool, error) { ret := _m.Called(ctx, height, namespace, proof, commitment) @@ -356,17 +356,17 @@ func (_m *MockCelestiaRPCClient) Included(ctx context.Context, height uint64, na return r0, r1 } -// MockCelestiaRPCClient_Included_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Included' + type MockCelestiaRPCClient_Included_Call struct { *mock.Call } -// Included is a helper method to define mock.On call -// - ctx context.Context -// - height uint64 -// - namespace share.Namespace -// - proof *blob.Proof -// - commitment blob.Commitment + + + + + + func (_e *MockCelestiaRPCClient_Expecter) Included(ctx interface{}, height interface{}, namespace interface{}, proof interface{}, commitment interface{}) *MockCelestiaRPCClient_Included_Call { return &MockCelestiaRPCClient_Included_Call{Call: _e.mock.On("Included", ctx, height, namespace, proof, commitment)} } @@ -388,7 +388,7 @@ func (_c *MockCelestiaRPCClient_Included_Call) RunAndReturn(run func(context.Con return _c } -// Submit provides a mock function with given fields: ctx, blobs, options + func (_m *MockCelestiaRPCClient) 
Submit(ctx context.Context, blobs []*blob.Blob, options *blob.SubmitOptions) (uint64, error) { ret := _m.Called(ctx, blobs, options) @@ -416,15 +416,15 @@ func (_m *MockCelestiaRPCClient) Submit(ctx context.Context, blobs []*blob.Blob, return r0, r1 } -// MockCelestiaRPCClient_Submit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Submit' + type MockCelestiaRPCClient_Submit_Call struct { *mock.Call } -// Submit is a helper method to define mock.On call -// - ctx context.Context -// - blobs []*blob.Blob -// - options *blob.SubmitOptions + + + + func (_e *MockCelestiaRPCClient_Expecter) Submit(ctx interface{}, blobs interface{}, options interface{}) *MockCelestiaRPCClient_Submit_Call { return &MockCelestiaRPCClient_Submit_Call{Call: _e.mock.On("Submit", ctx, blobs, options)} } @@ -446,8 +446,8 @@ func (_c *MockCelestiaRPCClient_Submit_Call) RunAndReturn(run func(context.Conte return _c } -// NewMockCelestiaRPCClient creates a new instance of MockCelestiaRPCClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockCelestiaRPCClient(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/da/mock_DataAvailabilityLayerClient.go b/mocks/github.com/dymensionxyz/dymint/da/mock_DataAvailabilityLayerClient.go index 9c20b8b5c..c116222ed 100644 --- a/mocks/github.com/dymensionxyz/dymint/da/mock_DataAvailabilityLayerClient.go +++ b/mocks/github.com/dymensionxyz/dymint/da/mock_DataAvailabilityLayerClient.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. + package da @@ -13,7 +13,7 @@ import ( types "github.com/dymensionxyz/dymint/types" ) -// MockDataAvailabilityLayerClient is an autogenerated mock type for the DataAvailabilityLayerClient type + type MockDataAvailabilityLayerClient struct { mock.Mock } @@ -26,7 +26,7 @@ func (_m *MockDataAvailabilityLayerClient) EXPECT() *MockDataAvailabilityLayerCl return &MockDataAvailabilityLayerClient_Expecter{mock: &_m.Mock} } -// CheckBatchAvailability provides a mock function with given fields: daMetaData + func (_m *MockDataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASubmitMetaData) da.ResultCheckBatch { ret := _m.Called(daMetaData) @@ -44,13 +44,13 @@ func (_m *MockDataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da return r0 } -// MockDataAvailabilityLayerClient_CheckBatchAvailability_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckBatchAvailability' + type MockDataAvailabilityLayerClient_CheckBatchAvailability_Call struct { *mock.Call } -// CheckBatchAvailability is a helper method to define mock.On call -// - daMetaData *da.DASubmitMetaData + + func (_e *MockDataAvailabilityLayerClient_Expecter) CheckBatchAvailability(daMetaData interface{}) *MockDataAvailabilityLayerClient_CheckBatchAvailability_Call { return &MockDataAvailabilityLayerClient_CheckBatchAvailability_Call{Call: _e.mock.On("CheckBatchAvailability", daMetaData)} } @@ -72,7 +72,7 @@ func (_c *MockDataAvailabilityLayerClient_CheckBatchAvailability_Call) RunAndRet return _c } -// GetClientType provides a mock function with given fields: + func (_m *MockDataAvailabilityLayerClient) GetClientType() da.Client { ret := _m.Called() @@ -90,12 +90,12 @@ func (_m *MockDataAvailabilityLayerClient) GetClientType() da.Client { return r0 } -// MockDataAvailabilityLayerClient_GetClientType_Call is a 
*mock.Call that shadows Run/Return methods with type explicit version for method 'GetClientType' + type MockDataAvailabilityLayerClient_GetClientType_Call struct { *mock.Call } -// GetClientType is a helper method to define mock.On call + func (_e *MockDataAvailabilityLayerClient_Expecter) GetClientType() *MockDataAvailabilityLayerClient_GetClientType_Call { return &MockDataAvailabilityLayerClient_GetClientType_Call{Call: _e.mock.On("GetClientType")} } @@ -117,7 +117,7 @@ func (_c *MockDataAvailabilityLayerClient_GetClientType_Call) RunAndReturn(run f return _c } -// GetMaxBlobSizeBytes provides a mock function with given fields: + func (_m *MockDataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { ret := _m.Called() @@ -135,12 +135,12 @@ func (_m *MockDataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { return r0 } -// MockDataAvailabilityLayerClient_GetMaxBlobSizeBytes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetMaxBlobSizeBytes' + type MockDataAvailabilityLayerClient_GetMaxBlobSizeBytes_Call struct { *mock.Call } -// GetMaxBlobSizeBytes is a helper method to define mock.On call + func (_e *MockDataAvailabilityLayerClient_Expecter) GetMaxBlobSizeBytes() *MockDataAvailabilityLayerClient_GetMaxBlobSizeBytes_Call { return &MockDataAvailabilityLayerClient_GetMaxBlobSizeBytes_Call{Call: _e.mock.On("GetMaxBlobSizeBytes")} } @@ -162,7 +162,7 @@ func (_c *MockDataAvailabilityLayerClient_GetMaxBlobSizeBytes_Call) RunAndReturn return _c } -// GetSignerBalance provides a mock function with given fields: + func (_m *MockDataAvailabilityLayerClient) GetSignerBalance() (da.Balance, error) { ret := _m.Called() @@ -190,12 +190,12 @@ func (_m *MockDataAvailabilityLayerClient) GetSignerBalance() (da.Balance, error return r0, r1 } -// MockDataAvailabilityLayerClient_GetSignerBalance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSignerBalance' + type MockDataAvailabilityLayerClient_GetSignerBalance_Call struct { *mock.Call } -// GetSignerBalance is a helper method to define mock.On call + func (_e *MockDataAvailabilityLayerClient_Expecter) GetSignerBalance() *MockDataAvailabilityLayerClient_GetSignerBalance_Call { return &MockDataAvailabilityLayerClient_GetSignerBalance_Call{Call: _e.mock.On("GetSignerBalance")} } @@ -217,7 +217,7 @@ func (_c *MockDataAvailabilityLayerClient_GetSignerBalance_Call) RunAndReturn(ru return _c } -// Init provides a mock function with given fields: config, pubsubServer, kvStore, logger, options + func (_m *MockDataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.Server, kvStore store.KV, logger types.Logger, options ...da.Option) error { _va := make([]interface{}, len(options)) for _i := range options { @@ -242,17 +242,17 @@ func (_m *MockDataAvailabilityLayerClient) Init(config []byte, pubsubServer *pub return r0 } -// MockDataAvailabilityLayerClient_Init_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Init' + type MockDataAvailabilityLayerClient_Init_Call struct { *mock.Call } -// Init is a helper method to define mock.On call -// - config []byte -// - pubsubServer *pubsub.Server -// - kvStore store.KV -// - logger types.Logger -// - options ...da.Option + + + + + + func (_e *MockDataAvailabilityLayerClient_Expecter) Init(config interface{}, pubsubServer interface{}, kvStore interface{}, logger interface{}, options ...interface{}) *MockDataAvailabilityLayerClient_Init_Call { return 
&MockDataAvailabilityLayerClient_Init_Call{Call: _e.mock.On("Init", append([]interface{}{config, pubsubServer, kvStore, logger}, options...)...)} @@ -281,7 +281,7 @@ func (_c *MockDataAvailabilityLayerClient_Init_Call) RunAndReturn(run func([]byt return _c } -// Start provides a mock function with given fields: + func (_m *MockDataAvailabilityLayerClient) Start() error { ret := _m.Called() @@ -299,12 +299,12 @@ func (_m *MockDataAvailabilityLayerClient) Start() error { return r0 } -// MockDataAvailabilityLayerClient_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' + type MockDataAvailabilityLayerClient_Start_Call struct { *mock.Call } -// Start is a helper method to define mock.On call + func (_e *MockDataAvailabilityLayerClient_Expecter) Start() *MockDataAvailabilityLayerClient_Start_Call { return &MockDataAvailabilityLayerClient_Start_Call{Call: _e.mock.On("Start")} } @@ -326,7 +326,7 @@ func (_c *MockDataAvailabilityLayerClient_Start_Call) RunAndReturn(run func() er return _c } -// Stop provides a mock function with given fields: + func (_m *MockDataAvailabilityLayerClient) Stop() error { ret := _m.Called() @@ -344,12 +344,12 @@ func (_m *MockDataAvailabilityLayerClient) Stop() error { return r0 } -// MockDataAvailabilityLayerClient_Stop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stop' + type MockDataAvailabilityLayerClient_Stop_Call struct { *mock.Call } -// Stop is a helper method to define mock.On call + func (_e *MockDataAvailabilityLayerClient_Expecter) Stop() *MockDataAvailabilityLayerClient_Stop_Call { return &MockDataAvailabilityLayerClient_Stop_Call{Call: _e.mock.On("Stop")} } @@ -371,7 +371,7 @@ func (_c *MockDataAvailabilityLayerClient_Stop_Call) RunAndReturn(run func() err return _c } -// SubmitBatch provides a mock function with given fields: batch + func (_m *MockDataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultSubmitBatch { ret := _m.Called(batch) @@ -389,13 +389,13 @@ func (_m *MockDataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.Re return r0 } -// MockDataAvailabilityLayerClient_SubmitBatch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubmitBatch' + type MockDataAvailabilityLayerClient_SubmitBatch_Call struct { *mock.Call } -// SubmitBatch is a helper method to define mock.On call -// - batch *types.Batch + + func (_e *MockDataAvailabilityLayerClient_Expecter) SubmitBatch(batch interface{}) *MockDataAvailabilityLayerClient_SubmitBatch_Call { return &MockDataAvailabilityLayerClient_SubmitBatch_Call{Call: _e.mock.On("SubmitBatch", batch)} } @@ -417,17 +417,17 @@ func (_c *MockDataAvailabilityLayerClient_SubmitBatch_Call) RunAndReturn(run fun return _c } -// WaitForSyncing provides a mock function with given fields: + func (_m *MockDataAvailabilityLayerClient) WaitForSyncing() { _m.Called() } -// MockDataAvailabilityLayerClient_WaitForSyncing_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WaitForSyncing' + type MockDataAvailabilityLayerClient_WaitForSyncing_Call struct { *mock.Call } -// WaitForSyncing is a helper method to define mock.On call + func (_e *MockDataAvailabilityLayerClient_Expecter) WaitForSyncing() *MockDataAvailabilityLayerClient_WaitForSyncing_Call { return &MockDataAvailabilityLayerClient_WaitForSyncing_Call{Call: _e.mock.On("WaitForSyncing")} } @@ -449,8 +449,8 @@ func (_c *MockDataAvailabilityLayerClient_WaitForSyncing_Call) 
RunAndReturn(run return _c } -// NewMockDataAvailabilityLayerClient creates a new instance of MockDataAvailabilityLayerClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockDataAvailabilityLayerClient(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/p2p/mock_ProposerGetter.go b/mocks/github.com/dymensionxyz/dymint/p2p/mock_ProposerGetter.go index 5396f942b..de07e1a71 100644 --- a/mocks/github.com/dymensionxyz/dymint/p2p/mock_ProposerGetter.go +++ b/mocks/github.com/dymensionxyz/dymint/p2p/mock_ProposerGetter.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. + package p2p @@ -7,7 +7,7 @@ import ( crypto "github.com/tendermint/tendermint/crypto" ) -// MockProposerGetter is an autogenerated mock type for the ProposerGetter type + type MockProposerGetter struct { mock.Mock } @@ -20,7 +20,7 @@ func (_m *MockProposerGetter) EXPECT() *MockProposerGetter_Expecter { return &MockProposerGetter_Expecter{mock: &_m.Mock} } -// GetProposerPubKey provides a mock function with given fields: + func (_m *MockProposerGetter) GetProposerPubKey() crypto.PubKey { ret := _m.Called() @@ -40,12 +40,12 @@ func (_m *MockProposerGetter) GetProposerPubKey() crypto.PubKey { return r0 } -// MockProposerGetter_GetProposerPubKey_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProposerPubKey' + type MockProposerGetter_GetProposerPubKey_Call struct { *mock.Call } -// GetProposerPubKey is a helper method to define mock.On call + func (_e *MockProposerGetter_Expecter) GetProposerPubKey() *MockProposerGetter_GetProposerPubKey_Call { return &MockProposerGetter_GetProposerPubKey_Call{Call: _e.mock.On("GetProposerPubKey")} } @@ -67,7 +67,7 @@ func (_c *MockProposerGetter_GetProposerPubKey_Call) RunAndReturn(run func() cry return _c } -// GetRevision provides a mock function with given fields: + func (_m *MockProposerGetter) GetRevision() uint64 { ret := _m.Called() @@ -85,12 +85,12 @@ func (_m *MockProposerGetter) GetRevision() uint64 { return r0 } -// MockProposerGetter_GetRevision_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRevision' + type MockProposerGetter_GetRevision_Call struct { *mock.Call } -// GetRevision is a helper method to define mock.On call + func (_e *MockProposerGetter_Expecter) GetRevision() *MockProposerGetter_GetRevision_Call { return &MockProposerGetter_GetRevision_Call{Call: _e.mock.On("GetRevision")} } @@ -112,8 +112,8 @@ func (_c *MockProposerGetter_GetRevision_Call) RunAndReturn(run func() uint64) * return _c } -// NewMockProposerGetter creates a new instance of MockProposerGetter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockProposerGetter(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/p2p/mock_StateGetter.go b/mocks/github.com/dymensionxyz/dymint/p2p/mock_StateGetter.go index 4377638cb..477be16f8 100644 --- a/mocks/github.com/dymensionxyz/dymint/p2p/mock_StateGetter.go +++ b/mocks/github.com/dymensionxyz/dymint/p2p/mock_StateGetter.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. 
+ package p2p @@ -7,7 +7,7 @@ import ( crypto "github.com/tendermint/tendermint/crypto" ) -// MockStateGetter is an autogenerated mock type for the StateGetter type + type MockStateGetter struct { mock.Mock } @@ -20,7 +20,7 @@ func (_m *MockStateGetter) EXPECT() *MockStateGetter_Expecter { return &MockStateGetter_Expecter{mock: &_m.Mock} } -// GetProposerPubKey provides a mock function with given fields: + func (_m *MockStateGetter) GetProposerPubKey() crypto.PubKey { ret := _m.Called() @@ -40,12 +40,12 @@ func (_m *MockStateGetter) GetProposerPubKey() crypto.PubKey { return r0 } -// MockStateGetter_GetProposerPubKey_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProposerPubKey' + type MockStateGetter_GetProposerPubKey_Call struct { *mock.Call } -// GetProposerPubKey is a helper method to define mock.On call + func (_e *MockStateGetter_Expecter) GetProposerPubKey() *MockStateGetter_GetProposerPubKey_Call { return &MockStateGetter_GetProposerPubKey_Call{Call: _e.mock.On("GetProposerPubKey")} } @@ -67,7 +67,7 @@ func (_c *MockStateGetter_GetProposerPubKey_Call) RunAndReturn(run func() crypto return _c } -// GetRevision provides a mock function with given fields: + func (_m *MockStateGetter) GetRevision() uint64 { ret := _m.Called() @@ -85,12 +85,12 @@ func (_m *MockStateGetter) GetRevision() uint64 { return r0 } -// MockStateGetter_GetRevision_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRevision' + type MockStateGetter_GetRevision_Call struct { *mock.Call } -// GetRevision is a helper method to define mock.On call + func (_e *MockStateGetter_Expecter) GetRevision() *MockStateGetter_GetRevision_Call { return &MockStateGetter_GetRevision_Call{Call: _e.mock.On("GetRevision")} } @@ -112,8 +112,8 @@ func (_c *MockStateGetter_GetRevision_Call) RunAndReturn(run func() uint64) *Moc return _c } -// NewMockStateGetter creates a new instance of MockStateGetter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockStateGetter(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/settlement/dymension/mock_CosmosClient.go b/mocks/github.com/dymensionxyz/dymint/settlement/dymension/mock_CosmosClient.go index ade8efe9b..f79c856c1 100644 --- a/mocks/github.com/dymensionxyz/dymint/settlement/dymension/mock_CosmosClient.go +++ b/mocks/github.com/dymensionxyz/dymint/settlement/dymension/mock_CosmosClient.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. 
+ package dymension @@ -22,7 +22,7 @@ import ( types "github.com/cosmos/cosmos-sdk/types" ) -// MockCosmosClient is an autogenerated mock type for the CosmosClient type + type MockCosmosClient struct { mock.Mock } @@ -35,7 +35,7 @@ func (_m *MockCosmosClient) EXPECT() *MockCosmosClient_Expecter { return &MockCosmosClient_Expecter{mock: &_m.Mock} } -// BroadcastTx provides a mock function with given fields: accountName, msgs + func (_m *MockCosmosClient) BroadcastTx(accountName string, msgs ...types.Msg) (cosmosclient.Response, error) { _va := make([]interface{}, len(msgs)) for _i := range msgs { @@ -70,14 +70,14 @@ func (_m *MockCosmosClient) BroadcastTx(accountName string, msgs ...types.Msg) ( return r0, r1 } -// MockCosmosClient_BroadcastTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BroadcastTx' + type MockCosmosClient_BroadcastTx_Call struct { *mock.Call } -// BroadcastTx is a helper method to define mock.On call -// - accountName string -// - msgs ...types.Msg + + + func (_e *MockCosmosClient_Expecter) BroadcastTx(accountName interface{}, msgs ...interface{}) *MockCosmosClient_BroadcastTx_Call { return &MockCosmosClient_BroadcastTx_Call{Call: _e.mock.On("BroadcastTx", append([]interface{}{accountName}, msgs...)...)} @@ -106,7 +106,7 @@ func (_c *MockCosmosClient_BroadcastTx_Call) RunAndReturn(run func(string, ...ty return _c } -// Context provides a mock function with given fields: + func (_m *MockCosmosClient) Context() client.Context { ret := _m.Called() @@ -124,12 +124,12 @@ func (_m *MockCosmosClient) Context() client.Context { return r0 } -// MockCosmosClient_Context_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Context' + type MockCosmosClient_Context_Call struct { *mock.Call } -// Context is a helper method to define mock.On call + func (_e *MockCosmosClient_Expecter) Context() *MockCosmosClient_Context_Call { return &MockCosmosClient_Context_Call{Call: _e.mock.On("Context")} } @@ -151,7 +151,7 @@ func (_c *MockCosmosClient_Context_Call) RunAndReturn(run func() client.Context) return _c } -// EventListenerQuit provides a mock function with given fields: + func (_m *MockCosmosClient) EventListenerQuit() <-chan struct{} { ret := _m.Called() @@ -171,12 +171,12 @@ func (_m *MockCosmosClient) EventListenerQuit() <-chan struct{} { return r0 } -// MockCosmosClient_EventListenerQuit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EventListenerQuit' + type MockCosmosClient_EventListenerQuit_Call struct { *mock.Call } -// EventListenerQuit is a helper method to define mock.On call + func (_e *MockCosmosClient_Expecter) EventListenerQuit() *MockCosmosClient_EventListenerQuit_Call { return &MockCosmosClient_EventListenerQuit_Call{Call: _e.mock.On("EventListenerQuit")} } @@ -198,7 +198,7 @@ func (_c *MockCosmosClient_EventListenerQuit_Call) RunAndReturn(run func() <-cha return _c } -// GetAccount provides a mock function with given fields: accountName + func (_m *MockCosmosClient) GetAccount(accountName string) (cosmosaccount.Account, error) { ret := _m.Called(accountName) @@ -226,13 +226,13 @@ func (_m *MockCosmosClient) GetAccount(accountName string) (cosmosaccount.Accoun return r0, r1 } -// MockCosmosClient_GetAccount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAccount' + type MockCosmosClient_GetAccount_Call struct { *mock.Call } -// GetAccount is a helper method to define mock.On call -// - 
accountName string + + func (_e *MockCosmosClient_Expecter) GetAccount(accountName interface{}) *MockCosmosClient_GetAccount_Call { return &MockCosmosClient_GetAccount_Call{Call: _e.mock.On("GetAccount", accountName)} } @@ -254,7 +254,7 @@ func (_c *MockCosmosClient_GetAccount_Call) RunAndReturn(run func(string) (cosmo return _c } -// GetBalance provides a mock function with given fields: ctx, accountName, denom + func (_m *MockCosmosClient) GetBalance(ctx context.Context, accountName string, denom string) (*types.Coin, error) { ret := _m.Called(ctx, accountName, denom) @@ -284,15 +284,15 @@ func (_m *MockCosmosClient) GetBalance(ctx context.Context, accountName string, return r0, r1 } -// MockCosmosClient_GetBalance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBalance' + type MockCosmosClient_GetBalance_Call struct { *mock.Call } -// GetBalance is a helper method to define mock.On call -// - ctx context.Context -// - accountName string -// - denom string + + + + func (_e *MockCosmosClient_Expecter) GetBalance(ctx interface{}, accountName interface{}, denom interface{}) *MockCosmosClient_GetBalance_Call { return &MockCosmosClient_GetBalance_Call{Call: _e.mock.On("GetBalance", ctx, accountName, denom)} } @@ -314,7 +314,7 @@ func (_c *MockCosmosClient_GetBalance_Call) RunAndReturn(run func(context.Contex return _c } -// GetRollappClient provides a mock function with given fields: + func (_m *MockCosmosClient) GetRollappClient() rollapp.QueryClient { ret := _m.Called() @@ -334,12 +334,12 @@ func (_m *MockCosmosClient) GetRollappClient() rollapp.QueryClient { return r0 } -// MockCosmosClient_GetRollappClient_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRollappClient' + type MockCosmosClient_GetRollappClient_Call struct { *mock.Call } -// GetRollappClient is a helper method to define mock.On call + func (_e *MockCosmosClient_Expecter) GetRollappClient() *MockCosmosClient_GetRollappClient_Call { return &MockCosmosClient_GetRollappClient_Call{Call: _e.mock.On("GetRollappClient")} } @@ -361,7 +361,7 @@ func (_c *MockCosmosClient_GetRollappClient_Call) RunAndReturn(run func() rollap return _c } -// GetSequencerClient provides a mock function with given fields: + func (_m *MockCosmosClient) GetSequencerClient() sequencer.QueryClient { ret := _m.Called() @@ -381,12 +381,12 @@ func (_m *MockCosmosClient) GetSequencerClient() sequencer.QueryClient { return r0 } -// MockCosmosClient_GetSequencerClient_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSequencerClient' + type MockCosmosClient_GetSequencerClient_Call struct { *mock.Call } -// GetSequencerClient is a helper method to define mock.On call + func (_e *MockCosmosClient_Expecter) GetSequencerClient() *MockCosmosClient_GetSequencerClient_Call { return &MockCosmosClient_GetSequencerClient_Call{Call: _e.mock.On("GetSequencerClient")} } @@ -408,7 +408,7 @@ func (_c *MockCosmosClient_GetSequencerClient_Call) RunAndReturn(run func() sequ return _c } -// StartEventListener provides a mock function with given fields: + func (_m *MockCosmosClient) StartEventListener() error { ret := _m.Called() @@ -426,12 +426,12 @@ func (_m *MockCosmosClient) StartEventListener() error { return r0 } -// MockCosmosClient_StartEventListener_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StartEventListener' + type MockCosmosClient_StartEventListener_Call struct { *mock.Call } -// 
StartEventListener is a helper method to define mock.On call + func (_e *MockCosmosClient_Expecter) StartEventListener() *MockCosmosClient_StartEventListener_Call { return &MockCosmosClient_StartEventListener_Call{Call: _e.mock.On("StartEventListener")} } @@ -453,7 +453,7 @@ func (_c *MockCosmosClient_StartEventListener_Call) RunAndReturn(run func() erro return _c } -// StopEventListener provides a mock function with given fields: + func (_m *MockCosmosClient) StopEventListener() error { ret := _m.Called() @@ -471,12 +471,12 @@ func (_m *MockCosmosClient) StopEventListener() error { return r0 } -// MockCosmosClient_StopEventListener_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StopEventListener' + type MockCosmosClient_StopEventListener_Call struct { *mock.Call } -// StopEventListener is a helper method to define mock.On call + func (_e *MockCosmosClient_Expecter) StopEventListener() *MockCosmosClient_StopEventListener_Call { return &MockCosmosClient_StopEventListener_Call{Call: _e.mock.On("StopEventListener")} } @@ -498,7 +498,7 @@ func (_c *MockCosmosClient_StopEventListener_Call) RunAndReturn(run func() error return _c } -// SubscribeToEvents provides a mock function with given fields: ctx, subscriber, query, outCapacity + func (_m *MockCosmosClient) SubscribeToEvents(ctx context.Context, subscriber string, query string, outCapacity ...int) (<-chan coretypes.ResultEvent, error) { _va := make([]interface{}, len(outCapacity)) for _i := range outCapacity { @@ -535,16 +535,16 @@ func (_m *MockCosmosClient) SubscribeToEvents(ctx context.Context, subscriber st return r0, r1 } -// MockCosmosClient_SubscribeToEvents_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeToEvents' + type MockCosmosClient_SubscribeToEvents_Call struct { *mock.Call } -// SubscribeToEvents is a helper method to define mock.On call -// - ctx context.Context -// - subscriber string -// - query string -// - outCapacity ...int + + + + + func (_e *MockCosmosClient_Expecter) SubscribeToEvents(ctx interface{}, subscriber interface{}, query interface{}, outCapacity ...interface{}) *MockCosmosClient_SubscribeToEvents_Call { return &MockCosmosClient_SubscribeToEvents_Call{Call: _e.mock.On("SubscribeToEvents", append([]interface{}{ctx, subscriber, query}, outCapacity...)...)} @@ -573,7 +573,7 @@ func (_c *MockCosmosClient_SubscribeToEvents_Call) RunAndReturn(run func(context return _c } -// UnsubscribeAll provides a mock function with given fields: ctx, subscriber + func (_m *MockCosmosClient) UnsubscribeAll(ctx context.Context, subscriber string) error { ret := _m.Called(ctx, subscriber) @@ -591,14 +591,14 @@ func (_m *MockCosmosClient) UnsubscribeAll(ctx context.Context, subscriber strin return r0 } -// MockCosmosClient_UnsubscribeAll_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UnsubscribeAll' + type MockCosmosClient_UnsubscribeAll_Call struct { *mock.Call } -// UnsubscribeAll is a helper method to define mock.On call -// - ctx context.Context -// - subscriber string + + + func (_e *MockCosmosClient_Expecter) UnsubscribeAll(ctx interface{}, subscriber interface{}) *MockCosmosClient_UnsubscribeAll_Call { return &MockCosmosClient_UnsubscribeAll_Call{Call: _e.mock.On("UnsubscribeAll", ctx, subscriber)} } @@ -620,8 +620,8 @@ func (_c *MockCosmosClient_UnsubscribeAll_Call) RunAndReturn(run func(context.Co return _c } -// NewMockCosmosClient creates a new instance of MockCosmosClient. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockCosmosClient(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/settlement/mock_ClientI.go b/mocks/github.com/dymensionxyz/dymint/settlement/mock_ClientI.go index a609b4d42..c41be7a74 100644 --- a/mocks/github.com/dymensionxyz/dymint/settlement/mock_ClientI.go +++ b/mocks/github.com/dymensionxyz/dymint/settlement/mock_ClientI.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. + package settlement @@ -15,7 +15,7 @@ import ( types "github.com/dymensionxyz/dymint/types" ) -// MockClientI is an autogenerated mock type for the ClientI type + type MockClientI struct { mock.Mock } @@ -28,7 +28,7 @@ func (_m *MockClientI) EXPECT() *MockClientI_Expecter { return &MockClientI_Expecter{mock: &_m.Mock} } -// GetAllSequencers provides a mock function with given fields: + func (_m *MockClientI) GetAllSequencers() ([]types.Sequencer, error) { ret := _m.Called() @@ -58,12 +58,12 @@ func (_m *MockClientI) GetAllSequencers() ([]types.Sequencer, error) { return r0, r1 } -// MockClientI_GetAllSequencers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllSequencers' + type MockClientI_GetAllSequencers_Call struct { *mock.Call } -// GetAllSequencers is a helper method to define mock.On call + func (_e *MockClientI_Expecter) GetAllSequencers() *MockClientI_GetAllSequencers_Call { return &MockClientI_GetAllSequencers_Call{Call: _e.mock.On("GetAllSequencers")} } @@ -85,7 +85,7 @@ func (_c *MockClientI_GetAllSequencers_Call) RunAndReturn(run func() ([]types.Se return _c } -// GetBatchAtHeight provides a mock function with given fields: index + func (_m *MockClientI) GetBatchAtHeight(index uint64) (*settlement.ResultRetrieveBatch, error) { ret := _m.Called(index) @@ -115,13 +115,13 @@ func (_m *MockClientI) GetBatchAtHeight(index uint64) (*settlement.ResultRetriev return r0, r1 } -// MockClientI_GetBatchAtHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBatchAtHeight' + type MockClientI_GetBatchAtHeight_Call struct { *mock.Call } -// GetBatchAtHeight is a helper method to define mock.On call -// - index uint64 + + func (_e *MockClientI_Expecter) GetBatchAtHeight(index interface{}) *MockClientI_GetBatchAtHeight_Call { return &MockClientI_GetBatchAtHeight_Call{Call: _e.mock.On("GetBatchAtHeight", index)} } @@ -143,7 +143,7 @@ func (_c *MockClientI_GetBatchAtHeight_Call) RunAndReturn(run func(uint64) (*set return _c } -// GetBatchAtIndex provides a mock function with given fields: index + func (_m *MockClientI) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, error) { ret := _m.Called(index) @@ -173,13 +173,13 @@ func (_m *MockClientI) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieve return r0, r1 } -// MockClientI_GetBatchAtIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBatchAtIndex' + type MockClientI_GetBatchAtIndex_Call struct { *mock.Call } -// GetBatchAtIndex is a helper method to define mock.On call -// - index uint64 + + func (_e *MockClientI_Expecter) GetBatchAtIndex(index interface{}) *MockClientI_GetBatchAtIndex_Call { return &MockClientI_GetBatchAtIndex_Call{Call: _e.mock.On("GetBatchAtIndex", index)} } @@ -201,7 +201,7 @@ func (_c *MockClientI_GetBatchAtIndex_Call) RunAndReturn(run func(uint64) 
(*sett return _c } -// GetBondedSequencers provides a mock function with given fields: + func (_m *MockClientI) GetBondedSequencers() ([]types.Sequencer, error) { ret := _m.Called() @@ -231,12 +231,12 @@ func (_m *MockClientI) GetBondedSequencers() ([]types.Sequencer, error) { return r0, r1 } -// MockClientI_GetBondedSequencers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBondedSequencers' + type MockClientI_GetBondedSequencers_Call struct { *mock.Call } -// GetBondedSequencers is a helper method to define mock.On call + func (_e *MockClientI_Expecter) GetBondedSequencers() *MockClientI_GetBondedSequencers_Call { return &MockClientI_GetBondedSequencers_Call{Call: _e.mock.On("GetBondedSequencers")} } @@ -258,7 +258,7 @@ func (_c *MockClientI_GetBondedSequencers_Call) RunAndReturn(run func() ([]types return _c } -// GetLatestBatch provides a mock function with given fields: + func (_m *MockClientI) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { ret := _m.Called() @@ -288,12 +288,12 @@ func (_m *MockClientI) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) return r0, r1 } -// MockClientI_GetLatestBatch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestBatch' + type MockClientI_GetLatestBatch_Call struct { *mock.Call } -// GetLatestBatch is a helper method to define mock.On call + func (_e *MockClientI_Expecter) GetLatestBatch() *MockClientI_GetLatestBatch_Call { return &MockClientI_GetLatestBatch_Call{Call: _e.mock.On("GetLatestBatch")} } @@ -315,7 +315,7 @@ func (_c *MockClientI_GetLatestBatch_Call) RunAndReturn(run func() (*settlement. return _c } -// GetLatestFinalizedHeight provides a mock function with given fields: + func (_m *MockClientI) GetLatestFinalizedHeight() (uint64, error) { ret := _m.Called() @@ -343,12 +343,12 @@ func (_m *MockClientI) GetLatestFinalizedHeight() (uint64, error) { return r0, r1 } -// MockClientI_GetLatestFinalizedHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestFinalizedHeight' + type MockClientI_GetLatestFinalizedHeight_Call struct { *mock.Call } -// GetLatestFinalizedHeight is a helper method to define mock.On call + func (_e *MockClientI_Expecter) GetLatestFinalizedHeight() *MockClientI_GetLatestFinalizedHeight_Call { return &MockClientI_GetLatestFinalizedHeight_Call{Call: _e.mock.On("GetLatestFinalizedHeight")} } @@ -370,7 +370,7 @@ func (_c *MockClientI_GetLatestFinalizedHeight_Call) RunAndReturn(run func() (ui return _c } -// GetLatestHeight provides a mock function with given fields: + func (_m *MockClientI) GetLatestHeight() (uint64, error) { ret := _m.Called() @@ -398,12 +398,12 @@ func (_m *MockClientI) GetLatestHeight() (uint64, error) { return r0, r1 } -// MockClientI_GetLatestHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestHeight' + type MockClientI_GetLatestHeight_Call struct { *mock.Call } -// GetLatestHeight is a helper method to define mock.On call + func (_e *MockClientI_Expecter) GetLatestHeight() *MockClientI_GetLatestHeight_Call { return &MockClientI_GetLatestHeight_Call{Call: _e.mock.On("GetLatestHeight")} } @@ -425,7 +425,7 @@ func (_c *MockClientI_GetLatestHeight_Call) RunAndReturn(run func() (uint64, err return _c } -// GetNextProposer provides a mock function with given fields: + func (_m *MockClientI) GetNextProposer() (*types.Sequencer, error) { ret := _m.Called() @@ -455,12 +455,12 @@ func (_m 
*MockClientI) GetNextProposer() (*types.Sequencer, error) { return r0, r1 } -// MockClientI_GetNextProposer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNextProposer' + type MockClientI_GetNextProposer_Call struct { *mock.Call } -// GetNextProposer is a helper method to define mock.On call + func (_e *MockClientI_Expecter) GetNextProposer() *MockClientI_GetNextProposer_Call { return &MockClientI_GetNextProposer_Call{Call: _e.mock.On("GetNextProposer")} } @@ -482,7 +482,7 @@ func (_c *MockClientI_GetNextProposer_Call) RunAndReturn(run func() (*types.Sequ return _c } -// GetObsoleteDrs provides a mock function with given fields: + func (_m *MockClientI) GetObsoleteDrs() ([]uint32, error) { ret := _m.Called() @@ -512,12 +512,12 @@ func (_m *MockClientI) GetObsoleteDrs() ([]uint32, error) { return r0, r1 } -// MockClientI_GetObsoleteDrs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetObsoleteDrs' + type MockClientI_GetObsoleteDrs_Call struct { *mock.Call } -// GetObsoleteDrs is a helper method to define mock.On call + func (_e *MockClientI_Expecter) GetObsoleteDrs() *MockClientI_GetObsoleteDrs_Call { return &MockClientI_GetObsoleteDrs_Call{Call: _e.mock.On("GetObsoleteDrs")} } @@ -539,7 +539,7 @@ func (_c *MockClientI_GetObsoleteDrs_Call) RunAndReturn(run func() ([]uint32, er return _c } -// GetProposerAtHeight provides a mock function with given fields: height + func (_m *MockClientI) GetProposerAtHeight(height int64) (*types.Sequencer, error) { ret := _m.Called(height) @@ -569,13 +569,13 @@ func (_m *MockClientI) GetProposerAtHeight(height int64) (*types.Sequencer, erro return r0, r1 } -// MockClientI_GetProposerAtHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProposerAtHeight' + type MockClientI_GetProposerAtHeight_Call struct { *mock.Call } -// GetProposerAtHeight is a helper method to define mock.On call -// - height int64 + + func (_e *MockClientI_Expecter) GetProposerAtHeight(height interface{}) *MockClientI_GetProposerAtHeight_Call { return &MockClientI_GetProposerAtHeight_Call{Call: _e.mock.On("GetProposerAtHeight", height)} } @@ -597,7 +597,7 @@ func (_c *MockClientI_GetProposerAtHeight_Call) RunAndReturn(run func(int64) (*t return _c } -// GetRollapp provides a mock function with given fields: + func (_m *MockClientI) GetRollapp() (*types.Rollapp, error) { ret := _m.Called() @@ -627,12 +627,12 @@ func (_m *MockClientI) GetRollapp() (*types.Rollapp, error) { return r0, r1 } -// MockClientI_GetRollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRollapp' + type MockClientI_GetRollapp_Call struct { *mock.Call } -// GetRollapp is a helper method to define mock.On call + func (_e *MockClientI_Expecter) GetRollapp() *MockClientI_GetRollapp_Call { return &MockClientI_GetRollapp_Call{Call: _e.mock.On("GetRollapp")} } @@ -654,7 +654,7 @@ func (_c *MockClientI_GetRollapp_Call) RunAndReturn(run func() (*types.Rollapp, return _c } -// GetSequencerByAddress provides a mock function with given fields: address + func (_m *MockClientI) GetSequencerByAddress(address string) (types.Sequencer, error) { ret := _m.Called(address) @@ -682,13 +682,13 @@ func (_m *MockClientI) GetSequencerByAddress(address string) (types.Sequencer, e return r0, r1 } -// MockClientI_GetSequencerByAddress_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSequencerByAddress' + type 
MockClientI_GetSequencerByAddress_Call struct { *mock.Call } -// GetSequencerByAddress is a helper method to define mock.On call -// - address string + + func (_e *MockClientI_Expecter) GetSequencerByAddress(address interface{}) *MockClientI_GetSequencerByAddress_Call { return &MockClientI_GetSequencerByAddress_Call{Call: _e.mock.On("GetSequencerByAddress", address)} } @@ -710,7 +710,7 @@ func (_c *MockClientI_GetSequencerByAddress_Call) RunAndReturn(run func(string) return _c } -// GetSignerBalance provides a mock function with given fields: + func (_m *MockClientI) GetSignerBalance() (types.Balance, error) { ret := _m.Called() @@ -738,12 +738,12 @@ func (_m *MockClientI) GetSignerBalance() (types.Balance, error) { return r0, r1 } -// MockClientI_GetSignerBalance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSignerBalance' + type MockClientI_GetSignerBalance_Call struct { *mock.Call } -// GetSignerBalance is a helper method to define mock.On call + func (_e *MockClientI_Expecter) GetSignerBalance() *MockClientI_GetSignerBalance_Call { return &MockClientI_GetSignerBalance_Call{Call: _e.mock.On("GetSignerBalance")} } @@ -765,7 +765,7 @@ func (_c *MockClientI_GetSignerBalance_Call) RunAndReturn(run func() (types.Bala return _c } -// Init provides a mock function with given fields: config, rollappId, _a2, logger, options + func (_m *MockClientI) Init(config settlement.Config, rollappId string, _a2 *pubsub.Server, logger types.Logger, options ...settlement.Option) error { _va := make([]interface{}, len(options)) for _i := range options { @@ -790,17 +790,17 @@ func (_m *MockClientI) Init(config settlement.Config, rollappId string, _a2 *pub return r0 } -// MockClientI_Init_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Init' + type MockClientI_Init_Call struct { *mock.Call } -// Init is a helper method to define mock.On call -// - config settlement.Config -// - rollappId string -// - _a2 *pubsub.Server -// - logger types.Logger -// - options ...settlement.Option + + + + + + func (_e *MockClientI_Expecter) Init(config interface{}, rollappId interface{}, _a2 interface{}, logger interface{}, options ...interface{}) *MockClientI_Init_Call { return &MockClientI_Init_Call{Call: _e.mock.On("Init", append([]interface{}{config, rollappId, _a2, logger}, options...)...)} @@ -829,7 +829,7 @@ func (_c *MockClientI_Init_Call) RunAndReturn(run func(settlement.Config, string return _c } -// Start provides a mock function with given fields: + func (_m *MockClientI) Start() error { ret := _m.Called() @@ -847,12 +847,12 @@ func (_m *MockClientI) Start() error { return r0 } -// MockClientI_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' + type MockClientI_Start_Call struct { *mock.Call } -// Start is a helper method to define mock.On call + func (_e *MockClientI_Expecter) Start() *MockClientI_Start_Call { return &MockClientI_Start_Call{Call: _e.mock.On("Start")} } @@ -874,7 +874,7 @@ func (_c *MockClientI_Start_Call) RunAndReturn(run func() error) *MockClientI_St return _c } -// Stop provides a mock function with given fields: + func (_m *MockClientI) Stop() error { ret := _m.Called() @@ -892,12 +892,12 @@ func (_m *MockClientI) Stop() error { return r0 } -// MockClientI_Stop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stop' + type MockClientI_Stop_Call struct { *mock.Call } -// Stop is a helper method to define mock.On 
call + func (_e *MockClientI_Expecter) Stop() *MockClientI_Stop_Call { return &MockClientI_Stop_Call{Call: _e.mock.On("Stop")} } @@ -919,7 +919,7 @@ func (_c *MockClientI_Stop_Call) RunAndReturn(run func() error) *MockClientI_Sto return _c } -// SubmitBatch provides a mock function with given fields: batch, daClient, daResult + func (_m *MockClientI) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *da.ResultSubmitBatch) error { ret := _m.Called(batch, daClient, daResult) @@ -937,15 +937,15 @@ func (_m *MockClientI) SubmitBatch(batch *types.Batch, daClient da.Client, daRes return r0 } -// MockClientI_SubmitBatch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubmitBatch' + type MockClientI_SubmitBatch_Call struct { *mock.Call } -// SubmitBatch is a helper method to define mock.On call -// - batch *types.Batch -// - daClient da.Client -// - daResult *da.ResultSubmitBatch + + + + func (_e *MockClientI_Expecter) SubmitBatch(batch interface{}, daClient interface{}, daResult interface{}) *MockClientI_SubmitBatch_Call { return &MockClientI_SubmitBatch_Call{Call: _e.mock.On("SubmitBatch", batch, daClient, daResult)} } @@ -967,7 +967,7 @@ func (_c *MockClientI_SubmitBatch_Call) RunAndReturn(run func(*types.Batch, da.C return _c } -// ValidateGenesisBridgeData provides a mock function with given fields: data + func (_m *MockClientI) ValidateGenesisBridgeData(data rollapp.GenesisBridgeData) error { ret := _m.Called(data) @@ -985,13 +985,13 @@ func (_m *MockClientI) ValidateGenesisBridgeData(data rollapp.GenesisBridgeData) return r0 } -// MockClientI_ValidateGenesisBridgeData_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ValidateGenesisBridgeData' + type MockClientI_ValidateGenesisBridgeData_Call struct { *mock.Call } -// ValidateGenesisBridgeData is a helper method to define mock.On call -// - data rollapp.GenesisBridgeData + + func (_e *MockClientI_Expecter) ValidateGenesisBridgeData(data interface{}) *MockClientI_ValidateGenesisBridgeData_Call { return &MockClientI_ValidateGenesisBridgeData_Call{Call: _e.mock.On("ValidateGenesisBridgeData", data)} } @@ -1013,8 +1013,8 @@ func (_c *MockClientI_ValidateGenesisBridgeData_Call) RunAndReturn(run func(roll return _c } -// NewMockClientI creates a new instance of MockClientI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockClientI(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/store/mock_Store.go b/mocks/github.com/dymensionxyz/dymint/store/mock_Store.go index 5035e135f..8ee0e6d75 100644 --- a/mocks/github.com/dymensionxyz/dymint/store/mock_Store.go +++ b/mocks/github.com/dymensionxyz/dymint/store/mock_Store.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. 
+ package store @@ -13,7 +13,7 @@ import ( types "github.com/dymensionxyz/dymint/types" ) -// MockStore is an autogenerated mock type for the Store type + type MockStore struct { mock.Mock } @@ -26,7 +26,7 @@ func (_m *MockStore) EXPECT() *MockStore_Expecter { return &MockStore_Expecter{mock: &_m.Mock} } -// Close provides a mock function with given fields: + func (_m *MockStore) Close() error { ret := _m.Called() @@ -44,12 +44,12 @@ func (_m *MockStore) Close() error { return r0 } -// MockStore_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' + type MockStore_Close_Call struct { *mock.Call } -// Close is a helper method to define mock.On call + func (_e *MockStore_Expecter) Close() *MockStore_Close_Call { return &MockStore_Close_Call{Call: _e.mock.On("Close")} } @@ -71,7 +71,7 @@ func (_c *MockStore_Close_Call) RunAndReturn(run func() error) *MockStore_Close_ return _c } -// LoadBaseHeight provides a mock function with given fields: + func (_m *MockStore) LoadBaseHeight() (uint64, error) { ret := _m.Called() @@ -99,12 +99,12 @@ func (_m *MockStore) LoadBaseHeight() (uint64, error) { return r0, r1 } -// MockStore_LoadBaseHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadBaseHeight' + type MockStore_LoadBaseHeight_Call struct { *mock.Call } -// LoadBaseHeight is a helper method to define mock.On call + func (_e *MockStore_Expecter) LoadBaseHeight() *MockStore_LoadBaseHeight_Call { return &MockStore_LoadBaseHeight_Call{Call: _e.mock.On("LoadBaseHeight")} } @@ -126,7 +126,7 @@ func (_c *MockStore_LoadBaseHeight_Call) RunAndReturn(run func() (uint64, error) return _c } -// LoadBlock provides a mock function with given fields: height + func (_m *MockStore) LoadBlock(height uint64) (*types.Block, error) { ret := _m.Called(height) @@ -156,13 +156,13 @@ func (_m *MockStore) LoadBlock(height uint64) (*types.Block, error) { return r0, r1 } -// MockStore_LoadBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadBlock' + type MockStore_LoadBlock_Call struct { *mock.Call } -// LoadBlock is a helper method to define mock.On call -// - height uint64 + + func (_e *MockStore_Expecter) LoadBlock(height interface{}) *MockStore_LoadBlock_Call { return &MockStore_LoadBlock_Call{Call: _e.mock.On("LoadBlock", height)} } @@ -184,7 +184,7 @@ func (_c *MockStore_LoadBlock_Call) RunAndReturn(run func(uint64) (*types.Block, return _c } -// LoadBlockByHash provides a mock function with given fields: hash + func (_m *MockStore) LoadBlockByHash(hash [32]byte) (*types.Block, error) { ret := _m.Called(hash) @@ -214,13 +214,13 @@ func (_m *MockStore) LoadBlockByHash(hash [32]byte) (*types.Block, error) { return r0, r1 } -// MockStore_LoadBlockByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadBlockByHash' + type MockStore_LoadBlockByHash_Call struct { *mock.Call } -// LoadBlockByHash is a helper method to define mock.On call -// - hash [32]byte + + func (_e *MockStore_Expecter) LoadBlockByHash(hash interface{}) *MockStore_LoadBlockByHash_Call { return &MockStore_LoadBlockByHash_Call{Call: _e.mock.On("LoadBlockByHash", hash)} } @@ -242,7 +242,7 @@ func (_c *MockStore_LoadBlockByHash_Call) RunAndReturn(run func([32]byte) (*type return _c } -// LoadBlockCid provides a mock function with given fields: height + func (_m *MockStore) LoadBlockCid(height uint64) (cid.Cid, error) { ret := _m.Called(height) @@ -270,13 +270,13 
@@ func (_m *MockStore) LoadBlockCid(height uint64) (cid.Cid, error) { return r0, r1 } -// MockStore_LoadBlockCid_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadBlockCid' + type MockStore_LoadBlockCid_Call struct { *mock.Call } -// LoadBlockCid is a helper method to define mock.On call -// - height uint64 + + func (_e *MockStore_Expecter) LoadBlockCid(height interface{}) *MockStore_LoadBlockCid_Call { return &MockStore_LoadBlockCid_Call{Call: _e.mock.On("LoadBlockCid", height)} } @@ -298,7 +298,7 @@ func (_c *MockStore_LoadBlockCid_Call) RunAndReturn(run func(uint64) (cid.Cid, e return _c } -// LoadBlockResponses provides a mock function with given fields: height + func (_m *MockStore) LoadBlockResponses(height uint64) (*state.ABCIResponses, error) { ret := _m.Called(height) @@ -328,13 +328,13 @@ func (_m *MockStore) LoadBlockResponses(height uint64) (*state.ABCIResponses, er return r0, r1 } -// MockStore_LoadBlockResponses_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadBlockResponses' + type MockStore_LoadBlockResponses_Call struct { *mock.Call } -// LoadBlockResponses is a helper method to define mock.On call -// - height uint64 + + func (_e *MockStore_Expecter) LoadBlockResponses(height interface{}) *MockStore_LoadBlockResponses_Call { return &MockStore_LoadBlockResponses_Call{Call: _e.mock.On("LoadBlockResponses", height)} } @@ -356,7 +356,7 @@ func (_c *MockStore_LoadBlockResponses_Call) RunAndReturn(run func(uint64) (*sta return _c } -// LoadBlockSource provides a mock function with given fields: height + func (_m *MockStore) LoadBlockSource(height uint64) (types.BlockSource, error) { ret := _m.Called(height) @@ -384,13 +384,13 @@ func (_m *MockStore) LoadBlockSource(height uint64) (types.BlockSource, error) { return r0, r1 } -// MockStore_LoadBlockSource_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadBlockSource' + type MockStore_LoadBlockSource_Call struct { *mock.Call } -// LoadBlockSource is a helper method to define mock.On call -// - height uint64 + + func (_e *MockStore_Expecter) LoadBlockSource(height interface{}) *MockStore_LoadBlockSource_Call { return &MockStore_LoadBlockSource_Call{Call: _e.mock.On("LoadBlockSource", height)} } @@ -412,7 +412,7 @@ func (_c *MockStore_LoadBlockSource_Call) RunAndReturn(run func(uint64) (types.B return _c } -// LoadBlockSyncBaseHeight provides a mock function with given fields: + func (_m *MockStore) LoadBlockSyncBaseHeight() (uint64, error) { ret := _m.Called() @@ -440,12 +440,12 @@ func (_m *MockStore) LoadBlockSyncBaseHeight() (uint64, error) { return r0, r1 } -// MockStore_LoadBlockSyncBaseHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadBlockSyncBaseHeight' + type MockStore_LoadBlockSyncBaseHeight_Call struct { *mock.Call } -// LoadBlockSyncBaseHeight is a helper method to define mock.On call + func (_e *MockStore_Expecter) LoadBlockSyncBaseHeight() *MockStore_LoadBlockSyncBaseHeight_Call { return &MockStore_LoadBlockSyncBaseHeight_Call{Call: _e.mock.On("LoadBlockSyncBaseHeight")} } @@ -467,7 +467,7 @@ func (_c *MockStore_LoadBlockSyncBaseHeight_Call) RunAndReturn(run func() (uint6 return _c } -// LoadCommit provides a mock function with given fields: height + func (_m *MockStore) LoadCommit(height uint64) (*types.Commit, error) { ret := _m.Called(height) @@ -497,13 +497,13 @@ func (_m *MockStore) LoadCommit(height uint64) (*types.Commit, 
error) { return r0, r1 } -// MockStore_LoadCommit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadCommit' + type MockStore_LoadCommit_Call struct { *mock.Call } -// LoadCommit is a helper method to define mock.On call -// - height uint64 + + func (_e *MockStore_Expecter) LoadCommit(height interface{}) *MockStore_LoadCommit_Call { return &MockStore_LoadCommit_Call{Call: _e.mock.On("LoadCommit", height)} } @@ -525,7 +525,7 @@ func (_c *MockStore_LoadCommit_Call) RunAndReturn(run func(uint64) (*types.Commi return _c } -// LoadCommitByHash provides a mock function with given fields: hash + func (_m *MockStore) LoadCommitByHash(hash [32]byte) (*types.Commit, error) { ret := _m.Called(hash) @@ -555,13 +555,13 @@ func (_m *MockStore) LoadCommitByHash(hash [32]byte) (*types.Commit, error) { return r0, r1 } -// MockStore_LoadCommitByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadCommitByHash' + type MockStore_LoadCommitByHash_Call struct { *mock.Call } -// LoadCommitByHash is a helper method to define mock.On call -// - hash [32]byte + + func (_e *MockStore_Expecter) LoadCommitByHash(hash interface{}) *MockStore_LoadCommitByHash_Call { return &MockStore_LoadCommitByHash_Call{Call: _e.mock.On("LoadCommitByHash", hash)} } @@ -583,7 +583,7 @@ func (_c *MockStore_LoadCommitByHash_Call) RunAndReturn(run func([32]byte) (*typ return _c } -// LoadDRSVersion provides a mock function with given fields: height + func (_m *MockStore) LoadDRSVersion(height uint64) (uint32, error) { ret := _m.Called(height) @@ -611,13 +611,13 @@ func (_m *MockStore) LoadDRSVersion(height uint64) (uint32, error) { return r0, r1 } -// MockStore_LoadDRSVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadDRSVersion' + type MockStore_LoadDRSVersion_Call struct { *mock.Call } -// LoadDRSVersion is a helper method to define mock.On call -// - height uint64 + + func (_e *MockStore_Expecter) LoadDRSVersion(height interface{}) *MockStore_LoadDRSVersion_Call { return &MockStore_LoadDRSVersion_Call{Call: _e.mock.On("LoadDRSVersion", height)} } @@ -639,7 +639,7 @@ func (_c *MockStore_LoadDRSVersion_Call) RunAndReturn(run func(uint64) (uint32, return _c } -// LoadIndexerBaseHeight provides a mock function with given fields: + func (_m *MockStore) LoadIndexerBaseHeight() (uint64, error) { ret := _m.Called() @@ -667,12 +667,12 @@ func (_m *MockStore) LoadIndexerBaseHeight() (uint64, error) { return r0, r1 } -// MockStore_LoadIndexerBaseHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadIndexerBaseHeight' + type MockStore_LoadIndexerBaseHeight_Call struct { *mock.Call } -// LoadIndexerBaseHeight is a helper method to define mock.On call + func (_e *MockStore_Expecter) LoadIndexerBaseHeight() *MockStore_LoadIndexerBaseHeight_Call { return &MockStore_LoadIndexerBaseHeight_Call{Call: _e.mock.On("LoadIndexerBaseHeight")} } @@ -694,7 +694,7 @@ func (_c *MockStore_LoadIndexerBaseHeight_Call) RunAndReturn(run func() (uint64, return _c } -// LoadLastBlockSequencerSet provides a mock function with given fields: + func (_m *MockStore) LoadLastBlockSequencerSet() (types.Sequencers, error) { ret := _m.Called() @@ -724,12 +724,12 @@ func (_m *MockStore) LoadLastBlockSequencerSet() (types.Sequencers, error) { return r0, r1 } -// MockStore_LoadLastBlockSequencerSet_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 
'LoadLastBlockSequencerSet' + type MockStore_LoadLastBlockSequencerSet_Call struct { *mock.Call } -// LoadLastBlockSequencerSet is a helper method to define mock.On call + func (_e *MockStore_Expecter) LoadLastBlockSequencerSet() *MockStore_LoadLastBlockSequencerSet_Call { return &MockStore_LoadLastBlockSequencerSet_Call{Call: _e.mock.On("LoadLastBlockSequencerSet")} } @@ -751,7 +751,7 @@ func (_c *MockStore_LoadLastBlockSequencerSet_Call) RunAndReturn(run func() (typ return _c } -// LoadProposer provides a mock function with given fields: height + func (_m *MockStore) LoadProposer(height uint64) (types.Sequencer, error) { ret := _m.Called(height) @@ -779,13 +779,13 @@ func (_m *MockStore) LoadProposer(height uint64) (types.Sequencer, error) { return r0, r1 } -// MockStore_LoadProposer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadProposer' + type MockStore_LoadProposer_Call struct { *mock.Call } -// LoadProposer is a helper method to define mock.On call -// - height uint64 + + func (_e *MockStore_Expecter) LoadProposer(height interface{}) *MockStore_LoadProposer_Call { return &MockStore_LoadProposer_Call{Call: _e.mock.On("LoadProposer", height)} } @@ -807,7 +807,7 @@ func (_c *MockStore_LoadProposer_Call) RunAndReturn(run func(uint64) (types.Sequ return _c } -// LoadState provides a mock function with given fields: + func (_m *MockStore) LoadState() (*types.State, error) { ret := _m.Called() @@ -837,12 +837,12 @@ func (_m *MockStore) LoadState() (*types.State, error) { return r0, r1 } -// MockStore_LoadState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadState' + type MockStore_LoadState_Call struct { *mock.Call } -// LoadState is a helper method to define mock.On call + func (_e *MockStore_Expecter) LoadState() *MockStore_LoadState_Call { return &MockStore_LoadState_Call{Call: _e.mock.On("LoadState")} } @@ -864,7 +864,7 @@ func (_c *MockStore_LoadState_Call) RunAndReturn(run func() (*types.State, error return _c } -// LoadValidationHeight provides a mock function with given fields: + func (_m *MockStore) LoadValidationHeight() (uint64, error) { ret := _m.Called() @@ -892,12 +892,12 @@ func (_m *MockStore) LoadValidationHeight() (uint64, error) { return r0, r1 } -// MockStore_LoadValidationHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadValidationHeight' + type MockStore_LoadValidationHeight_Call struct { *mock.Call } -// LoadValidationHeight is a helper method to define mock.On call + func (_e *MockStore_Expecter) LoadValidationHeight() *MockStore_LoadValidationHeight_Call { return &MockStore_LoadValidationHeight_Call{Call: _e.mock.On("LoadValidationHeight")} } @@ -919,7 +919,7 @@ func (_c *MockStore_LoadValidationHeight_Call) RunAndReturn(run func() (uint64, return _c } -// NewBatch provides a mock function with given fields: + func (_m *MockStore) NewBatch() store.KVBatch { ret := _m.Called() @@ -939,12 +939,12 @@ func (_m *MockStore) NewBatch() store.KVBatch { return r0 } -// MockStore_NewBatch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewBatch' + type MockStore_NewBatch_Call struct { *mock.Call } -// NewBatch is a helper method to define mock.On call + func (_e *MockStore_Expecter) NewBatch() *MockStore_NewBatch_Call { return &MockStore_NewBatch_Call{Call: _e.mock.On("NewBatch")} } @@ -966,7 +966,7 @@ func (_c *MockStore_NewBatch_Call) RunAndReturn(run func() store.KVBatch) *MockS 
return _c } -// PruneStore provides a mock function with given fields: to, logger + func (_m *MockStore) PruneStore(to uint64, logger types.Logger) (uint64, error) { ret := _m.Called(to, logger) @@ -994,14 +994,14 @@ func (_m *MockStore) PruneStore(to uint64, logger types.Logger) (uint64, error) return r0, r1 } -// MockStore_PruneStore_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PruneStore' + type MockStore_PruneStore_Call struct { *mock.Call } -// PruneStore is a helper method to define mock.On call -// - to uint64 -// - logger types.Logger + + + func (_e *MockStore_Expecter) PruneStore(to interface{}, logger interface{}) *MockStore_PruneStore_Call { return &MockStore_PruneStore_Call{Call: _e.mock.On("PruneStore", to, logger)} } @@ -1023,7 +1023,7 @@ func (_c *MockStore_PruneStore_Call) RunAndReturn(run func(uint64, types.Logger) return _c } -// RemoveBlockCid provides a mock function with given fields: height + func (_m *MockStore) RemoveBlockCid(height uint64) error { ret := _m.Called(height) @@ -1041,13 +1041,13 @@ func (_m *MockStore) RemoveBlockCid(height uint64) error { return r0 } -// MockStore_RemoveBlockCid_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveBlockCid' + type MockStore_RemoveBlockCid_Call struct { *mock.Call } -// RemoveBlockCid is a helper method to define mock.On call -// - height uint64 + + func (_e *MockStore_Expecter) RemoveBlockCid(height interface{}) *MockStore_RemoveBlockCid_Call { return &MockStore_RemoveBlockCid_Call{Call: _e.mock.On("RemoveBlockCid", height)} } @@ -1069,7 +1069,7 @@ func (_c *MockStore_RemoveBlockCid_Call) RunAndReturn(run func(uint64) error) *M return _c } -// SaveBaseHeight provides a mock function with given fields: height + func (_m *MockStore) SaveBaseHeight(height uint64) error { ret := _m.Called(height) @@ -1087,13 +1087,13 @@ func (_m *MockStore) SaveBaseHeight(height uint64) error { return r0 } -// MockStore_SaveBaseHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveBaseHeight' + type MockStore_SaveBaseHeight_Call struct { *mock.Call } -// SaveBaseHeight is a helper method to define mock.On call -// - height uint64 + + func (_e *MockStore_Expecter) SaveBaseHeight(height interface{}) *MockStore_SaveBaseHeight_Call { return &MockStore_SaveBaseHeight_Call{Call: _e.mock.On("SaveBaseHeight", height)} } @@ -1115,7 +1115,7 @@ func (_c *MockStore_SaveBaseHeight_Call) RunAndReturn(run func(uint64) error) *M return _c } -// SaveBlock provides a mock function with given fields: block, commit, batch + func (_m *MockStore) SaveBlock(block *types.Block, commit *types.Commit, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(block, commit, batch) @@ -1145,15 +1145,15 @@ func (_m *MockStore) SaveBlock(block *types.Block, commit *types.Commit, batch s return r0, r1 } -// MockStore_SaveBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveBlock' + type MockStore_SaveBlock_Call struct { *mock.Call } -// SaveBlock is a helper method to define mock.On call -// - block *types.Block -// - commit *types.Commit -// - batch store.KVBatch + + + + func (_e *MockStore_Expecter) SaveBlock(block interface{}, commit interface{}, batch interface{}) *MockStore_SaveBlock_Call { return &MockStore_SaveBlock_Call{Call: _e.mock.On("SaveBlock", block, commit, batch)} } @@ -1175,7 +1175,7 @@ func (_c *MockStore_SaveBlock_Call) RunAndReturn(run func(*types.Block, 
*types.C return _c } -// SaveBlockCid provides a mock function with given fields: height, _a1, batch + func (_m *MockStore) SaveBlockCid(height uint64, _a1 cid.Cid, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, _a1, batch) @@ -1205,15 +1205,15 @@ func (_m *MockStore) SaveBlockCid(height uint64, _a1 cid.Cid, batch store.KVBatc return r0, r1 } -// MockStore_SaveBlockCid_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveBlockCid' + type MockStore_SaveBlockCid_Call struct { *mock.Call } -// SaveBlockCid is a helper method to define mock.On call -// - height uint64 -// - _a1 cid.Cid -// - batch store.KVBatch + + + + func (_e *MockStore_Expecter) SaveBlockCid(height interface{}, _a1 interface{}, batch interface{}) *MockStore_SaveBlockCid_Call { return &MockStore_SaveBlockCid_Call{Call: _e.mock.On("SaveBlockCid", height, _a1, batch)} } @@ -1235,7 +1235,7 @@ func (_c *MockStore_SaveBlockCid_Call) RunAndReturn(run func(uint64, cid.Cid, st return _c } -// SaveBlockResponses provides a mock function with given fields: height, responses, batch + func (_m *MockStore) SaveBlockResponses(height uint64, responses *state.ABCIResponses, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, responses, batch) @@ -1265,15 +1265,15 @@ func (_m *MockStore) SaveBlockResponses(height uint64, responses *state.ABCIResp return r0, r1 } -// MockStore_SaveBlockResponses_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveBlockResponses' + type MockStore_SaveBlockResponses_Call struct { *mock.Call } -// SaveBlockResponses is a helper method to define mock.On call -// - height uint64 -// - responses *state.ABCIResponses -// - batch store.KVBatch + + + + func (_e *MockStore_Expecter) SaveBlockResponses(height interface{}, responses interface{}, batch interface{}) *MockStore_SaveBlockResponses_Call { return &MockStore_SaveBlockResponses_Call{Call: _e.mock.On("SaveBlockResponses", height, responses, batch)} } @@ -1295,7 +1295,7 @@ func (_c *MockStore_SaveBlockResponses_Call) RunAndReturn(run func(uint64, *stat return _c } -// SaveBlockSource provides a mock function with given fields: height, source, batch + func (_m *MockStore) SaveBlockSource(height uint64, source types.BlockSource, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, source, batch) @@ -1325,15 +1325,15 @@ func (_m *MockStore) SaveBlockSource(height uint64, source types.BlockSource, ba return r0, r1 } -// MockStore_SaveBlockSource_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveBlockSource' + type MockStore_SaveBlockSource_Call struct { *mock.Call } -// SaveBlockSource is a helper method to define mock.On call -// - height uint64 -// - source types.BlockSource -// - batch store.KVBatch + + + + func (_e *MockStore_Expecter) SaveBlockSource(height interface{}, source interface{}, batch interface{}) *MockStore_SaveBlockSource_Call { return &MockStore_SaveBlockSource_Call{Call: _e.mock.On("SaveBlockSource", height, source, batch)} } @@ -1355,7 +1355,7 @@ func (_c *MockStore_SaveBlockSource_Call) RunAndReturn(run func(uint64, types.Bl return _c } -// SaveBlockSyncBaseHeight provides a mock function with given fields: height + func (_m *MockStore) SaveBlockSyncBaseHeight(height uint64) error { ret := _m.Called(height) @@ -1373,13 +1373,13 @@ func (_m *MockStore) SaveBlockSyncBaseHeight(height uint64) error { return r0 } -// MockStore_SaveBlockSyncBaseHeight_Call is a 
*mock.Call that shadows Run/Return methods with type explicit version for method 'SaveBlockSyncBaseHeight' + type MockStore_SaveBlockSyncBaseHeight_Call struct { *mock.Call } -// SaveBlockSyncBaseHeight is a helper method to define mock.On call -// - height uint64 + + func (_e *MockStore_Expecter) SaveBlockSyncBaseHeight(height interface{}) *MockStore_SaveBlockSyncBaseHeight_Call { return &MockStore_SaveBlockSyncBaseHeight_Call{Call: _e.mock.On("SaveBlockSyncBaseHeight", height)} } @@ -1401,7 +1401,7 @@ func (_c *MockStore_SaveBlockSyncBaseHeight_Call) RunAndReturn(run func(uint64) return _c } -// SaveDRSVersion provides a mock function with given fields: height, version, batch + func (_m *MockStore) SaveDRSVersion(height uint64, version uint32, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, version, batch) @@ -1431,15 +1431,15 @@ func (_m *MockStore) SaveDRSVersion(height uint64, version uint32, batch store.K return r0, r1 } -// MockStore_SaveDRSVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveDRSVersion' + type MockStore_SaveDRSVersion_Call struct { *mock.Call } -// SaveDRSVersion is a helper method to define mock.On call -// - height uint64 -// - version uint32 -// - batch store.KVBatch + + + + func (_e *MockStore_Expecter) SaveDRSVersion(height interface{}, version interface{}, batch interface{}) *MockStore_SaveDRSVersion_Call { return &MockStore_SaveDRSVersion_Call{Call: _e.mock.On("SaveDRSVersion", height, version, batch)} } @@ -1461,7 +1461,7 @@ func (_c *MockStore_SaveDRSVersion_Call) RunAndReturn(run func(uint64, uint32, s return _c } -// SaveIndexerBaseHeight provides a mock function with given fields: height + func (_m *MockStore) SaveIndexerBaseHeight(height uint64) error { ret := _m.Called(height) @@ -1479,13 +1479,13 @@ func (_m *MockStore) SaveIndexerBaseHeight(height uint64) error { return r0 } -// MockStore_SaveIndexerBaseHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveIndexerBaseHeight' + type MockStore_SaveIndexerBaseHeight_Call struct { *mock.Call } -// SaveIndexerBaseHeight is a helper method to define mock.On call -// - height uint64 + + func (_e *MockStore_Expecter) SaveIndexerBaseHeight(height interface{}) *MockStore_SaveIndexerBaseHeight_Call { return &MockStore_SaveIndexerBaseHeight_Call{Call: _e.mock.On("SaveIndexerBaseHeight", height)} } @@ -1507,7 +1507,7 @@ func (_c *MockStore_SaveIndexerBaseHeight_Call) RunAndReturn(run func(uint64) er return _c } -// SaveLastBlockSequencerSet provides a mock function with given fields: sequencers, batch + func (_m *MockStore) SaveLastBlockSequencerSet(sequencers types.Sequencers, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(sequencers, batch) @@ -1537,14 +1537,14 @@ func (_m *MockStore) SaveLastBlockSequencerSet(sequencers types.Sequencers, batc return r0, r1 } -// MockStore_SaveLastBlockSequencerSet_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveLastBlockSequencerSet' + type MockStore_SaveLastBlockSequencerSet_Call struct { *mock.Call } -// SaveLastBlockSequencerSet is a helper method to define mock.On call -// - sequencers types.Sequencers -// - batch store.KVBatch + + + func (_e *MockStore_Expecter) SaveLastBlockSequencerSet(sequencers interface{}, batch interface{}) *MockStore_SaveLastBlockSequencerSet_Call { return &MockStore_SaveLastBlockSequencerSet_Call{Call: _e.mock.On("SaveLastBlockSequencerSet", sequencers, batch)} } 
@@ -1566,7 +1566,7 @@ func (_c *MockStore_SaveLastBlockSequencerSet_Call) RunAndReturn(run func(types. return _c } -// SaveProposer provides a mock function with given fields: height, proposer, batch + func (_m *MockStore) SaveProposer(height uint64, proposer types.Sequencer, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, proposer, batch) @@ -1596,15 +1596,15 @@ func (_m *MockStore) SaveProposer(height uint64, proposer types.Sequencer, batch return r0, r1 } -// MockStore_SaveProposer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveProposer' + type MockStore_SaveProposer_Call struct { *mock.Call } -// SaveProposer is a helper method to define mock.On call -// - height uint64 -// - proposer types.Sequencer -// - batch store.KVBatch + + + + func (_e *MockStore_Expecter) SaveProposer(height interface{}, proposer interface{}, batch interface{}) *MockStore_SaveProposer_Call { return &MockStore_SaveProposer_Call{Call: _e.mock.On("SaveProposer", height, proposer, batch)} } @@ -1626,7 +1626,7 @@ func (_c *MockStore_SaveProposer_Call) RunAndReturn(run func(uint64, types.Seque return _c } -// SaveState provides a mock function with given fields: _a0, batch + func (_m *MockStore) SaveState(_a0 *types.State, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(_a0, batch) @@ -1656,14 +1656,14 @@ func (_m *MockStore) SaveState(_a0 *types.State, batch store.KVBatch) (store.KVB return r0, r1 } -// MockStore_SaveState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveState' + type MockStore_SaveState_Call struct { *mock.Call } -// SaveState is a helper method to define mock.On call -// - _a0 *types.State -// - batch store.KVBatch + + + func (_e *MockStore_Expecter) SaveState(_a0 interface{}, batch interface{}) *MockStore_SaveState_Call { return &MockStore_SaveState_Call{Call: _e.mock.On("SaveState", _a0, batch)} } @@ -1685,7 +1685,7 @@ func (_c *MockStore_SaveState_Call) RunAndReturn(run func(*types.State, store.KV return _c } -// SaveValidationHeight provides a mock function with given fields: height, batch + func (_m *MockStore) SaveValidationHeight(height uint64, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, batch) @@ -1715,14 +1715,14 @@ func (_m *MockStore) SaveValidationHeight(height uint64, batch store.KVBatch) (s return r0, r1 } -// MockStore_SaveValidationHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveValidationHeight' + type MockStore_SaveValidationHeight_Call struct { *mock.Call } -// SaveValidationHeight is a helper method to define mock.On call -// - height uint64 -// - batch store.KVBatch + + + func (_e *MockStore_Expecter) SaveValidationHeight(height interface{}, batch interface{}) *MockStore_SaveValidationHeight_Call { return &MockStore_SaveValidationHeight_Call{Call: _e.mock.On("SaveValidationHeight", height, batch)} } @@ -1744,8 +1744,8 @@ func (_c *MockStore_SaveValidationHeight_Call) RunAndReturn(run func(uint64, sto return _c } -// NewMockStore creates a new instance of MockStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
+ + func NewMockStore(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/third_party/dymension/sequencer/types/mock_QueryClient.go b/mocks/github.com/dymensionxyz/dymint/third_party/dymension/sequencer/types/mock_QueryClient.go index c2ce005b5..775ec233d 100644 --- a/mocks/github.com/dymensionxyz/dymint/third_party/dymension/sequencer/types/mock_QueryClient.go +++ b/mocks/github.com/dymensionxyz/dymint/third_party/dymension/sequencer/types/mock_QueryClient.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. + package types @@ -12,7 +12,7 @@ import ( types "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer" ) -// MockQueryClient is an autogenerated mock type for the QueryClient type + type MockQueryClient struct { mock.Mock } @@ -25,7 +25,7 @@ func (_m *MockQueryClient) EXPECT() *MockQueryClient_Expecter { return &MockQueryClient_Expecter{mock: &_m.Mock} } -// GetNextProposerByRollapp provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) GetNextProposerByRollapp(ctx context.Context, in *types.QueryGetNextProposerByRollappRequest, opts ...grpc.CallOption) (*types.QueryGetNextProposerByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -62,15 +62,15 @@ func (_m *MockQueryClient) GetNextProposerByRollapp(ctx context.Context, in *typ return r0, r1 } -// MockQueryClient_GetNextProposerByRollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNextProposerByRollapp' + type MockQueryClient_GetNextProposerByRollapp_Call struct { *mock.Call } -// GetNextProposerByRollapp is a helper method to define mock.On call -// - ctx context.Context -// - in *types.QueryGetNextProposerByRollappRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) GetNextProposerByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_GetNextProposerByRollapp_Call { return &MockQueryClient_GetNextProposerByRollapp_Call{Call: _e.mock.On("GetNextProposerByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -99,7 +99,7 @@ func (_c *MockQueryClient_GetNextProposerByRollapp_Call) RunAndReturn(run func(c return _c } -// GetProposerByRollapp provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) GetProposerByRollapp(ctx context.Context, in *types.QueryGetProposerByRollappRequest, opts ...grpc.CallOption) (*types.QueryGetProposerByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -136,15 +136,15 @@ func (_m *MockQueryClient) GetProposerByRollapp(ctx context.Context, in *types.Q return r0, r1 } -// MockQueryClient_GetProposerByRollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProposerByRollapp' + type MockQueryClient_GetProposerByRollapp_Call struct { *mock.Call } -// GetProposerByRollapp is a helper method to define mock.On call -// - ctx context.Context -// - in *types.QueryGetProposerByRollappRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) GetProposerByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_GetProposerByRollapp_Call { return &MockQueryClient_GetProposerByRollapp_Call{Call: _e.mock.On("GetProposerByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -173,7 +173,7 @@ func (_c *MockQueryClient_GetProposerByRollapp_Call) RunAndReturn(run func(conte return _c } -// Params provides a 
mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) Params(ctx context.Context, in *types.QueryParamsRequest, opts ...grpc.CallOption) (*types.QueryParamsResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -210,15 +210,15 @@ func (_m *MockQueryClient) Params(ctx context.Context, in *types.QueryParamsRequ return r0, r1 } -// MockQueryClient_Params_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Params' + type MockQueryClient_Params_Call struct { *mock.Call } -// Params is a helper method to define mock.On call -// - ctx context.Context -// - in *types.QueryParamsRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) Params(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Params_Call { return &MockQueryClient_Params_Call{Call: _e.mock.On("Params", append([]interface{}{ctx, in}, opts...)...)} @@ -247,7 +247,7 @@ func (_c *MockQueryClient_Params_Call) RunAndReturn(run func(context.Context, *t return _c } -// Sequencer provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) Sequencer(ctx context.Context, in *types.QueryGetSequencerRequest, opts ...grpc.CallOption) (*types.QueryGetSequencerResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -284,15 +284,15 @@ func (_m *MockQueryClient) Sequencer(ctx context.Context, in *types.QueryGetSequ return r0, r1 } -// MockQueryClient_Sequencer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sequencer' + type MockQueryClient_Sequencer_Call struct { *mock.Call } -// Sequencer is a helper method to define mock.On call -// - ctx context.Context -// - in *types.QueryGetSequencerRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) Sequencer(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Sequencer_Call { return &MockQueryClient_Sequencer_Call{Call: _e.mock.On("Sequencer", append([]interface{}{ctx, in}, opts...)...)} @@ -321,7 +321,7 @@ func (_c *MockQueryClient_Sequencer_Call) RunAndReturn(run func(context.Context, return _c } -// Sequencers provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) Sequencers(ctx context.Context, in *types.QuerySequencersRequest, opts ...grpc.CallOption) (*types.QuerySequencersResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -358,15 +358,15 @@ func (_m *MockQueryClient) Sequencers(ctx context.Context, in *types.QuerySequen return r0, r1 } -// MockQueryClient_Sequencers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sequencers' + type MockQueryClient_Sequencers_Call struct { *mock.Call } -// Sequencers is a helper method to define mock.On call -// - ctx context.Context -// - in *types.QuerySequencersRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) Sequencers(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Sequencers_Call { return &MockQueryClient_Sequencers_Call{Call: _e.mock.On("Sequencers", append([]interface{}{ctx, in}, opts...)...)} @@ -395,7 +395,7 @@ func (_c *MockQueryClient_Sequencers_Call) RunAndReturn(run func(context.Context return _c } -// SequencersByRollapp provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) SequencersByRollapp(ctx context.Context, in *types.QueryGetSequencersByRollappRequest, opts ...grpc.CallOption)
(*types.QueryGetSequencersByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -432,15 +432,15 @@ func (_m *MockQueryClient) SequencersByRollapp(ctx context.Context, in *types.Qu return r0, r1 } -// MockQueryClient_SequencersByRollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SequencersByRollapp' + type MockQueryClient_SequencersByRollapp_Call struct { *mock.Call } -// SequencersByRollapp is a helper method to define mock.On call -// - ctx context.Context -// - in *types.QueryGetSequencersByRollappRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) SequencersByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_SequencersByRollapp_Call { return &MockQueryClient_SequencersByRollapp_Call{Call: _e.mock.On("SequencersByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -469,7 +469,7 @@ func (_c *MockQueryClient_SequencersByRollapp_Call) RunAndReturn(run func(contex return _c } -// SequencersByRollappByStatus provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) SequencersByRollappByStatus(ctx context.Context, in *types.QueryGetSequencersByRollappByStatusRequest, opts ...grpc.CallOption) (*types.QueryGetSequencersByRollappByStatusResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -506,15 +506,15 @@ func (_m *MockQueryClient) SequencersByRollappByStatus(ctx context.Context, in * return r0, r1 } -// MockQueryClient_SequencersByRollappByStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SequencersByRollappByStatus' + type MockQueryClient_SequencersByRollappByStatus_Call struct { *mock.Call } -// SequencersByRollappByStatus is a helper method to define mock.On call -// - ctx context.Context -// - in *types.QueryGetSequencersByRollappByStatusRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) SequencersByRollappByStatus(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_SequencersByRollappByStatus_Call { return &MockQueryClient_SequencersByRollappByStatus_Call{Call: _e.mock.On("SequencersByRollappByStatus", append([]interface{}{ctx, in}, opts...)...)} @@ -543,8 +543,8 @@ func (_c *MockQueryClient_SequencersByRollappByStatus_Call) RunAndReturn(run fun return _c } -// NewMockQueryClient creates a new instance of MockQueryClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockQueryClient(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp/mock_QueryClient.go b/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp/mock_QueryClient.go index 80d7ab986..c73eb7ea5 100644 --- a/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp/mock_QueryClient.go +++ b/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp/mock_QueryClient.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. 
+ package rollapp @@ -12,7 +12,7 @@ import ( rollapp "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp" ) -// MockQueryClient is an autogenerated mock type for the QueryClient type + type MockQueryClient struct { mock.Mock } @@ -25,7 +25,7 @@ func (_m *MockQueryClient) EXPECT() *MockQueryClient_Expecter { return &MockQueryClient_Expecter{mock: &_m.Mock} } -// LatestHeight provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) LatestHeight(ctx context.Context, in *rollapp.QueryGetLatestHeightRequest, opts ...grpc.CallOption) (*rollapp.QueryGetLatestHeightResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -62,15 +62,15 @@ func (_m *MockQueryClient) LatestHeight(ctx context.Context, in *rollapp.QueryGe return r0, r1 } -// MockQueryClient_LatestHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LatestHeight' + type MockQueryClient_LatestHeight_Call struct { *mock.Call } -// LatestHeight is a helper method to define mock.On call -// - ctx context.Context -// - in *rollapp.QueryGetLatestHeightRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) LatestHeight(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_LatestHeight_Call { return &MockQueryClient_LatestHeight_Call{Call: _e.mock.On("LatestHeight", append([]interface{}{ctx, in}, opts...)...)} @@ -99,7 +99,7 @@ func (_c *MockQueryClient_LatestHeight_Call) RunAndReturn(run func(context.Conte return _c } -// LatestStateIndex provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) LatestStateIndex(ctx context.Context, in *rollapp.QueryGetLatestStateIndexRequest, opts ...grpc.CallOption) (*rollapp.QueryGetLatestStateIndexResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -136,15 +136,15 @@ func (_m *MockQueryClient) LatestStateIndex(ctx context.Context, in *rollapp.Que return r0, r1 } -// MockQueryClient_LatestStateIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LatestStateIndex' + type MockQueryClient_LatestStateIndex_Call struct { *mock.Call } -// LatestStateIndex is a helper method to define mock.On call -// - ctx context.Context -// - in *rollapp.QueryGetLatestStateIndexRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) LatestStateIndex(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_LatestStateIndex_Call { return &MockQueryClient_LatestStateIndex_Call{Call: _e.mock.On("LatestStateIndex", append([]interface{}{ctx, in}, opts...)...)} @@ -173,7 +173,7 @@ func (_c *MockQueryClient_LatestStateIndex_Call) RunAndReturn(run func(context.C return _c } -// ObsoleteDRSVersions provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) ObsoleteDRSVersions(ctx context.Context, in *rollapp.QueryObsoleteDRSVersionsRequest, opts ...grpc.CallOption) (*rollapp.QueryObsoleteDRSVersionsResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -210,15 +210,15 @@ func (_m *MockQueryClient) ObsoleteDRSVersions(ctx context.Context, in *rollapp. 
return r0, r1 } -// MockQueryClient_ObsoleteDRSVersions_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ObsoleteDRSVersions' + type MockQueryClient_ObsoleteDRSVersions_Call struct { *mock.Call } -// ObsoleteDRSVersions is a helper method to define mock.On call -// - ctx context.Context -// - in *rollapp.QueryObsoleteDRSVersionsRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) ObsoleteDRSVersions(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_ObsoleteDRSVersions_Call { return &MockQueryClient_ObsoleteDRSVersions_Call{Call: _e.mock.On("ObsoleteDRSVersions", append([]interface{}{ctx, in}, opts...)...)} @@ -247,7 +247,7 @@ func (_c *MockQueryClient_ObsoleteDRSVersions_Call) RunAndReturn(run func(contex return _c } -// Params provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) Params(ctx context.Context, in *rollapp.QueryParamsRequest, opts ...grpc.CallOption) (*rollapp.QueryParamsResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -284,15 +284,15 @@ func (_m *MockQueryClient) Params(ctx context.Context, in *rollapp.QueryParamsRe return r0, r1 } -// MockQueryClient_Params_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Params' + type MockQueryClient_Params_Call struct { *mock.Call } -// Params is a helper method to define mock.On call -// - ctx context.Context -// - in *rollapp.QueryParamsRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) Params(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Params_Call { return &MockQueryClient_Params_Call{Call: _e.mock.On("Params", append([]interface{}{ctx, in}, opts...)...)} @@ -321,7 +321,7 @@ func (_c *MockQueryClient_Params_Call) RunAndReturn(run func(context.Context, *r return _c } -// RegisteredDenoms provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) RegisteredDenoms(ctx context.Context, in *rollapp.QueryRegisteredDenomsRequest, opts ...grpc.CallOption) (*rollapp.QueryRegisteredDenomsResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -358,15 +358,15 @@ func (_m *MockQueryClient) RegisteredDenoms(ctx context.Context, in *rollapp.Que return r0, r1 } -// MockQueryClient_RegisteredDenoms_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RegisteredDenoms' + type MockQueryClient_RegisteredDenoms_Call struct { *mock.Call } -// RegisteredDenoms is a helper method to define mock.On call -// - ctx context.Context -// - in *rollapp.QueryRegisteredDenomsRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) RegisteredDenoms(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_RegisteredDenoms_Call { return &MockQueryClient_RegisteredDenoms_Call{Call: _e.mock.On("RegisteredDenoms", append([]interface{}{ctx, in}, opts...)...)} @@ -395,7 +395,7 @@ func (_c *MockQueryClient_RegisteredDenoms_Call) RunAndReturn(run func(context.C return _c } -// Rollapp provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) Rollapp(ctx context.Context, in *rollapp.QueryGetRollappRequest, opts ...grpc.CallOption) (*rollapp.QueryGetRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -432,15 +432,15 @@ func (_m *MockQueryClient) Rollapp(ctx context.Context, in *rollapp.QueryGetRoll return r0, r1 } -// 
MockQueryClient_Rollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Rollapp' + type MockQueryClient_Rollapp_Call struct { *mock.Call } -// Rollapp is a helper method to define mock.On call -// - ctx context.Context -// - in *rollapp.QueryGetRollappRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) Rollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Rollapp_Call { return &MockQueryClient_Rollapp_Call{Call: _e.mock.On("Rollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -469,7 +469,7 @@ func (_c *MockQueryClient_Rollapp_Call) RunAndReturn(run func(context.Context, * return _c } -// RollappAll provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) RollappAll(ctx context.Context, in *rollapp.QueryAllRollappRequest, opts ...grpc.CallOption) (*rollapp.QueryAllRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -506,15 +506,15 @@ func (_m *MockQueryClient) RollappAll(ctx context.Context, in *rollapp.QueryAllR return r0, r1 } -// MockQueryClient_RollappAll_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RollappAll' + type MockQueryClient_RollappAll_Call struct { *mock.Call } -// RollappAll is a helper method to define mock.On call -// - ctx context.Context -// - in *rollapp.QueryAllRollappRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) RollappAll(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_RollappAll_Call { return &MockQueryClient_RollappAll_Call{Call: _e.mock.On("RollappAll", append([]interface{}{ctx, in}, opts...)...)} @@ -543,7 +543,7 @@ func (_c *MockQueryClient_RollappAll_Call) RunAndReturn(run func(context.Context return _c } -// RollappByEIP155 provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) RollappByEIP155(ctx context.Context, in *rollapp.QueryGetRollappByEIP155Request, opts ...grpc.CallOption) (*rollapp.QueryGetRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -580,15 +580,15 @@ func (_m *MockQueryClient) RollappByEIP155(ctx context.Context, in *rollapp.Quer return r0, r1 } -// MockQueryClient_RollappByEIP155_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RollappByEIP155' + type MockQueryClient_RollappByEIP155_Call struct { *mock.Call } -// RollappByEIP155 is a helper method to define mock.On call -// - ctx context.Context -// - in *rollapp.QueryGetRollappByEIP155Request -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) RollappByEIP155(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_RollappByEIP155_Call { return &MockQueryClient_RollappByEIP155_Call{Call: _e.mock.On("RollappByEIP155", append([]interface{}{ctx, in}, opts...)...)} @@ -617,7 +617,7 @@ func (_c *MockQueryClient_RollappByEIP155_Call) RunAndReturn(run func(context.Co return _c } -// StateInfo provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) StateInfo(ctx context.Context, in *rollapp.QueryGetStateInfoRequest, opts ...grpc.CallOption) (*rollapp.QueryGetStateInfoResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -654,15 +654,15 @@ func (_m *MockQueryClient) StateInfo(ctx context.Context, in *rollapp.QueryGetSt return r0, r1 } -// MockQueryClient_StateInfo_Call is a *mock.Call that shadows Run/Return methods with 
type explicit version for method 'StateInfo' + type MockQueryClient_StateInfo_Call struct { *mock.Call } -// StateInfo is a helper method to define mock.On call -// - ctx context.Context -// - in *rollapp.QueryGetStateInfoRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) StateInfo(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_StateInfo_Call { return &MockQueryClient_StateInfo_Call{Call: _e.mock.On("StateInfo", append([]interface{}{ctx, in}, opts...)...)} @@ -691,7 +691,7 @@ func (_c *MockQueryClient_StateInfo_Call) RunAndReturn(run func(context.Context, return _c } -// ValidateGenesisBridge provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) ValidateGenesisBridge(ctx context.Context, in *rollapp.QueryValidateGenesisBridgeRequest, opts ...grpc.CallOption) (*rollapp.QueryValidateGenesisBridgeResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -728,15 +728,15 @@ func (_m *MockQueryClient) ValidateGenesisBridge(ctx context.Context, in *rollap return r0, r1 } -// MockQueryClient_ValidateGenesisBridge_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ValidateGenesisBridge' + type MockQueryClient_ValidateGenesisBridge_Call struct { *mock.Call } -// ValidateGenesisBridge is a helper method to define mock.On call -// - ctx context.Context -// - in *rollapp.QueryValidateGenesisBridgeRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) ValidateGenesisBridge(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_ValidateGenesisBridge_Call { return &MockQueryClient_ValidateGenesisBridge_Call{Call: _e.mock.On("ValidateGenesisBridge", append([]interface{}{ctx, in}, opts...)...)} @@ -765,8 +765,8 @@ func (_c *MockQueryClient_ValidateGenesisBridge_Call) RunAndReturn(run func(cont return _c } -// NewMockQueryClient creates a new instance of MockQueryClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockQueryClient(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer/mock_QueryClient.go b/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer/mock_QueryClient.go index af5bcaf4b..0b76b1a9b 100644 --- a/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer/mock_QueryClient.go +++ b/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer/mock_QueryClient.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. 
+ package sequencer @@ -12,7 +12,7 @@ import ( sequencer "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer" ) -// MockQueryClient is an autogenerated mock type for the QueryClient type + type MockQueryClient struct { mock.Mock } @@ -25,7 +25,7 @@ func (_m *MockQueryClient) EXPECT() *MockQueryClient_Expecter { return &MockQueryClient_Expecter{mock: &_m.Mock} } -// GetNextProposerByRollapp provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) GetNextProposerByRollapp(ctx context.Context, in *sequencer.QueryGetNextProposerByRollappRequest, opts ...grpc.CallOption) (*sequencer.QueryGetNextProposerByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -62,15 +62,15 @@ func (_m *MockQueryClient) GetNextProposerByRollapp(ctx context.Context, in *seq return r0, r1 } -// MockQueryClient_GetNextProposerByRollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNextProposerByRollapp' + type MockQueryClient_GetNextProposerByRollapp_Call struct { *mock.Call } -// GetNextProposerByRollapp is a helper method to define mock.On call -// - ctx context.Context -// - in *sequencer.QueryGetNextProposerByRollappRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) GetNextProposerByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_GetNextProposerByRollapp_Call { return &MockQueryClient_GetNextProposerByRollapp_Call{Call: _e.mock.On("GetNextProposerByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -99,7 +99,7 @@ func (_c *MockQueryClient_GetNextProposerByRollapp_Call) RunAndReturn(run func(c return _c } -// GetProposerByRollapp provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) GetProposerByRollapp(ctx context.Context, in *sequencer.QueryGetProposerByRollappRequest, opts ...grpc.CallOption) (*sequencer.QueryGetProposerByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -136,15 +136,15 @@ func (_m *MockQueryClient) GetProposerByRollapp(ctx context.Context, in *sequenc return r0, r1 } -// MockQueryClient_GetProposerByRollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProposerByRollapp' + type MockQueryClient_GetProposerByRollapp_Call struct { *mock.Call } -// GetProposerByRollapp is a helper method to define mock.On call -// - ctx context.Context -// - in *sequencer.QueryGetProposerByRollappRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) GetProposerByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_GetProposerByRollapp_Call { return &MockQueryClient_GetProposerByRollapp_Call{Call: _e.mock.On("GetProposerByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -173,7 +173,7 @@ func (_c *MockQueryClient_GetProposerByRollapp_Call) RunAndReturn(run func(conte return _c } -// Params provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) Params(ctx context.Context, in *sequencer.QueryParamsRequest, opts ...grpc.CallOption) (*sequencer.QueryParamsResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -210,15 +210,15 @@ func (_m *MockQueryClient) Params(ctx context.Context, in *sequencer.QueryParams return r0, r1 } -// MockQueryClient_Params_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Params' + type 
MockQueryClient_Params_Call struct { *mock.Call } -// Params is a helper method to define mock.On call -// - ctx context.Context -// - in *sequencer.QueryParamsRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) Params(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Params_Call { return &MockQueryClient_Params_Call{Call: _e.mock.On("Params", append([]interface{}{ctx, in}, opts...)...)} @@ -247,7 +247,7 @@ func (_c *MockQueryClient_Params_Call) RunAndReturn(run func(context.Context, *s return _c } -// Proposers provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) Proposers(ctx context.Context, in *sequencer.QueryProposersRequest, opts ...grpc.CallOption) (*sequencer.QueryProposersResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -284,15 +284,15 @@ func (_m *MockQueryClient) Proposers(ctx context.Context, in *sequencer.QueryPro return r0, r1 } -// MockQueryClient_Proposers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Proposers' + type MockQueryClient_Proposers_Call struct { *mock.Call } -// Proposers is a helper method to define mock.On call -// - ctx context.Context -// - in *sequencer.QueryProposersRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) Proposers(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Proposers_Call { return &MockQueryClient_Proposers_Call{Call: _e.mock.On("Proposers", append([]interface{}{ctx, in}, opts...)...)} @@ -321,7 +321,7 @@ func (_c *MockQueryClient_Proposers_Call) RunAndReturn(run func(context.Context, return _c } -// Sequencer provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) Sequencer(ctx context.Context, in *sequencer.QueryGetSequencerRequest, opts ...grpc.CallOption) (*sequencer.QueryGetSequencerResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -358,15 +358,15 @@ func (_m *MockQueryClient) Sequencer(ctx context.Context, in *sequencer.QueryGet return r0, r1 } -// MockQueryClient_Sequencer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sequencer' + type MockQueryClient_Sequencer_Call struct { *mock.Call } -// Sequencer is a helper method to define mock.On call -// - ctx context.Context -// - in *sequencer.QueryGetSequencerRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) Sequencer(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Sequencer_Call { return &MockQueryClient_Sequencer_Call{Call: _e.mock.On("Sequencer", append([]interface{}{ctx, in}, opts...)...)} @@ -395,7 +395,7 @@ func (_c *MockQueryClient_Sequencer_Call) RunAndReturn(run func(context.Context, return _c } -// Sequencers provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) Sequencers(ctx context.Context, in *sequencer.QuerySequencersRequest, opts ...grpc.CallOption) (*sequencer.QuerySequencersResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -432,15 +432,15 @@ func (_m *MockQueryClient) Sequencers(ctx context.Context, in *sequencer.QuerySe return r0, r1 } -// MockQueryClient_Sequencers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sequencers' + type MockQueryClient_Sequencers_Call struct { *mock.Call } -// Sequencers is a helper method to define mock.On call -// - ctx context.Context -// - in 
*sequencer.QuerySequencersRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) Sequencers(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Sequencers_Call { return &MockQueryClient_Sequencers_Call{Call: _e.mock.On("Sequencers", append([]interface{}{ctx, in}, opts...)...)} @@ -469,7 +469,7 @@ func (_c *MockQueryClient_Sequencers_Call) RunAndReturn(run func(context.Context return _c } -// SequencersByRollapp provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) SequencersByRollapp(ctx context.Context, in *sequencer.QueryGetSequencersByRollappRequest, opts ...grpc.CallOption) (*sequencer.QueryGetSequencersByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -506,15 +506,15 @@ func (_m *MockQueryClient) SequencersByRollapp(ctx context.Context, in *sequence return r0, r1 } -// MockQueryClient_SequencersByRollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SequencersByRollapp' + type MockQueryClient_SequencersByRollapp_Call struct { *mock.Call } -// SequencersByRollapp is a helper method to define mock.On call -// - ctx context.Context -// - in *sequencer.QueryGetSequencersByRollappRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) SequencersByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_SequencersByRollapp_Call { return &MockQueryClient_SequencersByRollapp_Call{Call: _e.mock.On("SequencersByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -543,7 +543,7 @@ func (_c *MockQueryClient_SequencersByRollapp_Call) RunAndReturn(run func(contex return _c } -// SequencersByRollappByStatus provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) SequencersByRollappByStatus(ctx context.Context, in *sequencer.QueryGetSequencersByRollappByStatusRequest, opts ...grpc.CallOption) (*sequencer.QueryGetSequencersByRollappByStatusResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -580,15 +580,15 @@ func (_m *MockQueryClient) SequencersByRollappByStatus(ctx context.Context, in * return r0, r1 } -// MockQueryClient_SequencersByRollappByStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SequencersByRollappByStatus' + type MockQueryClient_SequencersByRollappByStatus_Call struct { *mock.Call } -// SequencersByRollappByStatus is a helper method to define mock.On call -// - ctx context.Context -// - in *sequencer.QueryGetSequencersByRollappByStatusRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) SequencersByRollappByStatus(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_SequencersByRollappByStatus_Call { return &MockQueryClient_SequencersByRollappByStatus_Call{Call: _e.mock.On("SequencersByRollappByStatus", append([]interface{}{ctx, in}, opts...)...)} @@ -617,8 +617,8 @@ func (_c *MockQueryClient_SequencersByRollappByStatus_Call) RunAndReturn(run fun return _c } -// NewMockQueryClient creates a new instance of MockQueryClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
+ + func NewMockQueryClient(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/tendermint/tendermint/abci/types/mock_Application.go b/mocks/github.com/tendermint/tendermint/abci/types/mock_Application.go index 7393ef94e..db13fb1e2 100644 --- a/mocks/github.com/tendermint/tendermint/abci/types/mock_Application.go +++ b/mocks/github.com/tendermint/tendermint/abci/types/mock_Application.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. + package types @@ -7,7 +7,7 @@ import ( types "github.com/tendermint/tendermint/abci/types" ) -// MockApplication is an autogenerated mock type for the Application type + type MockApplication struct { mock.Mock } @@ -20,7 +20,7 @@ func (_m *MockApplication) EXPECT() *MockApplication_Expecter { return &MockApplication_Expecter{mock: &_m.Mock} } -// ApplySnapshotChunk provides a mock function with given fields: _a0 + func (_m *MockApplication) ApplySnapshotChunk(_a0 types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk { ret := _m.Called(_a0) @@ -38,13 +38,13 @@ func (_m *MockApplication) ApplySnapshotChunk(_a0 types.RequestApplySnapshotChun return r0 } -// MockApplication_ApplySnapshotChunk_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ApplySnapshotChunk' + type MockApplication_ApplySnapshotChunk_Call struct { *mock.Call } -// ApplySnapshotChunk is a helper method to define mock.On call -// - _a0 types.RequestApplySnapshotChunk + + func (_e *MockApplication_Expecter) ApplySnapshotChunk(_a0 interface{}) *MockApplication_ApplySnapshotChunk_Call { return &MockApplication_ApplySnapshotChunk_Call{Call: _e.mock.On("ApplySnapshotChunk", _a0)} } @@ -66,7 +66,7 @@ func (_c *MockApplication_ApplySnapshotChunk_Call) RunAndReturn(run func(types.R return _c } -// BeginBlock provides a mock function with given fields: _a0 + func (_m *MockApplication) BeginBlock(_a0 types.RequestBeginBlock) types.ResponseBeginBlock { ret := _m.Called(_a0) @@ -84,13 +84,13 @@ func (_m *MockApplication) BeginBlock(_a0 types.RequestBeginBlock) types.Respons return r0 } -// MockApplication_BeginBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BeginBlock' + type MockApplication_BeginBlock_Call struct { *mock.Call } -// BeginBlock is a helper method to define mock.On call -// - _a0 types.RequestBeginBlock + + func (_e *MockApplication_Expecter) BeginBlock(_a0 interface{}) *MockApplication_BeginBlock_Call { return &MockApplication_BeginBlock_Call{Call: _e.mock.On("BeginBlock", _a0)} } @@ -112,7 +112,7 @@ func (_c *MockApplication_BeginBlock_Call) RunAndReturn(run func(types.RequestBe return _c } -// CheckTx provides a mock function with given fields: _a0 + func (_m *MockApplication) CheckTx(_a0 types.RequestCheckTx) types.ResponseCheckTx { ret := _m.Called(_a0) @@ -130,13 +130,13 @@ func (_m *MockApplication) CheckTx(_a0 types.RequestCheckTx) types.ResponseCheck return r0 } -// MockApplication_CheckTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckTx' + type MockApplication_CheckTx_Call struct { *mock.Call } -// CheckTx is a helper method to define mock.On call -// - _a0 types.RequestCheckTx + + func (_e *MockApplication_Expecter) CheckTx(_a0 interface{}) *MockApplication_CheckTx_Call { return &MockApplication_CheckTx_Call{Call: _e.mock.On("CheckTx", _a0)} } @@ -158,7 +158,7 @@ func (_c *MockApplication_CheckTx_Call) RunAndReturn(run func(types.RequestCheck return _c } -// Commit provides a mock 
function with given fields: + func (_m *MockApplication) Commit() types.ResponseCommit { ret := _m.Called() @@ -176,12 +176,12 @@ func (_m *MockApplication) Commit() types.ResponseCommit { return r0 } -// MockApplication_Commit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Commit' + type MockApplication_Commit_Call struct { *mock.Call } -// Commit is a helper method to define mock.On call + func (_e *MockApplication_Expecter) Commit() *MockApplication_Commit_Call { return &MockApplication_Commit_Call{Call: _e.mock.On("Commit")} } @@ -203,7 +203,7 @@ func (_c *MockApplication_Commit_Call) RunAndReturn(run func() types.ResponseCom return _c } -// DeliverTx provides a mock function with given fields: _a0 + func (_m *MockApplication) DeliverTx(_a0 types.RequestDeliverTx) types.ResponseDeliverTx { ret := _m.Called(_a0) @@ -221,13 +221,13 @@ func (_m *MockApplication) DeliverTx(_a0 types.RequestDeliverTx) types.ResponseD return r0 } -// MockApplication_DeliverTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeliverTx' + type MockApplication_DeliverTx_Call struct { *mock.Call } -// DeliverTx is a helper method to define mock.On call -// - _a0 types.RequestDeliverTx + + func (_e *MockApplication_Expecter) DeliverTx(_a0 interface{}) *MockApplication_DeliverTx_Call { return &MockApplication_DeliverTx_Call{Call: _e.mock.On("DeliverTx", _a0)} } @@ -249,7 +249,7 @@ func (_c *MockApplication_DeliverTx_Call) RunAndReturn(run func(types.RequestDel return _c } -// EndBlock provides a mock function with given fields: _a0 + func (_m *MockApplication) EndBlock(_a0 types.RequestEndBlock) types.ResponseEndBlock { ret := _m.Called(_a0) @@ -267,13 +267,13 @@ func (_m *MockApplication) EndBlock(_a0 types.RequestEndBlock) types.ResponseEnd return r0 } -// MockApplication_EndBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EndBlock' + type MockApplication_EndBlock_Call struct { *mock.Call } -// EndBlock is a helper method to define mock.On call -// - _a0 types.RequestEndBlock + + func (_e *MockApplication_Expecter) EndBlock(_a0 interface{}) *MockApplication_EndBlock_Call { return &MockApplication_EndBlock_Call{Call: _e.mock.On("EndBlock", _a0)} } @@ -295,7 +295,7 @@ func (_c *MockApplication_EndBlock_Call) RunAndReturn(run func(types.RequestEndB return _c } -// Info provides a mock function with given fields: _a0 + func (_m *MockApplication) Info(_a0 types.RequestInfo) types.ResponseInfo { ret := _m.Called(_a0) @@ -313,13 +313,13 @@ func (_m *MockApplication) Info(_a0 types.RequestInfo) types.ResponseInfo { return r0 } -// MockApplication_Info_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Info' + type MockApplication_Info_Call struct { *mock.Call } -// Info is a helper method to define mock.On call -// - _a0 types.RequestInfo + + func (_e *MockApplication_Expecter) Info(_a0 interface{}) *MockApplication_Info_Call { return &MockApplication_Info_Call{Call: _e.mock.On("Info", _a0)} } @@ -341,7 +341,7 @@ func (_c *MockApplication_Info_Call) RunAndReturn(run func(types.RequestInfo) ty return _c } -// InitChain provides a mock function with given fields: _a0 + func (_m *MockApplication) InitChain(_a0 types.RequestInitChain) types.ResponseInitChain { ret := _m.Called(_a0) @@ -359,13 +359,13 @@ func (_m *MockApplication) InitChain(_a0 types.RequestInitChain) types.ResponseI return r0 } -// MockApplication_InitChain_Call is a *mock.Call that 
shadows Run/Return methods with type explicit version for method 'InitChain' + type MockApplication_InitChain_Call struct { *mock.Call } -// InitChain is a helper method to define mock.On call -// - _a0 types.RequestInitChain + + func (_e *MockApplication_Expecter) InitChain(_a0 interface{}) *MockApplication_InitChain_Call { return &MockApplication_InitChain_Call{Call: _e.mock.On("InitChain", _a0)} } @@ -387,7 +387,7 @@ func (_c *MockApplication_InitChain_Call) RunAndReturn(run func(types.RequestIni return _c } -// ListSnapshots provides a mock function with given fields: _a0 + func (_m *MockApplication) ListSnapshots(_a0 types.RequestListSnapshots) types.ResponseListSnapshots { ret := _m.Called(_a0) @@ -405,13 +405,13 @@ func (_m *MockApplication) ListSnapshots(_a0 types.RequestListSnapshots) types.R return r0 } -// MockApplication_ListSnapshots_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListSnapshots' + type MockApplication_ListSnapshots_Call struct { *mock.Call } -// ListSnapshots is a helper method to define mock.On call -// - _a0 types.RequestListSnapshots + + func (_e *MockApplication_Expecter) ListSnapshots(_a0 interface{}) *MockApplication_ListSnapshots_Call { return &MockApplication_ListSnapshots_Call{Call: _e.mock.On("ListSnapshots", _a0)} } @@ -433,7 +433,7 @@ func (_c *MockApplication_ListSnapshots_Call) RunAndReturn(run func(types.Reques return _c } -// LoadSnapshotChunk provides a mock function with given fields: _a0 + func (_m *MockApplication) LoadSnapshotChunk(_a0 types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk { ret := _m.Called(_a0) @@ -451,13 +451,13 @@ func (_m *MockApplication) LoadSnapshotChunk(_a0 types.RequestLoadSnapshotChunk) return r0 } -// MockApplication_LoadSnapshotChunk_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadSnapshotChunk' + type MockApplication_LoadSnapshotChunk_Call struct { *mock.Call } -// LoadSnapshotChunk is a helper method to define mock.On call -// - _a0 types.RequestLoadSnapshotChunk + + func (_e *MockApplication_Expecter) LoadSnapshotChunk(_a0 interface{}) *MockApplication_LoadSnapshotChunk_Call { return &MockApplication_LoadSnapshotChunk_Call{Call: _e.mock.On("LoadSnapshotChunk", _a0)} } @@ -479,7 +479,7 @@ func (_c *MockApplication_LoadSnapshotChunk_Call) RunAndReturn(run func(types.Re return _c } -// OfferSnapshot provides a mock function with given fields: _a0 + func (_m *MockApplication) OfferSnapshot(_a0 types.RequestOfferSnapshot) types.ResponseOfferSnapshot { ret := _m.Called(_a0) @@ -497,13 +497,13 @@ func (_m *MockApplication) OfferSnapshot(_a0 types.RequestOfferSnapshot) types.R return r0 } -// MockApplication_OfferSnapshot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OfferSnapshot' + type MockApplication_OfferSnapshot_Call struct { *mock.Call } -// OfferSnapshot is a helper method to define mock.On call -// - _a0 types.RequestOfferSnapshot + + func (_e *MockApplication_Expecter) OfferSnapshot(_a0 interface{}) *MockApplication_OfferSnapshot_Call { return &MockApplication_OfferSnapshot_Call{Call: _e.mock.On("OfferSnapshot", _a0)} } @@ -525,7 +525,7 @@ func (_c *MockApplication_OfferSnapshot_Call) RunAndReturn(run func(types.Reques return _c } -// Query provides a mock function with given fields: _a0 + func (_m *MockApplication) Query(_a0 types.RequestQuery) types.ResponseQuery { ret := _m.Called(_a0) @@ -543,13 +543,13 @@ func (_m *MockApplication) Query(_a0 
types.RequestQuery) types.ResponseQuery { return r0 } -// MockApplication_Query_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Query' + type MockApplication_Query_Call struct { *mock.Call } -// Query is a helper method to define mock.On call -// - _a0 types.RequestQuery + + func (_e *MockApplication_Expecter) Query(_a0 interface{}) *MockApplication_Query_Call { return &MockApplication_Query_Call{Call: _e.mock.On("Query", _a0)} } @@ -571,7 +571,7 @@ func (_c *MockApplication_Query_Call) RunAndReturn(run func(types.RequestQuery) return _c } -// SetOption provides a mock function with given fields: _a0 + func (_m *MockApplication) SetOption(_a0 types.RequestSetOption) types.ResponseSetOption { ret := _m.Called(_a0) @@ -589,13 +589,13 @@ func (_m *MockApplication) SetOption(_a0 types.RequestSetOption) types.ResponseS return r0 } -// MockApplication_SetOption_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetOption' + type MockApplication_SetOption_Call struct { *mock.Call } -// SetOption is a helper method to define mock.On call -// - _a0 types.RequestSetOption + + func (_e *MockApplication_Expecter) SetOption(_a0 interface{}) *MockApplication_SetOption_Call { return &MockApplication_SetOption_Call{Call: _e.mock.On("SetOption", _a0)} } @@ -617,8 +617,8 @@ func (_c *MockApplication_SetOption_Call) RunAndReturn(run func(types.RequestSet return _c } -// NewMockApplication creates a new instance of MockApplication. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockApplication(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/tendermint/tendermint/proxy/mock_AppConnConsensus.go b/mocks/github.com/tendermint/tendermint/proxy/mock_AppConnConsensus.go index 9ec6b2d18..fc03566e5 100644 --- a/mocks/github.com/tendermint/tendermint/proxy/mock_AppConnConsensus.go +++ b/mocks/github.com/tendermint/tendermint/proxy/mock_AppConnConsensus.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. + package proxy @@ -9,7 +9,7 @@ import ( types "github.com/tendermint/tendermint/abci/types" ) -// MockAppConnConsensus is an autogenerated mock type for the AppConnConsensus type + type MockAppConnConsensus struct { mock.Mock } @@ -22,7 +22,7 @@ func (_m *MockAppConnConsensus) EXPECT() *MockAppConnConsensus_Expecter { return &MockAppConnConsensus_Expecter{mock: &_m.Mock} } -// BeginBlockSync provides a mock function with given fields: _a0 + func (_m *MockAppConnConsensus) BeginBlockSync(_a0 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { ret := _m.Called(_a0) @@ -52,13 +52,13 @@ func (_m *MockAppConnConsensus) BeginBlockSync(_a0 types.RequestBeginBlock) (*ty return r0, r1 } -// MockAppConnConsensus_BeginBlockSync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BeginBlockSync' + type MockAppConnConsensus_BeginBlockSync_Call struct { *mock.Call } -// BeginBlockSync is a helper method to define mock.On call -// - _a0 types.RequestBeginBlock + + func (_e *MockAppConnConsensus_Expecter) BeginBlockSync(_a0 interface{}) *MockAppConnConsensus_BeginBlockSync_Call { return &MockAppConnConsensus_BeginBlockSync_Call{Call: _e.mock.On("BeginBlockSync", _a0)} } @@ -80,7 +80,7 @@ func (_c *MockAppConnConsensus_BeginBlockSync_Call) RunAndReturn(run func(types. 
return _c } -// CommitSync provides a mock function with given fields: + func (_m *MockAppConnConsensus) CommitSync() (*types.ResponseCommit, error) { ret := _m.Called() @@ -110,12 +110,12 @@ func (_m *MockAppConnConsensus) CommitSync() (*types.ResponseCommit, error) { return r0, r1 } -// MockAppConnConsensus_CommitSync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CommitSync' + type MockAppConnConsensus_CommitSync_Call struct { *mock.Call } -// CommitSync is a helper method to define mock.On call + func (_e *MockAppConnConsensus_Expecter) CommitSync() *MockAppConnConsensus_CommitSync_Call { return &MockAppConnConsensus_CommitSync_Call{Call: _e.mock.On("CommitSync")} } @@ -137,7 +137,7 @@ func (_c *MockAppConnConsensus_CommitSync_Call) RunAndReturn(run func() (*types. return _c } -// DeliverTxAsync provides a mock function with given fields: _a0 + func (_m *MockAppConnConsensus) DeliverTxAsync(_a0 types.RequestDeliverTx) *abcicli.ReqRes { ret := _m.Called(_a0) @@ -157,13 +157,13 @@ func (_m *MockAppConnConsensus) DeliverTxAsync(_a0 types.RequestDeliverTx) *abci return r0 } -// MockAppConnConsensus_DeliverTxAsync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeliverTxAsync' + type MockAppConnConsensus_DeliverTxAsync_Call struct { *mock.Call } -// DeliverTxAsync is a helper method to define mock.On call -// - _a0 types.RequestDeliverTx + + func (_e *MockAppConnConsensus_Expecter) DeliverTxAsync(_a0 interface{}) *MockAppConnConsensus_DeliverTxAsync_Call { return &MockAppConnConsensus_DeliverTxAsync_Call{Call: _e.mock.On("DeliverTxAsync", _a0)} } @@ -185,7 +185,7 @@ func (_c *MockAppConnConsensus_DeliverTxAsync_Call) RunAndReturn(run func(types. return _c } -// EndBlockSync provides a mock function with given fields: _a0 + func (_m *MockAppConnConsensus) EndBlockSync(_a0 types.RequestEndBlock) (*types.ResponseEndBlock, error) { ret := _m.Called(_a0) @@ -215,13 +215,13 @@ func (_m *MockAppConnConsensus) EndBlockSync(_a0 types.RequestEndBlock) (*types. 
return r0, r1 } -// MockAppConnConsensus_EndBlockSync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EndBlockSync' + type MockAppConnConsensus_EndBlockSync_Call struct { *mock.Call } -// EndBlockSync is a helper method to define mock.On call -// - _a0 types.RequestEndBlock + + func (_e *MockAppConnConsensus_Expecter) EndBlockSync(_a0 interface{}) *MockAppConnConsensus_EndBlockSync_Call { return &MockAppConnConsensus_EndBlockSync_Call{Call: _e.mock.On("EndBlockSync", _a0)} } @@ -243,7 +243,7 @@ func (_c *MockAppConnConsensus_EndBlockSync_Call) RunAndReturn(run func(types.Re return _c } -// Error provides a mock function with given fields: + func (_m *MockAppConnConsensus) Error() error { ret := _m.Called() @@ -261,12 +261,12 @@ func (_m *MockAppConnConsensus) Error() error { return r0 } -// MockAppConnConsensus_Error_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Error' + type MockAppConnConsensus_Error_Call struct { *mock.Call } -// Error is a helper method to define mock.On call + func (_e *MockAppConnConsensus_Expecter) Error() *MockAppConnConsensus_Error_Call { return &MockAppConnConsensus_Error_Call{Call: _e.mock.On("Error")} } @@ -288,7 +288,7 @@ func (_c *MockAppConnConsensus_Error_Call) RunAndReturn(run func() error) *MockA return _c } -// InitChainSync provides a mock function with given fields: _a0 + func (_m *MockAppConnConsensus) InitChainSync(_a0 types.RequestInitChain) (*types.ResponseInitChain, error) { ret := _m.Called(_a0) @@ -318,13 +318,13 @@ func (_m *MockAppConnConsensus) InitChainSync(_a0 types.RequestInitChain) (*type return r0, r1 } -// MockAppConnConsensus_InitChainSync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InitChainSync' + type MockAppConnConsensus_InitChainSync_Call struct { *mock.Call } -// InitChainSync is a helper method to define mock.On call -// - _a0 types.RequestInitChain + + func (_e *MockAppConnConsensus_Expecter) InitChainSync(_a0 interface{}) *MockAppConnConsensus_InitChainSync_Call { return &MockAppConnConsensus_InitChainSync_Call{Call: _e.mock.On("InitChainSync", _a0)} } @@ -346,18 +346,18 @@ func (_c *MockAppConnConsensus_InitChainSync_Call) RunAndReturn(run func(types.R return _c } -// SetResponseCallback provides a mock function with given fields: _a0 + func (_m *MockAppConnConsensus) SetResponseCallback(_a0 abcicli.Callback) { _m.Called(_a0) } -// MockAppConnConsensus_SetResponseCallback_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetResponseCallback' + type MockAppConnConsensus_SetResponseCallback_Call struct { *mock.Call } -// SetResponseCallback is a helper method to define mock.On call -// - _a0 abcicli.Callback + + func (_e *MockAppConnConsensus_Expecter) SetResponseCallback(_a0 interface{}) *MockAppConnConsensus_SetResponseCallback_Call { return &MockAppConnConsensus_SetResponseCallback_Call{Call: _e.mock.On("SetResponseCallback", _a0)} } @@ -379,8 +379,8 @@ func (_c *MockAppConnConsensus_SetResponseCallback_Call) RunAndReturn(run func(a return _c } -// NewMockAppConnConsensus creates a new instance of MockAppConnConsensus. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
+ + func NewMockAppConnConsensus(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/tendermint/tendermint/proxy/mock_AppConns.go b/mocks/github.com/tendermint/tendermint/proxy/mock_AppConns.go index affc90a4e..ea1b7934a 100644 --- a/mocks/github.com/tendermint/tendermint/proxy/mock_AppConns.go +++ b/mocks/github.com/tendermint/tendermint/proxy/mock_AppConns.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. + package proxy @@ -9,7 +9,7 @@ import ( proxy "github.com/tendermint/tendermint/proxy" ) -// MockAppConns is an autogenerated mock type for the AppConns type + type MockAppConns struct { mock.Mock } @@ -22,7 +22,7 @@ func (_m *MockAppConns) EXPECT() *MockAppConns_Expecter { return &MockAppConns_Expecter{mock: &_m.Mock} } -// Consensus provides a mock function with given fields: + func (_m *MockAppConns) Consensus() proxy.AppConnConsensus { ret := _m.Called() @@ -42,12 +42,12 @@ func (_m *MockAppConns) Consensus() proxy.AppConnConsensus { return r0 } -// MockAppConns_Consensus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Consensus' + type MockAppConns_Consensus_Call struct { *mock.Call } -// Consensus is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) Consensus() *MockAppConns_Consensus_Call { return &MockAppConns_Consensus_Call{Call: _e.mock.On("Consensus")} } @@ -69,7 +69,7 @@ func (_c *MockAppConns_Consensus_Call) RunAndReturn(run func() proxy.AppConnCons return _c } -// IsRunning provides a mock function with given fields: + func (_m *MockAppConns) IsRunning() bool { ret := _m.Called() @@ -87,12 +87,12 @@ func (_m *MockAppConns) IsRunning() bool { return r0 } -// MockAppConns_IsRunning_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsRunning' + type MockAppConns_IsRunning_Call struct { *mock.Call } -// IsRunning is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) IsRunning() *MockAppConns_IsRunning_Call { return &MockAppConns_IsRunning_Call{Call: _e.mock.On("IsRunning")} } @@ -114,7 +114,7 @@ func (_c *MockAppConns_IsRunning_Call) RunAndReturn(run func() bool) *MockAppCon return _c } -// Mempool provides a mock function with given fields: + func (_m *MockAppConns) Mempool() proxy.AppConnMempool { ret := _m.Called() @@ -134,12 +134,12 @@ func (_m *MockAppConns) Mempool() proxy.AppConnMempool { return r0 } -// MockAppConns_Mempool_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Mempool' + type MockAppConns_Mempool_Call struct { *mock.Call } -// Mempool is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) Mempool() *MockAppConns_Mempool_Call { return &MockAppConns_Mempool_Call{Call: _e.mock.On("Mempool")} } @@ -161,7 +161,7 @@ func (_c *MockAppConns_Mempool_Call) RunAndReturn(run func() proxy.AppConnMempoo return _c } -// OnReset provides a mock function with given fields: + func (_m *MockAppConns) OnReset() error { ret := _m.Called() @@ -179,12 +179,12 @@ func (_m *MockAppConns) OnReset() error { return r0 } -// MockAppConns_OnReset_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnReset' + type MockAppConns_OnReset_Call struct { *mock.Call } -// OnReset is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) OnReset() *MockAppConns_OnReset_Call { return &MockAppConns_OnReset_Call{Call: _e.mock.On("OnReset")} } @@ -206,7 +206,7 @@ func (_c *MockAppConns_OnReset_Call) 
RunAndReturn(run func() error) *MockAppConn return _c } -// OnStart provides a mock function with given fields: + func (_m *MockAppConns) OnStart() error { ret := _m.Called() @@ -224,12 +224,12 @@ func (_m *MockAppConns) OnStart() error { return r0 } -// MockAppConns_OnStart_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnStart' + type MockAppConns_OnStart_Call struct { *mock.Call } -// OnStart is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) OnStart() *MockAppConns_OnStart_Call { return &MockAppConns_OnStart_Call{Call: _e.mock.On("OnStart")} } @@ -251,17 +251,17 @@ func (_c *MockAppConns_OnStart_Call) RunAndReturn(run func() error) *MockAppConn return _c } -// OnStop provides a mock function with given fields: + func (_m *MockAppConns) OnStop() { _m.Called() } -// MockAppConns_OnStop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnStop' + type MockAppConns_OnStop_Call struct { *mock.Call } -// OnStop is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) OnStop() *MockAppConns_OnStop_Call { return &MockAppConns_OnStop_Call{Call: _e.mock.On("OnStop")} } @@ -283,7 +283,7 @@ func (_c *MockAppConns_OnStop_Call) RunAndReturn(run func()) *MockAppConns_OnSto return _c } -// Query provides a mock function with given fields: + func (_m *MockAppConns) Query() proxy.AppConnQuery { ret := _m.Called() @@ -303,12 +303,12 @@ func (_m *MockAppConns) Query() proxy.AppConnQuery { return r0 } -// MockAppConns_Query_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Query' + type MockAppConns_Query_Call struct { *mock.Call } -// Query is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) Query() *MockAppConns_Query_Call { return &MockAppConns_Query_Call{Call: _e.mock.On("Query")} } @@ -330,7 +330,7 @@ func (_c *MockAppConns_Query_Call) RunAndReturn(run func() proxy.AppConnQuery) * return _c } -// Quit provides a mock function with given fields: + func (_m *MockAppConns) Quit() <-chan struct{} { ret := _m.Called() @@ -350,12 +350,12 @@ func (_m *MockAppConns) Quit() <-chan struct{} { return r0 } -// MockAppConns_Quit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Quit' + type MockAppConns_Quit_Call struct { *mock.Call } -// Quit is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) Quit() *MockAppConns_Quit_Call { return &MockAppConns_Quit_Call{Call: _e.mock.On("Quit")} } @@ -377,7 +377,7 @@ func (_c *MockAppConns_Quit_Call) RunAndReturn(run func() <-chan struct{}) *Mock return _c } -// Reset provides a mock function with given fields: + func (_m *MockAppConns) Reset() error { ret := _m.Called() @@ -395,12 +395,12 @@ func (_m *MockAppConns) Reset() error { return r0 } -// MockAppConns_Reset_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Reset' + type MockAppConns_Reset_Call struct { *mock.Call } -// Reset is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) Reset() *MockAppConns_Reset_Call { return &MockAppConns_Reset_Call{Call: _e.mock.On("Reset")} } @@ -422,18 +422,18 @@ func (_c *MockAppConns_Reset_Call) RunAndReturn(run func() error) *MockAppConns_ return _c } -// SetLogger provides a mock function with given fields: _a0 + func (_m *MockAppConns) SetLogger(_a0 log.Logger) { _m.Called(_a0) } -// MockAppConns_SetLogger_Call is a *mock.Call that shadows Run/Return methods 
with type explicit version for method 'SetLogger' + type MockAppConns_SetLogger_Call struct { *mock.Call } -// SetLogger is a helper method to define mock.On call -// - _a0 log.Logger + + func (_e *MockAppConns_Expecter) SetLogger(_a0 interface{}) *MockAppConns_SetLogger_Call { return &MockAppConns_SetLogger_Call{Call: _e.mock.On("SetLogger", _a0)} } @@ -455,7 +455,7 @@ func (_c *MockAppConns_SetLogger_Call) RunAndReturn(run func(log.Logger)) *MockA return _c } -// Snapshot provides a mock function with given fields: + func (_m *MockAppConns) Snapshot() proxy.AppConnSnapshot { ret := _m.Called() @@ -475,12 +475,12 @@ func (_m *MockAppConns) Snapshot() proxy.AppConnSnapshot { return r0 } -// MockAppConns_Snapshot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Snapshot' + type MockAppConns_Snapshot_Call struct { *mock.Call } -// Snapshot is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) Snapshot() *MockAppConns_Snapshot_Call { return &MockAppConns_Snapshot_Call{Call: _e.mock.On("Snapshot")} } @@ -502,7 +502,7 @@ func (_c *MockAppConns_Snapshot_Call) RunAndReturn(run func() proxy.AppConnSnaps return _c } -// Start provides a mock function with given fields: + func (_m *MockAppConns) Start() error { ret := _m.Called() @@ -520,12 +520,12 @@ func (_m *MockAppConns) Start() error { return r0 } -// MockAppConns_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' + type MockAppConns_Start_Call struct { *mock.Call } -// Start is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) Start() *MockAppConns_Start_Call { return &MockAppConns_Start_Call{Call: _e.mock.On("Start")} } @@ -547,7 +547,7 @@ func (_c *MockAppConns_Start_Call) RunAndReturn(run func() error) *MockAppConns_ return _c } -// Stop provides a mock function with given fields: + func (_m *MockAppConns) Stop() error { ret := _m.Called() @@ -565,12 +565,12 @@ func (_m *MockAppConns) Stop() error { return r0 } -// MockAppConns_Stop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stop' + type MockAppConns_Stop_Call struct { *mock.Call } -// Stop is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) Stop() *MockAppConns_Stop_Call { return &MockAppConns_Stop_Call{Call: _e.mock.On("Stop")} } @@ -592,7 +592,7 @@ func (_c *MockAppConns_Stop_Call) RunAndReturn(run func() error) *MockAppConns_S return _c } -// String provides a mock function with given fields: + func (_m *MockAppConns) String() string { ret := _m.Called() @@ -610,12 +610,12 @@ func (_m *MockAppConns) String() string { return r0 } -// MockAppConns_String_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'String' + type MockAppConns_String_Call struct { *mock.Call } -// String is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) String() *MockAppConns_String_Call { return &MockAppConns_String_Call{Call: _e.mock.On("String")} } @@ -637,8 +637,8 @@ func (_c *MockAppConns_String_Call) RunAndReturn(run func() string) *MockAppConn return _c } -// NewMockAppConns creates a new instance of MockAppConns. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
+ + func NewMockAppConns(t interface { mock.TestingT Cleanup(func()) diff --git a/node/events/types.go b/node/events/types.go index 3af471f10..069ee116c 100644 --- a/node/events/types.go +++ b/node/events/types.go @@ -6,24 +6,24 @@ import ( uevent "github.com/dymensionxyz/dymint/utils/event" ) -// Type Keys + const ( - // NodeTypeKey is a reserved composite key for event name. + NodeTypeKey = "node.event" ) -// Types + const ( HealthStatus = "HealthStatus" ) -// Convenience + var HealthStatusList = map[string][]string{NodeTypeKey: {HealthStatus}} type DataHealthStatus struct { - // Error is the error that was encountered in case of a health check failure. Nil implies healthy. + Error error } @@ -31,6 +31,6 @@ func (dhs DataHealthStatus) String() string { return fmt.Sprintf("DataHealthStatus{Error: %v}", dhs.Error) } -// Queries + var QueryHealthStatus = uevent.QueryFor(NodeTypeKey, HealthStatus) diff --git a/node/mempool/mempool.go b/node/mempool/mempool.go index 80477193c..1d7c53310 100644 --- a/node/mempool/mempool.go +++ b/node/mempool/mempool.go @@ -15,12 +15,12 @@ const ( type MempoolIDs struct { mtx tmsync.RWMutex peerMap map[peer.ID]uint16 - nextID uint16 // assumes that a node will never have over 65536 active peers - activeIDs map[uint16]struct{} // used to check if a given peerID key is used, the value doesn't matter + nextID uint16 + activeIDs map[uint16]struct{} } -// Reserve searches for the next unused ID and assigns it to the -// peer. + + func (ids *MempoolIDs) ReserveForPeer(peer peer.ID) { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -30,8 +30,8 @@ func (ids *MempoolIDs) ReserveForPeer(peer peer.ID) { ids.activeIDs[curID] = struct{}{} } -// nextPeerID returns the next unused peer ID to use. -// This assumes that ids's mutex is already locked. + + func (ids *MempoolIDs) nextPeerID() uint16 { if len(ids.activeIDs) == maxActiveIDs { panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", maxActiveIDs)) @@ -47,7 +47,7 @@ func (ids *MempoolIDs) nextPeerID() uint16 { return curID } -// Reclaim returns the ID reserved for the peer back to unused pool. + func (ids *MempoolIDs) Reclaim(peer peer.ID) { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -59,7 +59,7 @@ func (ids *MempoolIDs) Reclaim(peer peer.ID) { } } -// GetForPeer returns an ID for the peer. ID is generated if required. + func (ids *MempoolIDs) GetForPeer(peer peer.ID) uint16 { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -78,6 +78,6 @@ func NewMempoolIDs() *MempoolIDs { return &MempoolIDs{ peerMap: make(map[peer.ID]uint16), activeIDs: map[uint16]struct{}{0: {}}, - nextID: 1, // reserve unknownPeerID(0) for mempoolReactor.BroadcastTx + nextID: 1, } } diff --git a/node/node.go b/node/node.go index f0f1a88e5..7cfe74792 100644 --- a/node/node.go +++ b/node/node.go @@ -34,15 +34,15 @@ import ( "github.com/dymensionxyz/dymint/store" ) -// prefixes used in KV store to separate main node data from DALC data + var ( mainPrefix = []byte{0} dalcPrefix = []byte{1} indexerPrefix = []byte{2} ) -// Node represents a client node in Dymint network. -// It connects all the components and orchestrates their work. 
+ + type Node struct { service.BaseService eventBus *tmtypes.EventBus @@ -54,7 +54,7 @@ type Node struct { conf config.NodeConfig P2P *p2p.Client - // TODO(tzdybal): consider extracting "mempool reactor" + Mempool mempool.Mempool MempoolIDs *nodemempool.MempoolIDs incomingTxCh chan *p2p.GossipMessage @@ -68,12 +68,12 @@ type Node struct { BlockIndexer indexer.BlockIndexer IndexerService *txindex.IndexerService - // shared context for all dymint components + ctx context.Context cancel context.CancelFunc } -// NewNode creates new Dymint node. + func NewNode( ctx context.Context, conf config.NodeConfig, @@ -102,12 +102,12 @@ func NewNode( var baseKV store.KV var dstore datastore.Datastore - if conf.DBConfig.InMemory || (conf.RootDir == "" && conf.DBPath == "") { // this is used for testing + if conf.DBConfig.InMemory || (conf.RootDir == "" && conf.DBPath == "") { logger.Info("WARNING: working in in-memory mode") baseKV = store.NewDefaultInMemoryKVStore() dstore = datastore.NewMapDatastore() } else { - // TODO(omritoptx): Move dymint to const + baseKV = store.NewKVStore(conf.RootDir, conf.DBPath, "dymint", conf.DBConfig.SyncWrites, logger) path := filepath.Join(store.Rootify(conf.RootDir, conf.DBPath), "blocksync") var err error @@ -120,9 +120,9 @@ func NewNode( s := store.New(store.NewPrefixKV(baseKV, mainPrefix)) indexerKV := store.NewPrefixKV(baseKV, indexerPrefix) - // TODO: dalcKV is needed for mock only. Initialize only if mock used + dalcKV := store.NewPrefixKV(baseKV, dalcPrefix) - // Init the settlement layer client + settlementlc := slregistry.GetClient(slregistry.Client(conf.SettlementLayer)) if settlementlc == nil { return nil, fmt.Errorf("get settlement client: named: %s", conf.SettlementLayer) @@ -161,7 +161,7 @@ func NewNode( settlementlc, eventBus, pubsubServer, - nil, // p2p client is set later + nil, dalcKV, indexerService, logger, @@ -170,7 +170,7 @@ func NewNode( return nil, fmt.Errorf("BlockManager initialization: %w", err) } - // Set p2p client and it's validators + p2pValidator := p2p.NewValidator(logger.With("module", "p2p_validator"), blockManager) p2pClient, err := p2p.NewClient(conf.P2PConfig, p2pKey, genesis.ChainID, s, pubsubServer, dstore, logger.With("module", "p2p")) if err != nil { @@ -179,7 +179,7 @@ func NewNode( p2pClient.SetTxValidator(p2pValidator.TxValidator(mp, mpIDs)) p2pClient.SetBlockValidator(p2pValidator.BlockValidator()) - // Set p2p client in block manager + blockManager.P2PClient = p2pClient ctx, cancel := context.WithCancel(ctx) @@ -209,7 +209,7 @@ func NewNode( return node, nil } -// OnStart is a part of Service interface. + func (n *Node) OnStart() error { n.Logger.Info("starting P2P client") err := n.P2P.Start(n.ctx) @@ -234,7 +234,7 @@ func (n *Node) OnStart() error { } }() - // start the block manager + err = n.BlockManager.Start(n.ctx) if err != nil { return fmt.Errorf("while starting block manager: %w", err) @@ -243,12 +243,12 @@ func (n *Node) OnStart() error { return nil } -// GetGenesis returns entire genesis doc. + func (n *Node) GetGenesis() *tmtypes.GenesisDoc { return n.genesis } -// OnStop is a part of Service interface. + func (n *Node) OnStop() { err := n.BlockManager.DAClient.Stop() if err != nil { @@ -273,32 +273,32 @@ func (n *Node) OnStop() { n.cancel() } -// OnReset is a part of Service interface. + func (n *Node) OnReset() error { panic("OnReset - not implemented!") } -// SetLogger sets the logger used by node. + func (n *Node) SetLogger(logger log.Logger) { n.Logger = logger } -// GetLogger returns logger. 
+ func (n *Node) GetLogger() log.Logger { return n.Logger } -// EventBus gives access to Node's event bus. + func (n *Node) EventBus() *tmtypes.EventBus { return n.eventBus } -// PubSubServer gives access to the Node's pubsub server + func (n *Node) PubSubServer() *pubsub.Server { return n.PubsubServer } -// ProxyApp returns ABCI proxy connections to communicate with application. + func (n *Node) ProxyApp() proxy.AppConns { return n.proxyApp } diff --git a/p2p/block.go b/p2p/block.go index d6da3da96..754c17973 100644 --- a/p2p/block.go +++ b/p2p/block.go @@ -6,24 +6,24 @@ import ( tmcrypto "github.com/tendermint/tendermint/crypto" ) -/* -------------------------------------------------------------------------- */ -/* Event Data */ -/* -------------------------------------------------------------------------- */ -// BlockData defines the struct of the data for each block sent via P2P + + + + type BlockData struct { - // Block is the block that was gossiped + Block types.Block - // Commit is the commit that was gossiped + Commit types.Commit } -// MarshalBinary encodes BlockData into binary form and returns it. + func (b *BlockData) MarshalBinary() ([]byte, error) { return b.ToProto().Marshal() } -// UnmarshalBinary decodes binary form of p2p received block into object. + func (b *BlockData) UnmarshalBinary(data []byte) error { var pbBlock pb.BlockData err := pbBlock.Unmarshal(data) @@ -34,7 +34,7 @@ func (b *BlockData) UnmarshalBinary(data []byte) error { return err } -// ToProto converts Data into protobuf representation and returns it. + func (b *BlockData) ToProto() *pb.BlockData { return &pb.BlockData{ Block: b.Block.ToProto(), @@ -42,7 +42,7 @@ func (b *BlockData) ToProto() *pb.BlockData { } } -// FromProto fills BlockData with data from its protobuf representation. + func (b *BlockData) FromProto(other *pb.BlockData) error { if err := b.Block.FromProto(other.Block); err != nil { return err @@ -53,7 +53,7 @@ func (b *BlockData) FromProto(other *pb.BlockData) error { return nil } -// Validate run basic validation on the p2p block received + func (b *BlockData) Validate(proposerPubKey tmcrypto.PubKey) error { if err := b.Block.ValidateBasic(); err != nil { return err diff --git a/p2p/block_sync.go b/p2p/block_sync.go index f8be1e2c0..49cfefb93 100644 --- a/p2p/block_sync.go +++ b/p2p/block_sync.go @@ -20,48 +20,48 @@ import ( "github.com/libp2p/go-libp2p/core/host" ) -// Blocksync is a protocol used to retrieve blocks on demand from the P2P network. -// Nodes store received blocks from gossip in an IPFS blockstore and nodes are able to request them on demand using bitswap protocol. -// In order to discover the identifier (CID) of each block a DHT request needs to be made for the specific block height. -// Nodes need to advertise CIDs/height map to the DHT periodically. 
-// https://www.notion.so/dymension/ADR-x-Rollapp-block-sync-protocol-6ee48b232a6a45e09989d67f1a6c0297?pvs=4 + + + + + type BlockSync struct { - // service that reads/writes blocks either from local datastore or the P2P network + bsrv blockservice.BlockService - // local datastore for IPFS blocks + bstore blockstore.Blockstore - // protocol used to obtain blocks from the P2P network + net network.BitSwapNetwork - // used to find all data chunks that are part of the same block + dsrv BlockSyncDagService - // used to define the content identifiers of each data chunk + cidBuilder cid.Builder logger types.Logger } type BlockSyncMessageHandler func(block *BlockData) -// SetupBlockSync initializes all services required to provide and retrieve block data in the P2P network. + func SetupBlockSync(ctx context.Context, h host.Host, store datastore.Datastore, logger types.Logger) *BlockSync { - // construct a datastore + ds := dsync.MutexWrap(store) - // set a blockstore (to store IPFS data chunks) with the previous datastore + bs := blockstore.NewBlockstore(ds) - // initialize bitswap network used to retrieve data chunks from other peers in the P2P network + bsnet := network.NewFromIpfsHost(h, &routinghelpers.Null{}, network.Prefix("/dymension/block-sync/")) - // Bitswap server that provides data to the network. + bsserver := server.New( ctx, bsnet, bs, - server.ProvideEnabled(false), // we don't provide blocks over DHT + server.ProvideEnabled(false), server.SetSendDontHaves(false), ) - // Bitswap client that retrieves data from the network. + bsclient := client.New( ctx, bsnet, @@ -71,7 +71,7 @@ func SetupBlockSync(ctx context.Context, h host.Host, store datastore.Datastore, client.WithoutDuplicatedBlockStats(), ) - // start the network + bsnet.Start(bsserver, bsclient) bsrv := blockservice.New(bs, bsclient) @@ -93,12 +93,12 @@ func SetupBlockSync(ctx context.Context, h host.Host, store datastore.Datastore, return blockSync } -// SaveBlock stores the blocks produced in the DAG services to be retrievable from the P2P network. + func (blocksync *BlockSync) SaveBlock(ctx context.Context, block []byte) (cid.Cid, error) { return blocksync.dsrv.SaveBlock(ctx, block) } -// LoadBlock retrieves the blocks (from the local blockstore or the network) using the DAGService to discover all data chunks that are part of the same block. + func (blocksync *BlockSync) LoadBlock(ctx context.Context, cid cid.Cid) (BlockData, error) { blockBytes, err := blocksync.dsrv.LoadBlock(ctx, cid) if err != nil { @@ -111,7 +111,7 @@ func (blocksync *BlockSync) LoadBlock(ctx context.Context, cid cid.Cid) (BlockDa return block, nil } -// RemoveBlock removes the block from the DAGservice. + func (blocksync *BlockSync) DeleteBlock(ctx context.Context, cid cid.Cid) error { return blocksync.dsrv.DeleteBlock(ctx, cid) } diff --git a/p2p/block_sync_dag.go b/p2p/block_sync_dag.go index d9df4d440..2502d9cd5 100644 --- a/p2p/block_sync_dag.go +++ b/p2p/block_sync_dag.go @@ -21,8 +21,8 @@ type BlockSyncDagService struct { cidBuilder cid.Builder } -// NewDAGService inits the DAGservice used to retrieve/send blocks data in the P2P. 
-// Block data is organized in a merkle DAG using IPLD (https://ipld.io/docs/) + + func NewDAGService(bsrv blockservice.BlockService) BlockSyncDagService { bsDagService := &BlockSyncDagService{ cidBuilder: &cid.Prefix{ @@ -37,15 +37,15 @@ func NewDAGService(bsrv blockservice.BlockService) BlockSyncDagService { return *bsDagService } -// SaveBlock splits the block in chunks of 256KB and it creates a new merkle DAG with them. it returns the content identifier (cid) of the root node of the DAG. -// Using the root CID the whole block can be retrieved using the DAG service + + func (bsDagService *BlockSyncDagService) SaveBlock(ctx context.Context, block []byte) (cid.Cid, error) { blockReader := bytes.NewReader(block) splitter := chunker.NewSizeSplitter(blockReader, chunker.DefaultBlockSize) nodes := []*dag.ProtoNode{} - // the loop creates nodes for each block chunk and sets each cid + for { nextData, err := splitter.NextBytes() if err == io.EOF { @@ -63,14 +63,14 @@ func (bsDagService *BlockSyncDagService) SaveBlock(ctx context.Context, block [] } - // an empty root node is created + root := dag.NodeWithData(nil) err := root.SetCidBuilder(bsDagService.cidBuilder) if err != nil { return cid.Undef, err } - // and linked to all chunks that are added to the DAGservice + for _, n := range nodes { err := root.AddNodeLink(n.Cid().String(), n) @@ -90,21 +90,21 @@ func (bsDagService *BlockSyncDagService) SaveBlock(ctx context.Context, block [] return root.Cid(), nil } -// LoadBlock returns the block data obtained from the DAGService, using the root cid, either from the network or the local blockstore + func (bsDagService *BlockSyncDagService) LoadBlock(ctx context.Context, cid cid.Cid) ([]byte, error) { - // first it gets the root node + nd, err := bsDagService.Get(ctx, cid) if err != nil { return nil, err } - // then it gets all the data from the root node + read, err := dagReader(nd, bsDagService) if err != nil { return nil, err } - // the data is read to bytes array + data, err := io.ReadAll(read) if err != nil { return nil, err @@ -113,13 +113,13 @@ func (bsDagService *BlockSyncDagService) LoadBlock(ctx context.Context, cid cid. } func (bsDagService *BlockSyncDagService) DeleteBlock(ctx context.Context, cid cid.Cid) error { - // first it gets the root node + root, err := bsDagService.Get(ctx, cid) if err != nil { return err } - // then it iterates all the cids to remove them from the block store + for _, l := range root.Links() { err := bsDagService.Remove(ctx, l.Cid) if err != nil { @@ -129,12 +129,12 @@ func (bsDagService *BlockSyncDagService) DeleteBlock(ctx context.Context, cid ci return nil } -// dagReader is used to read the DAG (all the block chunks) from the root (IPLD) node + func dagReader(root ipld.Node, ds ipld.DAGService) (io.Reader, error) { ctx := context.Background() buf := new(bytes.Buffer) - // the loop retrieves all the nodes (block chunks) either from the local store or the network, in case it is not there. 
+ for _, l := range root.Links() { n, err := ds.Get(ctx, l.Cid) if err != nil { diff --git a/p2p/blocks_received.go b/p2p/blocks_received.go index ceaf0bf67..0541f9599 100644 --- a/p2p/blocks_received.go +++ b/p2p/blocks_received.go @@ -2,15 +2,15 @@ package p2p import "sync" -// BlocksReceived tracks blocks received from P2P to know what are the missing blocks that need to be requested on demand + type BlocksReceived struct { blocksReceived map[uint64]struct{} latestSeenHeight uint64 - // mutex to protect blocksReceived map access + blockReceivedMu sync.Mutex } -// addBlockReceived adds the block height to a map + func (br *BlocksReceived) AddBlockReceived(height uint64) { br.latestSeenHeight = max(height, br.latestSeenHeight) br.blockReceivedMu.Lock() @@ -18,7 +18,7 @@ func (br *BlocksReceived) AddBlockReceived(height uint64) { br.blocksReceived[height] = struct{}{} } -// isBlockReceived checks if a block height is already received + func (br *BlocksReceived) IsBlockReceived(height uint64) bool { br.blockReceivedMu.Lock() defer br.blockReceivedMu.Unlock() @@ -26,7 +26,7 @@ func (br *BlocksReceived) IsBlockReceived(height uint64) bool { return ok } -// removeBlocksReceivedUpToHeight clears previous received block heights + func (br *BlocksReceived) RemoveBlocksReceivedUpToHeight(appliedHeight uint64) { br.blockReceivedMu.Lock() defer br.blockReceivedMu.Unlock() @@ -37,7 +37,7 @@ func (br *BlocksReceived) RemoveBlocksReceivedUpToHeight(appliedHeight uint64) { } } -// GetLatestSeenHeight returns the latest height stored + func (br *BlocksReceived) GetLatestSeenHeight() uint64 { return br.latestSeenHeight } diff --git a/p2p/client.go b/p2p/client.go index 596669f99..d21979efe 100644 --- a/p2p/client.go +++ b/p2p/client.go @@ -33,29 +33,29 @@ import ( "github.com/dymensionxyz/dymint/types" ) -// TODO(tzdybal): refactor to configuration parameters + const ( - // reAdvertisePeriod defines a period after which P2P client re-attempt advertising namespace in DHT. + reAdvertisePeriod = 1 * time.Hour - // peerLimit defines limit of number of peers returned during active peer discovery. + peerLimit = 60 - // txTopicSuffix is added after namespace to create pubsub topic for TX gossiping. + txTopicSuffix = "-tx" - // blockTopicSuffix is added after namespace to create pubsub topic for block gossiping. + blockTopicSuffix = "-block" - // blockSyncProtocolSuffix is added after namespace to create blocksync protocol prefix. + blockSyncProtocolPrefix = "block-sync" ) -// Client is a P2P client, implemented with libp2p. -// -// Initially, client connects to predefined seed nodes (aka bootnodes, bootstrap nodes). -// Those seed nodes serve Kademlia DHT protocol, and are agnostic to ORU chain. Using DHT -// peer routing and discovery clients find other peers within ORU network. + + + + + type Client struct { conf config.P2PConfig chainID string @@ -71,18 +71,18 @@ type Client struct { blockGossiper *Gossiper blockValidator GossipValidator - // cancel is used to cancel context passed to libp2p functions - // it's required because of discovery.Advertise call + + cancel context.CancelFunc localPubsubServer *tmpubsub.Server logger types.Logger - // blocksync instance used to save and retrieve blocks from the P2P network on demand + blocksync *BlockSync - // store used to store retrievable blocks using blocksync + blockSyncStore datastore.Datastore store store.Store @@ -90,10 +90,10 @@ type Client struct { blocksReceived *BlocksReceived } -// NewClient creates new Client object. 
-// -// Basic checks on parameters are done, and default parameters are provided for unset-configuration -// TODO(tzdybal): consider passing entire config, not just P2P config, to reduce number of arguments + + + + func NewClient(conf config.P2PConfig, privKey crypto.PrivKey, chainID string, store store.Store, localPubsubServer *tmpubsub.Server, blockSyncStore datastore.Datastore, logger types.Logger) (*Client, error) { if privKey == nil { return nil, fmt.Errorf("private key: %w", gerrc.ErrNotFound) @@ -116,15 +116,15 @@ func NewClient(conf config.P2PConfig, privKey crypto.PrivKey, chainID string, st }, nil } -// Start establish Client's P2P connectivity. -// -// Following steps are taken: -// 1. Setup libp2p host, start listening for incoming connections. -// 2. Setup gossibsub. -// 3. Setup DHT, establish connection to seed nodes and initialize peer discovery. -// 4. Use active peer discovery to look for peers from same ORU network. + + + + + + + func (c *Client) Start(ctx context.Context) error { - // create new, cancelable context + ctx, c.cancel = context.WithCancel(ctx) host, err := c.listen() if err != nil { @@ -171,7 +171,7 @@ func (c *Client) StartWithHost(ctx context.Context, h host.Host) error { return nil } -// Close gently stops Client. + func (c *Client) Close() error { c.cancel() @@ -183,24 +183,24 @@ func (c *Client) Close() error { ) } -// GossipTx sends the transaction to the P2P network. + func (c *Client) GossipTx(ctx context.Context, tx []byte) error { c.logger.Debug("Gossiping transaction.", "len", len(tx)) return c.txGossiper.Publish(ctx, tx) } -// SetTxValidator sets the callback function, that will be invoked during message gossiping. + func (c *Client) SetTxValidator(val GossipValidator) { c.txValidator = val } -// GossipBlock sends the block, and it's commit to the P2P network. + func (c *Client) GossipBlock(ctx context.Context, blockBytes []byte) error { c.logger.Debug("Gossiping block.", "len", len(blockBytes)) return c.blockGossiper.Publish(ctx, blockBytes) } -// SaveBlock stores the block in the blocksync datastore, stores locally the returned identifier and advertises the identifier to the DHT, so other nodes can know the identifier for the block height. + func (c *Client) SaveBlock(ctx context.Context, height uint64, revision uint64, blockBytes []byte) error { if !c.conf.BlockSyncEnabled { return nil @@ -228,7 +228,7 @@ func (c *Client) SaveBlock(ctx context.Context, height uint64, revision uint64, return nil } -// RemoveBlocks is used to prune blocks from the block sync datastore. 
+ func (c *Client) RemoveBlocks(ctx context.Context, to uint64) (uint64, error) { prunedBlocks := uint64(0) @@ -269,13 +269,13 @@ func (c *Client) RemoveBlocks(ctx context.Context, to uint64) (uint64, error) { return prunedBlocks, nil } -// AdvertiseBlockIdToDHT is used to advertise the identifier (cid) for a specific block height and revision to the DHT, using a PutValue operation + func (c *Client) AdvertiseBlockIdToDHT(ctx context.Context, height uint64, revision uint64, cid cid.Cid) error { err := c.DHT.PutValue(ctx, getBlockSyncKeyByHeight(height, revision), []byte(cid.String())) return err } -// GetBlockIdFromDHT is used to retrieve the identifier (cid) for a specific block height and revision from the DHT, using a GetValue operation + func (c *Client) GetBlockIdFromDHT(ctx context.Context, height uint64, revision uint64) (cid.Cid, error) { cidBytes, err := c.DHT.GetValue(ctx, getBlockSyncKeyByHeight(height, revision)) if err != nil { @@ -288,23 +288,23 @@ func (c *Client) UpdateLatestSeenHeight(height uint64) { c.blocksReceived.latestSeenHeight = max(height, c.blocksReceived.latestSeenHeight) } -// SetBlockValidator sets the callback function, that will be invoked after block is received from P2P network. + func (c *Client) SetBlockValidator(validator GossipValidator) { c.blockValidator = validator } -// Addrs returns listen addresses of Client. + func (c *Client) Addrs() []multiaddr.Multiaddr { return c.Host.Addrs() } -// Info returns p2p info + func (c *Client) Info() (p2p.ID, string, string) { return p2p.ID(hex.EncodeToString([]byte(c.Host.ID()))), c.conf.ListenAddress, c.chainID } -// PeerConnection describe basic information about P2P connection. -// TODO(tzdybal): move it somewhere + + type PeerConnection struct { NodeInfo p2p.DefaultNodeInfo `json:"node_info"` IsOutbound bool `json:"is_outbound"` @@ -312,7 +312,7 @@ type PeerConnection struct { RemoteIP string `json:"remote_ip"` } -// Peers returns list of peers connected to Client. 
+ func (c *Client) Peers() []PeerConnection { conns := c.Host.Network().Conns() res := make([]PeerConnection, 0, len(conns)) @@ -322,12 +322,12 @@ func (c *Client) Peers() []PeerConnection { ListenAddr: c.conf.ListenAddress, Network: c.chainID, DefaultNodeID: p2p.ID(conn.RemotePeer().String()), - // TODO(tzdybal): fill more fields + }, IsOutbound: conn.Stat().Direction == network.DirOutbound, ConnectionStatus: p2p.ConnectionStatus{ Duration: time.Since(conn.Stat().Opened), - // TODO(tzdybal): fill more fields + }, RemoteIP: conn.RemoteMultiaddr().String(), } @@ -407,7 +407,7 @@ func (c *Client) peerDiscovery(ctx context.Context) error { } func (c *Client) setupPeerDiscovery(ctx context.Context) error { - // wait for DHT + select { case <-ctx.Done(): return nil @@ -443,7 +443,7 @@ func (c *Client) findPeers(ctx context.Context) error { return nil } -// tryConnect attempts to connect to a peer and logs error if necessary + func (c *Client) tryConnect(ctx context.Context, peer peer.AddrInfo) { c.logger.Debug("Trying to connect to peer.", "peer", peer) err := c.Host.Connect(ctx, peer) @@ -463,7 +463,7 @@ func (c *Client) setupGossiping(ctx context.Context) error { return err } - // tx gossiper receives the tx to add to the mempool through validation process, since it is a joint process + c.txGossiper, err = NewGossiper(c.Host, ps, c.getTxTopic(), nil, c.logger, WithValidator(c.txValidator)) if err != nil { return err @@ -502,43 +502,43 @@ func (c *Client) GetSeedAddrInfo(seedStr string) []peer.AddrInfo { return addrs } -// getNamespace returns unique string identifying ORU network. -// -// It is used to advertise/find peers in libp2p DHT. -// For now, chainID is used. + + + + func (c *Client) getNamespace() string { return c.chainID } -// topic used to transmit transactions in gossipsub + func (c *Client) getTxTopic() string { return c.getNamespace() + txTopicSuffix } -// topic used to transmit blocks in gossipsub + func (c *Client) getBlockTopic() string { return c.getNamespace() + blockTopicSuffix } -// NewTxValidator creates a pubsub validator that uses the node's mempool to check the -// transaction. 
If the transaction is valid, then it is added to the mempool + + func (c *Client) NewTxValidator() GossipValidator { return func(g *GossipMessage) bool { return true } } -// blockSyncReceived is called on reception of new block via blocksync protocol + func (c *Client) blockSyncReceived(block *BlockData) { err := c.localPubsubServer.PublishWithEvents(context.Background(), *block, map[string][]string{EventTypeKey: {EventNewBlockSyncBlock}}) if err != nil { c.logger.Error("Publishing event.", "err", err) } - // Received block is cached and no longer needed to request using blocksync + c.blocksReceived.AddBlockReceived(block.Block.Header.Height) } -// blockSyncReceived is called on reception of new block via gossip protocol + func (c *Client) blockGossipReceived(ctx context.Context, block []byte) { var gossipedBlock BlockData if err := gossipedBlock.UnmarshalBinary(block); err != nil { @@ -550,7 +550,7 @@ func (c *Client) blockGossipReceived(ctx context.Context, block []byte) { } if c.conf.BlockSyncEnabled { _, err := c.store.LoadBlockCid(gossipedBlock.Block.Header.Height) - // skip block already added to blocksync + if err == nil { return } @@ -558,13 +558,13 @@ func (c *Client) blockGossipReceived(ctx context.Context, block []byte) { if err != nil { c.logger.Error("Adding block to blocksync store.", "err", err, "height", gossipedBlock.Block.Header.Height) } - // Received block is cached and no longer needed to request using blocksync + c.blocksReceived.AddBlockReceived(gossipedBlock.Block.Header.Height) } } -// bootstrapLoop is used to periodically check if the node is connected to other nodes in the P2P network, re-bootstrapping the DHT in case it is necessary, -// or to try to connect to the persistent peers + + func (c *Client) bootstrapLoop(ctx context.Context) { ticker := time.NewTicker(c.conf.BootstrapRetryTime) defer ticker.Stop() @@ -590,7 +590,7 @@ func (c *Client) bootstrapLoop(ctx context.Context) { } } -// retrieveBlockSyncLoop checks if there is any block not received, previous to the latest block height received, to request it on demand + func (c *Client) retrieveBlockSyncLoop(ctx context.Context, msgHandler BlockSyncMessageHandler) { ticker := time.NewTicker(c.conf.BlockSyncRequestIntervalTime) defer ticker.Stop() @@ -600,7 +600,7 @@ func (c *Client) retrieveBlockSyncLoop(ctx context.Context, msgHandler BlockSync case <-ctx.Done(): return case <-ticker.C: - // if no connected at p2p level, dont try + if len(c.Peers()) == 0 { continue } @@ -609,8 +609,8 @@ func (c *Client) retrieveBlockSyncLoop(ctx context.Context, msgHandler BlockSync continue } - // this loop iterates and retrieves all the blocks between the last block applied and the greatest height received, - // skipping any block cached, since are already received. 
+ + for h := state.NextHeight(); h <= c.blocksReceived.latestSeenHeight; h++ { if ctx.Err() != nil { return @@ -653,7 +653,7 @@ func (c *Client) retrieveBlockSyncLoop(ctx context.Context, msgHandler BlockSync } } -// advertiseBlockSyncCids is used to advertise all the block identifiers (cids) stored in the local store to the DHT on startup + func (c *Client) advertiseBlockSyncCids(ctx context.Context) { ticker := time.NewTicker(c.conf.BlockSyncRequestIntervalTime) defer ticker.Stop() @@ -663,7 +663,7 @@ func (c *Client) advertiseBlockSyncCids(ctx context.Context) { case <-ctx.Done(): return case <-ticker.C: - // if no connected at p2p level, it will try again after ticker time + if len(c.Peers()) == 0 { continue } @@ -693,13 +693,13 @@ func (c *Client) advertiseBlockSyncCids(ctx context.Context) { } } - // just try once and then quit when finished + return } } } -// findConnection returns true in case the node is already connected to the peer specified. + func (c *Client) findConnection(peer peer.AddrInfo) bool { for _, con := range c.Host.Network().Conns() { if peer.ID == con.RemotePeer() { @@ -713,7 +713,7 @@ func getBlockSyncKeyByHeight(height uint64, revision uint64) string { return "/" + blockSyncProtocolPrefix + "/" + strconv.FormatUint(revision, 10) + "/" + strconv.FormatUint(height, 10) } -// validates that the content identifiers advertised in the DHT are valid. + type blockIdValidator struct{} func (blockIdValidator) Validate(_ string, id []byte) error { diff --git a/p2p/events.go b/p2p/events.go index 45a0064a5..f88ca45e6 100644 --- a/p2p/events.go +++ b/p2p/events.go @@ -4,12 +4,12 @@ import ( uevent "github.com/dymensionxyz/dymint/utils/event" ) -/* -------------------------------------------------------------------------- */ -/* Event types */ -/* -------------------------------------------------------------------------- */ + + + const ( - // EventTypeKey is a reserved composite key for event name. + EventTypeKey = "p2p.event" ) @@ -18,12 +18,12 @@ const ( EventNewBlockSyncBlock = "NewBlockSyncBlock" ) -/* -------------------------------------------------------------------------- */ -/* Queries */ -/* -------------------------------------------------------------------------- */ -// EventQueryNewGossipedBlock is the query used for getting EventNewGossipedBlock + + + + var EventQueryNewGossipedBlock = uevent.QueryFor(EventTypeKey, EventNewGossipedBlock) -// EventQueryNewBlockSyncBlock is the query used for getting EventNewBlockSyncBlock + var EventQueryNewBlockSyncBlock = uevent.QueryFor(EventTypeKey, EventNewBlockSyncBlock) diff --git a/p2p/gossip.go b/p2p/gossip.go index 6d4236e4c..2cb7c3f65 100644 --- a/p2p/gossip.go +++ b/p2p/gossip.go @@ -13,28 +13,28 @@ import ( "github.com/dymensionxyz/dymint/types" ) -// buffer size used by gossipSub router to consume received packets (blocks or txs). packets are dropped in case buffer overflows. in case of blocks, it can buffer up to 5 minutes (assuming 200ms block rate) + const pubsubBufferSize = 3000 -// GossipMessage represents message gossiped via P2P network (e.g. transaction, Block etc). + type GossipMessage struct { Data []byte From peer.ID } -// GossiperOption sets optional parameters of Gossiper. + type GossiperOption func(*Gossiper) error type GossipMessageHandler func(ctx context.Context, gossipedBlock []byte) -// WithValidator options registers topic validator for Gossiper. 
+ func WithValidator(validator GossipValidator) GossiperOption { return func(g *Gossiper) error { return g.ps.RegisterTopicValidator(g.topic.String(), wrapValidator(g, validator)) } } -// Gossiper is an abstraction of P2P publish subscribe mechanism. + type Gossiper struct { ownID peer.ID @@ -45,9 +45,9 @@ type Gossiper struct { logger types.Logger } -// NewGossiper creates new, ready to use instance of Gossiper. -// -// Returned Gossiper object can be used for sending (Publishing) and receiving messages in topic identified by topicStr. + + + func NewGossiper(host host.Host, ps *pubsub.PubSub, topicStr string, msgHandler GossipMessageHandler, logger types.Logger, options ...GossiperOption) (*Gossiper, error) { topic, err := ps.Join(topicStr) if err != nil { @@ -76,7 +76,7 @@ func NewGossiper(host host.Host, ps *pubsub.PubSub, topicStr string, msgHandler return g, nil } -// Close is used to disconnect from topic and free resources used by Gossiper. + func (g *Gossiper) Close() error { err := g.ps.UnregisterTopicValidator(g.topic.String()) g.sub.Cancel() @@ -86,12 +86,12 @@ func (g *Gossiper) Close() error { ) } -// Publish publishes data to gossip topic. + func (g *Gossiper) Publish(ctx context.Context, data []byte) error { return g.topic.Publish(ctx, data) } -// ProcessMessages waits for messages published in the topic and execute handler. + func (g *Gossiper) ProcessMessages(ctx context.Context) { for { msg, err := g.sub.Next(ctx) @@ -110,8 +110,8 @@ func (g *Gossiper) ProcessMessages(ctx context.Context) { func wrapValidator(gossiper *Gossiper, validator GossipValidator) pubsub.Validator { return func(_ context.Context, _ peer.ID, msg *pubsub.Message) bool { - // Make sure we don't process our own messages. - // In this case we'll want to return true but not to actually handle the message. + + if msg.GetFrom() == gossiper.ownID { return true } diff --git a/p2p/validator.go b/p2p/validator.go index 513714e4f..4c3b26c27 100644 --- a/p2p/validator.go +++ b/p2p/validator.go @@ -16,17 +16,17 @@ type StateGetter interface { GetRevision() uint64 } -// GossipValidator is a callback function type. + type GossipValidator func(*GossipMessage) bool -// IValidator is an interface for implementing validators of messages gossiped in the p2p network. + type IValidator interface { - // TxValidator creates a pubsub validator that uses the node's mempool to check the - // transaction. If the transaction is valid, then it is added to the mempool + + TxValidator(mp mempool.Mempool, mpoolIDS *nodemempool.MempoolIDs) GossipValidator } -// Validator is a validator for messages gossiped in the p2p network. + type Validator struct { logger types.Logger stateGetter StateGetter @@ -34,7 +34,7 @@ type Validator struct { var _ IValidator = (*Validator)(nil) -// NewValidator creates a new Validator. + func NewValidator(logger types.Logger, blockmanager StateGetter) *Validator { return &Validator{ logger: logger, @@ -42,9 +42,9 @@ func NewValidator(logger types.Logger, blockmanager StateGetter) *Validator { } } -// TxValidator creates a pubsub validator that uses the node's mempool to check the -// transaction. -// False means the TX is considered invalid and should not be gossiped. 
+ + + func (v *Validator) TxValidator(mp mempool.Mempool, mpoolIDS *nodemempool.MempoolIDs) GossipValidator { return func(txMessage *GossipMessage) bool { v.logger.Debug("Transaction received.", "bytes", len(txMessage.Data)) @@ -59,7 +59,7 @@ func (v *Validator) TxValidator(mp mempool.Mempool, mpoolIDS *nodemempool.Mempoo case errors.Is(err, mempool.ErrTxInCache): return true case errors.Is(err, mempool.ErrMempoolIsFull{}): - return true // we have no reason to believe that we should throw away the message + return true case errors.Is(err, mempool.ErrTxTooLarge{}): return false case errors.Is(err, mempool.ErrPreCheck{}): @@ -73,7 +73,7 @@ func (v *Validator) TxValidator(mp mempool.Mempool, mpoolIDS *nodemempool.Mempoo } } -// BlockValidator runs basic checks on the gossiped block + func (v *Validator) BlockValidator() GossipValidator { return func(blockMsg *GossipMessage) bool { var gossipedBlock BlockData diff --git a/rpc/client/client.go b/rpc/client/client.go index d697476fb..e0b6b4a29 100644 --- a/rpc/client/client.go +++ b/rpc/client/client.go @@ -34,7 +34,7 @@ const ( defaultPerPage = 30 maxPerPage = 100 - // TODO(tzdybal): make this configurable + subscribeTimeout = 5 * time.Second ) @@ -46,20 +46,20 @@ const ( SLValidated ) -// ErrConsensusStateNotAvailable is returned because Dymint doesn't use Tendermint consensus. + var ErrConsensusStateNotAvailable = errors.New("consensus state not available in Dymint") var _ rpcclient.Client = &Client{} -// Client implements tendermint RPC client interface. -// -// This is the type that is used in communication between cosmos-sdk app and Dymint. + + + type Client struct { *tmtypes.EventBus config *config.RPCConfig node *node.Node - // cache of chunked genesis data. + genChunks []string } @@ -68,7 +68,7 @@ type ResultBlockValidated struct { Result BlockValidationStatus } -// NewClient returns Client working with given node. + func NewClient(node *node.Node) *Client { return &Client{ EventBus: node.EventBus(), @@ -77,7 +77,7 @@ func NewClient(node *node.Node) *Client { } } -// ABCIInfo returns basic information about application state. + func (c *Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { resInfo, err := c.Query().InfoSync(proxy.RequestInfo) if err != nil { @@ -86,12 +86,12 @@ func (c *Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { return &ctypes.ResultABCIInfo{Response: *resInfo}, nil } -// ABCIQuery queries for data from application. + func (c *Client) ABCIQuery(ctx context.Context, path string, data tmbytes.HexBytes) (*ctypes.ResultABCIQuery, error) { return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions) } -// ABCIQueryWithOptions queries for data from application. + func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmbytes.HexBytes, opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { resQuery, err := c.Query().QuerySync(abci.RequestQuery{ Path: path, @@ -106,19 +106,19 @@ func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmb return &ctypes.ResultABCIQuery{Response: *resQuery}, nil } -// BroadcastTxCommit returns with the responses from CheckTx and DeliverTx. -// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_commit + + func (c *Client) BroadcastTxCommit(ctx context.Context, tx tmtypes.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - // This implementation corresponds to Tendermints implementation from rpc/core/mempool.go. 
- // ctx.RemoteAddr godoc: If neither HTTPReq nor WSConn is set, an empty string is returned. - // This code is a local client, so we can assume that subscriber is "" - subscriber := "" // ctx.RemoteAddr() + + + + subscriber := "" if err := c.IsSubscriptionAllowed(subscriber); err != nil { return nil, sdkerrors.Wrap(err, "subscription not allowed") } - // Subscribe to tx being committed in block. + subCtx, cancel := context.WithTimeout(ctx, subscribeTimeout) defer cancel() q := tmtypes.EventQueryTxFor(tx) @@ -134,7 +134,7 @@ func (c *Client) BroadcastTxCommit(ctx context.Context, tx tmtypes.Tx) (*ctypes. } }() - // add to mempool and wait for CheckTx result + checkTxResCh := make(chan *abci.Response, 1) err = c.node.Mempool.CheckTx(tx, func(res *abci.Response) { select { @@ -159,15 +159,15 @@ func (c *Client) BroadcastTxCommit(ctx context.Context, tx tmtypes.Tx) (*ctypes. }, nil } - // broadcast tx + err = c.node.P2P.GossipTx(ctx, tx) if err != nil { return nil, fmt.Errorf("tx added to local mempool but failure to broadcast: %w", err) } - // Wait for the tx to be included in a block or timeout. + select { - case msg := <-deliverTxSub.Out(): // The tx was included in a block. + case msg := <-deliverTxSub.Out(): deliverTxRes, _ := msg.Data().(tmtypes.EventDataTx) return &ctypes.ResultBroadcastTxCommit{ CheckTx: *checkTxRes, @@ -201,15 +201,15 @@ func (c *Client) BroadcastTxCommit(ctx context.Context, tx tmtypes.Tx) (*ctypes. } } -// BroadcastTxAsync returns right away, with no response. Does not wait for -// CheckTx nor DeliverTx results. -// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_async + + + func (c *Client) BroadcastTxAsync(ctx context.Context, tx tmtypes.Tx) (*ctypes.ResultBroadcastTx, error) { err := c.node.Mempool.CheckTx(tx, nil, mempool.TxInfo{}) if err != nil { return nil, err } - // gossipTx optimistically + err = c.node.P2P.GossipTx(ctx, tx) if err != nil { return nil, fmt.Errorf("tx added to local mempool but failed to gossip: %w", err) @@ -217,9 +217,9 @@ func (c *Client) BroadcastTxAsync(ctx context.Context, tx tmtypes.Tx) (*ctypes.R return &ctypes.ResultBroadcastTx{Hash: tx.Hash()}, nil } -// BroadcastTxSync returns with the response from CheckTx. Does not wait for -// DeliverTx result. -// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_sync + + + func (c *Client) BroadcastTxSync(ctx context.Context, tx tmtypes.Tx) (*ctypes.ResultBroadcastTx, error) { resCh := make(chan *abci.Response, 1) err := c.node.Mempool.CheckTx(tx, func(res *abci.Response) { @@ -231,16 +231,16 @@ func (c *Client) BroadcastTxSync(ctx context.Context, tx tmtypes.Tx) (*ctypes.Re res := <-resCh r := res.GetCheckTx() - // gossip the transaction if it's in the mempool. - // Note: we have to do this here because, unlike the tendermint mempool reactor, there - // is no routine that gossips transactions after they enter the pool + + + if r.Code == abci.CodeTypeOK { err = c.node.P2P.GossipTx(ctx, tx) if err != nil { - // the transaction must be removed from the mempool if it cannot be gossiped. - // if this does not occur, then the user will not be able to try again using - // this node, as the CheckTx call above will return an error indicating that - // the tx is already in the mempool + + + + _ = c.node.Mempool.RemoveTxByKey(tx.Key()) return nil, fmt.Errorf("gossip tx: %w", err) } @@ -255,7 +255,7 @@ func (c *Client) BroadcastTxSync(ctx context.Context, tx tmtypes.Tx) (*ctypes.Re }, nil } -// Subscribe subscribe given subscriber to a query. 
+ func (c *Client) Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { q, err := tmquery.New(query) if err != nil { @@ -283,7 +283,7 @@ func (c *Client) Subscribe(ctx context.Context, subscriber, query string, outCap return outc, nil } -// Unsubscribe unsubscribes given subscriber from a query. + func (c *Client) Unsubscribe(ctx context.Context, subscriber, query string) error { q, err := tmquery.New(query) if err != nil { @@ -292,12 +292,12 @@ func (c *Client) Unsubscribe(ctx context.Context, subscriber, query string) erro return c.EventBus.Unsubscribe(ctx, subscriber, q) } -// Genesis returns entire genesis. + func (c *Client) Genesis(_ context.Context) (*ctypes.ResultGenesis, error) { return &ctypes.ResultGenesis{Genesis: c.node.GetGenesis()}, nil } -// GenesisChunked returns given chunk of genesis. + func (c *Client) GenesisChunked(_ context.Context, id uint) (*ctypes.ResultGenesisChunk, error) { genChunks, err := c.GetGenesisChunks() if err != nil { @@ -312,19 +312,19 @@ func (c *Client) GenesisChunked(_ context.Context, id uint) (*ctypes.ResultGenes return nil, fmt.Errorf("service configuration error, there are no chunks") } - // it's safe to do uint(chunkLen)-1 (no overflow) since we always have at least one chunk here + if id > uint(chunkLen)-1 { return nil, fmt.Errorf("there are %d chunks, %d is invalid", chunkLen-1, id) } return &ctypes.ResultGenesisChunk{ TotalChunks: chunkLen, - ChunkNumber: int(id), //nolint:gosec // id is always positive + ChunkNumber: int(id), Data: genChunks[id], }, nil } -// BlockchainInfo returns ABCI block meta information for given height range. + func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { const limit int64 = 20 @@ -336,8 +336,8 @@ func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) baseHeight = 1 } minHeight, maxHeight, err = filterMinMax( - int64(baseHeight), //nolint:gosec // height is non-negative and falls in int64 - int64(c.node.GetBlockManagerHeight()), //nolint:gosec // height is non-negative and falls in int64 + int64(baseHeight), + int64(c.node.GetBlockManagerHeight()), minHeight, maxHeight, limit) @@ -348,7 +348,7 @@ func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) blocks := make([]*tmtypes.BlockMeta, 0, maxHeight-minHeight+1) for height := maxHeight; height >= minHeight; height-- { - block, err := c.node.Store.LoadBlock(uint64(height)) //nolint:gosec // height is non-negative and falls in int64 + block, err := c.node.Store.LoadBlock(uint64(height)) if err != nil { return nil, err } @@ -362,12 +362,12 @@ func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) } return &ctypes.ResultBlockchainInfo{ - LastHeight: int64(c.node.GetBlockManagerHeight()), //nolint:gosec // height is non-negative and falls in int64 + LastHeight: int64(c.node.GetBlockManagerHeight()), BlockMetas: blocks, }, nil } -// NetInfo returns basic information about client P2P connections. + func (c *Client) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { res := ctypes.ResultNetInfo{ Listening: true, @@ -389,24 +389,24 @@ func (c *Client) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { return &res, nil } -// DumpConsensusState always returns error as there is no consensus state in Dymint. 
+ func (c *Client) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { return nil, ErrConsensusStateNotAvailable } -// ConsensusState always returns error as there is no consensus state in Dymint. + func (c *Client) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { return nil, ErrConsensusStateNotAvailable } -// ConsensusParams returns consensus params at given height. -// -// Currently, consensus params changes are not supported and this method returns params as defined in genesis. + + + func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { - // TODO(tzdybal): implement consensus params handling: https://github.com/dymensionxyz/dymint/issues/291 + params := c.node.GetGenesis().ConsensusParams return &ctypes.ResultConsensusParams{ - BlockHeight: int64(c.normalizeHeight(height)), //nolint:gosec // height is non-negative and falls in int64 + BlockHeight: int64(c.normalizeHeight(height)), ConsensusParams: tmproto.ConsensusParams{ Block: tmproto.BlockParams{ MaxBytes: params.Block.MaxBytes, @@ -428,14 +428,14 @@ func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.Re }, nil } -// Health endpoint returns empty value. It can be used to monitor service availability. + func (c *Client) Health(ctx context.Context) (*ctypes.ResultHealth, error) { return &ctypes.ResultHealth{}, nil } -// Block method returns BlockID and block itself for given height. -// -// If height is nil, it returns information about last known block. + + + func (c *Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { heightValue := c.normalizeHeight(height) block, err := c.node.Store.LoadBlock(heightValue) @@ -459,7 +459,7 @@ func (c *Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, }, nil } -// BlockByHash returns BlockID and block itself for given hash. + func (c *Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { var h [32]byte copy(h[:], hash) @@ -485,13 +485,13 @@ func (c *Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBl }, nil } -// BlockResults returns information about transactions, events and updates of validator set and consensus params. + func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) { var h uint64 if height == nil { h = c.node.GetBlockManagerHeight() } else { - h = uint64(*height) //nolint:gosec // height is non-negative and falls in int64 + h = uint64(*height) } resp, err := c.node.Store.LoadBlockResponses(h) if err != nil { @@ -499,7 +499,7 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul } return &ctypes.ResultBlockResults{ - Height: int64(h), //nolint:gosec // height is non-negative and falls in int64 + Height: int64(h), TxsResults: resp.DeliverTxs, BeginBlockEvents: resp.BeginBlock.Events, EndBlockEvents: resp.EndBlock.Events, @@ -508,7 +508,7 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul }, nil } -// Commit returns signed header (aka commit) at given height. 
+ func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { heightValue := c.normalizeHeight(height) com, err := c.node.Store.LoadCommit(heightValue) @@ -528,7 +528,7 @@ func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommi return ctypes.NewResultCommit(&block.Header, commit, true), nil } -// Validators returns paginated list of validators at given height. + func (c *Client) Validators(ctx context.Context, heightPtr *int64, _, _ *int) (*ctypes.ResultValidators, error) { height := c.normalizeHeight(heightPtr) @@ -538,14 +538,14 @@ func (c *Client) Validators(ctx context.Context, heightPtr *int64, _, _ *int) (* } return &ctypes.ResultValidators{ - BlockHeight: int64(height), //nolint:gosec // height is non-negative and falls in int64 + BlockHeight: int64(height), Validators: proposer.TMValidators(), Count: 1, Total: 1, }, nil } -// Tx returns detailed information about transaction identified by its hash. + func (c *Client) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { res, err := c.node.TxIndexer.Get(hash) if err != nil { @@ -561,8 +561,8 @@ func (c *Client) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.Resul var proof tmtypes.TxProof if prove { - block, _ := c.node.Store.LoadBlock(uint64(height)) //nolint:gosec // height is non-negative and falls in int64 - blockProof := block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines + block, _ := c.node.Store.LoadBlock(uint64(height)) + blockProof := block.Data.Txs.Proof(int(index)) proof = tmtypes.TxProof{ RootHash: blockProof.RootHash, Data: tmtypes.Tx(blockProof.Data), @@ -580,7 +580,7 @@ func (c *Client) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.Resul }, nil } -// TxSearch returns detailed information about transactions matching query. + func (c *Client) TxSearch(ctx context.Context, query string, prove bool, pagePtr, perPagePtr *int, orderBy string) (*ctypes.ResultTxSearch, error) { q, err := tmquery.New(query) if err != nil { @@ -592,7 +592,7 @@ func (c *Client) TxSearch(ctx context.Context, query string, prove bool, pagePtr return nil, err } - // sort results (must be done before pagination) + switch orderBy { case "desc": sort.Slice(results, func(i, j int) bool { @@ -612,7 +612,7 @@ func (c *Client) TxSearch(ctx context.Context, query string, prove bool, pagePtr return nil, errors.New("expected order_by to be either `asc` or `desc` or empty") } - // paginate results + totalCount := len(results) perPage := validatePerPage(perPagePtr) @@ -629,10 +629,7 @@ func (c *Client) TxSearch(ctx context.Context, query string, prove bool, pagePtr r := results[i] var proof tmtypes.TxProof - /*if prove { - block := nil //env.BlockStore.LoadBlock(r.Height) - proof = block.Data.Txs.Proof(int(r.Index)) // XXX: overflow on 32-bit machines - }*/ + apiResults = append(apiResults, &ctypes.ResultTx{ Hash: tmtypes.Tx(r.Tx).Hash(), @@ -647,8 +644,8 @@ func (c *Client) TxSearch(ctx context.Context, query string, prove bool, pagePtr return &ctypes.ResultTxSearch{Txs: apiResults, TotalCount: totalCount}, nil } -// BlockSearch defines a method to search for a paginated set of blocks by -// BeginBlock and EndBlock event search criteria. 
+ + func (c *Client) BlockSearch(ctx context.Context, query string, page, perPage *int, orderBy string) (*ctypes.ResultBlockSearch, error) { q, err := tmquery.New(query) if err != nil { @@ -660,7 +657,7 @@ func (c *Client) BlockSearch(ctx context.Context, query string, page, perPage *i return nil, err } - // Sort the results + switch orderBy { case "desc": sort.Slice(results, func(i, j int) bool { @@ -675,7 +672,7 @@ func (c *Client) BlockSearch(ctx context.Context, query string, page, perPage *i return nil, errors.New("expected order_by to be either `asc` or `desc` or empty") } - // Paginate + totalCount := len(results) perPageVal := validatePerPage(perPage) @@ -687,10 +684,10 @@ func (c *Client) BlockSearch(ctx context.Context, query string, page, perPage *i skipCount := validateSkipCount(pageVal, perPageVal) pageSize := tmmath.MinInt(perPageVal, totalCount-skipCount) - // Fetch the blocks + blocks := make([]*ctypes.ResultBlock, 0, pageSize) for i := skipCount; i < skipCount+pageSize; i++ { - b, err := c.node.Store.LoadBlock(uint64(results[i])) //nolint:gosec // height is non-negative and falls in int64 + b, err := c.node.Store.LoadBlock(uint64(results[i])) if err != nil { return nil, err } @@ -709,11 +706,11 @@ func (c *Client) BlockSearch(ctx context.Context, query string, page, perPage *i return &ctypes.ResultBlockSearch{Blocks: blocks, TotalCount: totalCount}, nil } -// Status returns detailed information about current status of the node. + func (c *Client) Status(_ context.Context) (*ctypes.ResultStatus, error) { latest, err := c.node.Store.LoadBlock(c.node.GetBlockManagerHeight()) if err != nil { - // TODO(tzdybal): extract error + return nil, fmt.Errorf("find latest block: %w", err) } @@ -739,7 +736,7 @@ func (c *Client) Status(_ context.Context) (*ctypes.ResultStatus, error) { txIndexerStatus := "on" result := &ctypes.ResultStatus{ - // TODO(ItzhakBokris): update NodeInfo fields + NodeInfo: p2p.DefaultNodeInfo{ ProtocolVersion: defaultProtocolVersion, DefaultNodeID: id, @@ -756,18 +753,18 @@ func (c *Client) Status(_ context.Context) (*ctypes.ResultStatus, error) { SyncInfo: ctypes.SyncInfo{ LatestBlockHash: latestBlockHash[:], LatestAppHash: latestAppHash[:], - LatestBlockHeight: int64(latestHeight), //nolint:gosec // height is non-negative and falls in int64 + LatestBlockHeight: int64(latestHeight), LatestBlockTime: latestBlockTime, - // CatchingUp is true if the node is not at the latest height received from p2p or da. + CatchingUp: c.node.BlockManager.TargetHeight.Load() > latestHeight, - // TODO(tzdybal): add missing fields - // EarliestBlockHash: earliestBlockHash, - // EarliestAppHash: earliestAppHash, - // EarliestBlockHeight: earliestBloc - // kHeight, - // EarliestBlockTime: time.Unix(0, earliestBlockTimeNano), + + + + + + }, - // TODO(ItzhakBokris): update ValidatorInfo fields + ValidatorInfo: ctypes.ValidatorInfo{ Address: tmbytes.HexBytes(proposer.ConsAddress()), PubKey: proposer.PubKey(), @@ -777,14 +774,14 @@ func (c *Client) Status(_ context.Context) (*ctypes.ResultStatus, error) { return result, nil } -// BroadcastEvidence is not yet implemented. + func (c *Client) BroadcastEvidence(ctx context.Context, evidence tmtypes.Evidence) (*ctypes.ResultBroadcastEvidence, error) { return &ctypes.ResultBroadcastEvidence{ Hash: evidence.Hash(), }, nil } -// NumUnconfirmedTxs returns information about transactions in mempool. 
+ func (c *Client) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) { return &ctypes.ResultUnconfirmedTxs{ Count: c.node.Mempool.Size(), @@ -793,9 +790,9 @@ func (c *Client) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirm }, nil } -// UnconfirmedTxs returns transactions in mempool. + func (c *Client) UnconfirmedTxs(ctx context.Context, limitPtr *int) (*ctypes.ResultUnconfirmedTxs, error) { - // reuse per_page validator + limit := validatePerPage(limitPtr) txs := c.node.Mempool.ReapMaxTxs(limit) @@ -807,9 +804,9 @@ func (c *Client) UnconfirmedTxs(ctx context.Context, limitPtr *int) (*ctypes.Res }, nil } -// CheckTx executes a new transaction against the application to determine its validity. -// -// If valid, the tx is automatically added to the mempool. + + + func (c *Client) CheckTx(ctx context.Context, tx tmtypes.Tx) (*ctypes.ResultCheckTx, error) { res, err := c.Mempool().CheckTxSync(abci.RequestCheckTx{Tx: tx}) if err != nil { @@ -820,20 +817,20 @@ func (c *Client) CheckTx(ctx context.Context, tx tmtypes.Tx) (*ctypes.ResultChec func (c *Client) BlockValidated(height *int64) (*ResultBlockValidated, error) { _, _, chainID := c.node.P2P.Info() - // invalid height + if height == nil || *height < 0 { return &ResultBlockValidated{Result: -1, ChainID: chainID}, nil } - // node has not reached the height yet - if uint64(*height) > c.node.BlockManager.State.Height() { //nolint:gosec // height is non-negative and falls in int64 + + if uint64(*height) > c.node.BlockManager.State.Height() { return &ResultBlockValidated{Result: NotValidated, ChainID: chainID}, nil } - if uint64(*height) <= c.node.BlockManager.SettlementValidator.GetLastValidatedHeight() { //nolint:gosec // height is non-negative and falls in int64 + if uint64(*height) <= c.node.BlockManager.SettlementValidator.GetLastValidatedHeight() { return &ResultBlockValidated{Result: SLValidated, ChainID: chainID}, nil } - // block is applied, and therefore it is validated at block level but not at state update level + return &ResultBlockValidated{Result: P2PValidated, ChainID: chainID}, nil } @@ -859,7 +856,7 @@ func (c *Client) eventsRoutine(sub tmtypes.Subscription, subscriber string, q tm c.Logger.Error("subscription was cancelled, resubscribing...", "err", sub.Err(), "query", q.String()) sub = c.resubscribe(subscriber, q) - if sub == nil { // client was stopped + if sub == nil { return } case <-c.Quit(): @@ -868,7 +865,7 @@ func (c *Client) eventsRoutine(sub tmtypes.Subscription, subscriber string, q tm } } -// Try to resubscribe with exponential backoff. 
+ func (c *Client) resubscribe(subscriber string, q tmpubsub.Query) tmtypes.Subscription { attempts := uint(0) for { @@ -882,7 +879,7 @@ func (c *Client) resubscribe(subscriber string, q tmpubsub.Query) tmtypes.Subscr } attempts++ - time.Sleep((10 << attempts) * time.Millisecond) // 10ms -> 20ms -> 40ms + time.Sleep((10 << attempts) * time.Millisecond) } } @@ -907,7 +904,7 @@ func (c *Client) normalizeHeight(height *int64) uint64 { if height == nil || *height == 0 { heightValue = c.node.GetBlockManagerHeight() } else { - heightValue = uint64(*height) //nolint:gosec // height is non-negative and falls in int64 + heightValue = uint64(*height) } return heightValue @@ -924,7 +921,7 @@ func (c *Client) IsSubscriptionAllowed(subscriber string) error { } func validatePerPage(perPagePtr *int) int { - if perPagePtr == nil { // no per_page parameter + if perPagePtr == nil { return defaultPerPage } @@ -942,13 +939,13 @@ func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { panic(fmt.Sprintf("zero or negative perPage: %d", perPage)) } - if pagePtr == nil || *pagePtr <= 0 { // no page parameter + if pagePtr == nil || *pagePtr <= 0 { return 1, nil } pages := ((totalCount - 1) / perPage) + 1 if pages == 0 { - pages = 1 // one page (even if it's empty) + pages = 1 } page := *pagePtr if page > pages { @@ -968,12 +965,12 @@ func validateSkipCount(page, perPage int) int { } func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { - // filter negatives + if min < 0 || max < 0 { return min, max, errors.New("height must be greater than zero") } - // adjust for default values + if min == 0 { min = 1 } @@ -981,14 +978,14 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { max = height } - // limit max to the height + max = tmmath.MinInt64(height, max) - // limit min to the base + min = tmmath.MaxInt64(base, min) - // limit min to within `limit` of max - // so the total number of blocks returned will be `limit` + + min = tmmath.MaxInt64(min, max-limit+1) if min > max { diff --git a/rpc/client/utils.go b/rpc/client/utils.go index 894c60547..04ec93e09 100644 --- a/rpc/client/utils.go +++ b/rpc/client/utils.go @@ -8,12 +8,12 @@ import ( ) const ( - // genesisChunkSize is the maximum size, in bytes, of each - // chunk in the genesis structure for the chunked API - genesisChunkSize = 16 * 1024 * 1024 // 16 MiB + + + genesisChunkSize = 16 * 1024 * 1024 ) -// GetGenesisChunks returns chunked version of genesis. + func (c *Client) GetGenesisChunks() ([]string, error) { if c.genChunks != nil { return c.genChunks, nil @@ -26,8 +26,8 @@ func (c *Client) GetGenesisChunks() ([]string, error) { return c.genChunks, err } -// initGenesisChunks creates a chunked format of the genesis document to make it easier to -// iterate through larger genesis structures. 
+ + func (c *Client) initGenesisChunks(genesis *tmtypes.GenesisDoc) error { if genesis == nil { return nil diff --git a/rpc/json/handler.go b/rpc/json/handler.go index af33eed97..46d70f126 100644 --- a/rpc/json/handler.go +++ b/rpc/json/handler.go @@ -49,21 +49,21 @@ func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.mux.ServeHTTP(w, r) } -// serveJSONRPC serves HTTP request + func (h *handler) serveJSONRPC(w http.ResponseWriter, r *http.Request) { h.serveJSONRPCforWS(w, r, nil) } -// serveJSONRPC serves HTTP request -// implementation is highly inspired by Gorilla RPC v2 (but simplified a lot) + + func (h *handler) serveJSONRPCforWS(w http.ResponseWriter, r *http.Request, wsConn *wsConn) { - // Create a new codec request. + codecReq := h.codec.NewRequest(r) - // Get service method to be called. + method, err := codecReq.Method() if err != nil { if e, ok := err.(*json2.Error); method == "" && ok && e.Message == "EOF" { - // just serve empty page if request is empty + return } codecReq.WriteError(w, http.StatusBadRequest, err) @@ -76,7 +76,7 @@ func (h *handler) serveJSONRPCforWS(w http.ResponseWriter, r *http.Request, wsCo return } - // Decode the args. + args := reflect.New(methodSpec.argsType) if errRead := codecReq.ReadRequest(args.Interface()); errRead != nil { codecReq.WriteError(w, http.StatusBadRequest, errRead) @@ -98,7 +98,7 @@ func (h *handler) serveJSONRPCforWS(w http.ResponseWriter, r *http.Request, wsCo } rets := methodSpec.m.Call(callArgs) - // Extract the result to error if needed. + var errResult error statusCode := http.StatusOK errInter := rets[1].Interface() @@ -107,11 +107,11 @@ func (h *handler) serveJSONRPCforWS(w http.ResponseWriter, r *http.Request, wsCo errResult, _ = errInter.(error) } - // Prevents Internet Explorer from MIME-sniffing a response away - // from the declared content-type + + w.Header().Set("x-content-type-options", "nosniff") - // Encode the response. + if errResult == nil { var raw json.RawMessage raw, err = tmjson.Marshal(rets[0].Interface()) @@ -153,7 +153,7 @@ func (h *handler) newHandler(methodSpec *method) func(http.ResponseWriter, *http case reflect.String: args.Elem().Field(i).SetString(rawVal) case reflect.Slice: - // []byte is a reflect.Slice of reflect.Uint8's + if field.Type.Elem().Kind() == reflect.Uint8 { err = setByteSliceParam(rawVal, &args, i) } @@ -172,7 +172,7 @@ func (h *handler) newHandler(methodSpec *method) func(http.ResponseWriter, *http args, }) - // Extract the result to error if needed. + statusCode := http.StatusOK errInter := rets[1].Interface() if errInter != nil { @@ -185,8 +185,8 @@ func (h *handler) newHandler(methodSpec *method) func(http.ResponseWriter, *http } func (h *handler) encodeAndWriteResponse(w http.ResponseWriter, result interface{}, errResult error, statusCode int) { - // Prevents Internet Explorer from MIME-sniffing a response away - // from the declared content-type + + w.Header().Set("x-content-type-options", "nosniff") w.Header().Set("Content-Type", "application/json; charset=utf-8") diff --git a/rpc/json/service.go b/rpc/json/service.go index e9c1c8e08..e1952f770 100644 --- a/rpc/json/service.go +++ b/rpc/json/service.go @@ -20,13 +20,13 @@ import ( ) const ( - // defaultSubscribeTimeout is the default timeout for a subscription. + defaultSubscribeTimeout = 5 * time.Second - // defaultSubscribeBufferSize is the default buffer size for a subscription. + defaultSubscribeBufferSize = 100 ) -// GetHTTPHandler returns handler configured to serve Tendermint-compatible RPC. 
+ func GetHTTPHandler(l *client.Client, logger types.Logger, opts ...option) (http.Handler, error) { return newHandler(newService(l, logger, opts...), json2.NewCodec(), logger), nil } @@ -137,9 +137,9 @@ func (s *service) Subscribe(req *http.Request, args *subscribeArgs, wsConn *wsCo } go func(subscriptionID []byte) { for msg := range out { - // build the base response + var resp rpctypes.RPCResponse - // Check if subscriptionID is string or int and generate the rest of the response accordingly + subscriptionIDInt, err := strconv.Atoi(string(subscriptionID)) if err != nil { s.logger.Info("Failed to convert subscriptionID to int") @@ -147,7 +147,7 @@ func (s *service) Subscribe(req *http.Request, args *subscribeArgs, wsConn *wsCo } else { resp = rpctypes.NewRPCSuccessResponse(rpctypes.JSONRPCIntID(subscriptionIDInt), msg) } - // Marshal response to JSON and send it to the websocket queue + jsonBytes, err := json.MarshalIndent(resp, "", " ") if err != nil { s.logger.Error("marshal RPCResponse to JSON", "err", err) @@ -180,7 +180,7 @@ func (s *service) UnsubscribeAll(req *http.Request, args *unsubscribeAllArgs) (* return &emptyResult{}, nil } -// info API + func (s *service) Health(req *http.Request, args *healthArgs) (*ctypes.ResultHealth, error) { return s.client.Health(req.Context()) } @@ -202,7 +202,7 @@ func (s *service) Genesis(req *http.Request, args *genesisArgs) (*ctypes.ResultG } func (s *service) GenesisChunked(req *http.Request, args *genesisChunkedArgs) (*ctypes.ResultGenesisChunk, error) { - return s.client.GenesisChunked(req.Context(), uint(args.ID)) //nolint:gosec // id is always positive + return s.client.GenesisChunked(req.Context(), uint(args.ID)) } func (s *service) Block(req *http.Request, args *blockArgs) (*ctypes.ResultBlock, error) { @@ -261,7 +261,7 @@ func (s *service) NumUnconfirmedTxs(req *http.Request, args *numUnconfirmedTxsAr return s.client.NumUnconfirmedTxs(req.Context()) } -// tx broadcast API + func (s *service) BroadcastTxCommit(req *http.Request, args *broadcastTxCommitArgs) (*ctypes.ResultBroadcastTxCommit, error) { return s.client.BroadcastTxCommit(req.Context(), args.Tx) } @@ -274,7 +274,7 @@ func (s *service) BroadcastTxAsync(req *http.Request, args *broadcastTxAsyncArgs return s.client.BroadcastTxAsync(req.Context(), args.Tx) } -// abci API + func (s *service) ABCIQuery(req *http.Request, args *ABCIQueryArgs) (*ctypes.ResultABCIQuery, error) { return s.client.ABCIQueryWithOptions(req.Context(), args.Path, args.Data, rpcclient.ABCIQueryOptions{ Height: int64(args.Height), @@ -286,7 +286,7 @@ func (s *service) ABCIInfo(req *http.Request, args *ABCIInfoArgs) (*ctypes.Resul return s.client.ABCIInfo(req.Context()) } -// evidence API + func (s *service) BroadcastEvidence(req *http.Request, args *broadcastEvidenceArgs) (*ctypes.ResultBroadcastEvidence, error) { return s.client.BroadcastEvidence(req.Context(), args.Evidence) } diff --git a/rpc/json/types.go b/rpc/json/types.go index 19f1f8513..23e84dff6 100644 --- a/rpc/json/types.go +++ b/rpc/json/types.go @@ -18,7 +18,7 @@ type unsubscribeArgs struct { } type unsubscribeAllArgs struct{} -// info API + type ( healthArgs struct{} statusArgs struct{} @@ -86,7 +86,7 @@ type unconfirmedTxsArgs struct { } type numUnconfirmedTxsArgs struct{} -// tx broadcast API + type broadcastTxCommitArgs struct { Tx types.Tx `json:"tx"` } @@ -97,9 +97,9 @@ type broadcastTxAsyncArgs struct { Tx types.Tx `json:"tx"` } -// abci API -// ABCIQueryArgs defines args for ABCI Query method. 
+ + type ABCIQueryArgs struct { Path string `json:"path"` Data bytes.HexBytes `json:"data"` @@ -107,10 +107,10 @@ type ABCIQueryArgs struct { Prove bool `json:"prove"` } -// ABCIInfoArgs defines args for ABCI Info method. + type ABCIInfoArgs struct{} -// evidence API + type broadcastEvidenceArgs struct { Evidence types.Evidence `json:"evidence"` @@ -118,20 +118,20 @@ type broadcastEvidenceArgs struct { type emptyResult struct{} -// JSON-deserialization specific types -// StrInt is an proper int or quoted "int" + + type StrInt int -// StrInt64 is an proper int64 or quoted "int64" + type StrInt64 int64 -// UnmarshalJSON parses JSON (int or int qouted as string) into StrInt64 + func (s *StrInt64) UnmarshalJSON(b []byte) error { return unmarshalStrInt64(b, s) } -// UnmarshalJSON parses JSON (int or int qouted as string) into StrInt + func (s *StrInt) UnmarshalJSON(b []byte) error { var val StrInt64 err := unmarshalStrInt64(b, &val) diff --git a/rpc/json/ws.go b/rpc/json/ws.go index a086ba980..a9728e5a9 100644 --- a/rpc/json/ws.go +++ b/rpc/json/ws.go @@ -40,7 +40,7 @@ func (wsc *wsConn) sendLoop() { } func (h *handler) wsHandler(w http.ResponseWriter, r *http.Request) { - // TODO(tzdybal): configuration options + upgrader := websocket.Upgrader{ ReadBufferSize: 1024, WriteBufferSize: 1024, @@ -89,7 +89,7 @@ func (h *handler) wsHandler(w http.ResponseWriter, r *http.Request) { } if mt != websocket.TextMessage { - // TODO(tzdybal): https://github.com/dymensionxyz/dymint/issues/465 + h.logger.Debug("expected text message") continue } @@ -111,14 +111,14 @@ func newResponseWriter(w io.Writer) http.ResponseWriter { return &wsResponse{w} } -// wsResponse is a simple implementation of http.ResponseWriter + type wsResponse struct { w io.Writer } var _ http.ResponseWriter = wsResponse{} -// Write use underlying writer to write response to WebSocket + func (w wsResponse) Write(bytes []byte) (int, error) { return w.w.Write(bytes) } diff --git a/rpc/middleware/client.go b/rpc/middleware/client.go index 32c232564..6d175fb2b 100644 --- a/rpc/middleware/client.go +++ b/rpc/middleware/client.go @@ -6,14 +6,14 @@ import ( "github.com/tendermint/tendermint/libs/log" ) -// Client is a struct that holds registered middlewares and provides methods -// to run these middlewares on an HTTP handler. + + type Client struct { registry *Registry logger log.Logger } -// NewClient creates and returns a new Client instance. + func NewClient(reg Registry, logger log.Logger) *Client { return &Client{ registry: ®, @@ -21,7 +21,7 @@ func NewClient(reg Registry, logger log.Logger) *Client { } } -// Handle wraps the provided http.Handler with the registered middlewares and returns the final http.Handler. + func (mc *Client) Handle(h http.Handler) http.Handler { registeredMiddlewares := mc.registry.GetRegistered() finalHandler := h diff --git a/rpc/middleware/registry.go b/rpc/middleware/registry.go index 9cbf9a795..70a1b2222 100644 --- a/rpc/middleware/registry.go +++ b/rpc/middleware/registry.go @@ -12,20 +12,20 @@ var ( instance *Registry ) -// HandlerFunc is a type alias for a function that takes an http.Handler and returns a new http.Handler. + type HandlerFunc func(http.Handler) http.Handler -// Middleware is an interface representing a middleware with a Handler method. + type Middleware interface { Handler(logger log.Logger) HandlerFunc } -// Registry is a struct that holds a list of registered middlewares. + type Registry struct { middlewareList []Middleware } -// GetRegistry returns a singleton instance of the Registry. 
+ func GetRegistry() *Registry { once.Do(func() { instance = &Registry{} @@ -33,12 +33,12 @@ func GetRegistry() *Registry { return instance } -// Register adds a Middleware to the list of registered middlewares in the Registry. + func (r *Registry) Register(m Middleware) { r.middlewareList = append(r.middlewareList, m) } -// GetRegistered returns a list of registered middlewares. + func (r *Registry) GetRegistered() []Middleware { return r.middlewareList } diff --git a/rpc/middleware/status.go b/rpc/middleware/status.go index 01e16e559..16172aa48 100644 --- a/rpc/middleware/status.go +++ b/rpc/middleware/status.go @@ -16,7 +16,7 @@ func (s Status) Handler(logger log.Logger) HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { err := s.Err() isHealthy := err == nil - // in case the endpoint is health we return health response + if r.URL.Path == "/health" { w.WriteHeader(http.StatusOK) diff --git a/rpc/server.go b/rpc/server.go index 6368d4ff1..9eafb9f91 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -26,7 +26,7 @@ import ( "github.com/dymensionxyz/dymint/rpc/middleware" ) -// Server handles HTTP and JSON-RPC requests, exposing Tendermint-compatible API. + type Server struct { *service.BaseService @@ -43,21 +43,21 @@ type Server struct { const ( onStopTimeout = 5 * time.Second - // readHeaderTimeout is the timeout for reading the request headers. + readHeaderTimeout = 5 * time.Second ) -// Option is a function that configures the Server. + type Option func(*Server) -// WithListener is an option that sets the listener. + func WithListener(listener net.Listener) Option { return func(d *Server) { d.listener = listener } } -// NewServer creates new instance of Server with given configuration. + func NewServer(node *node.Node, config *config.RPCConfig, logger log.Logger, options ...Option) *Server { srv := &Server{ config: config, @@ -66,16 +66,16 @@ func NewServer(node *node.Node, config *config.RPCConfig, logger log.Logger, opt } srv.BaseService = service.NewBaseService(logger, "RPC", srv) - // Apply options + for _, option := range options { option(srv) } return srv } -// Client returns a Tendermint-compatible rpc Client instance. -// -// This method is called in cosmos-sdk. + + + func (s *Server) Client() rpcclient.Client { return s.client } @@ -84,13 +84,13 @@ func (s *Server) PubSubServer() *pubsub.Server { return s.node.PubSubServer() } -// OnStart is called when Server is started (see service.BaseService for details). + func (s *Server) OnStart() error { s.startEventListener() return s.startRPC() } -// OnStop is called when Server is stopped (see service.BaseService for details). + func (s *Server) OnStop() { ctx, cancel := context.WithTimeout(context.Background(), onStopTimeout) defer cancel() @@ -99,12 +99,12 @@ func (s *Server) OnStop() { } } -// startEventListener registers events to callbacks. + func (s *Server) startEventListener() { go uevent.MustSubscribe(context.Background(), s.PubSubServer(), "RPCNodeHealthStatusHandler", events.QueryHealthStatus, s.onNodeHealthUpdate, s.Logger) } -// onNodeHealthUpdate is a callback function that handles health status events from the node. 
+ func (s *Server) onNodeHealthUpdate(event pubsub.Message) { eventData, _ := event.Data().(*events.DataHealthStatus) if eventData.Error != nil { @@ -169,13 +169,13 @@ func (s *Server) startRPC() error { handler = c.Handler(handler) } - // Apply Middleware + reg := middleware.GetRegistry() reg.Register(middleware.Status{Err: s.getHealthStatus}) middlewareClient := middleware.NewClient(*reg, s.Logger.With("module", "rpc/middleware")) handler = middlewareClient.Handle(handler) - // Start HTTP server + go func() { err := s.serve(listener, handler) if !errors.Is(err, http.ErrServerClosed) { diff --git a/settlement/config.go b/settlement/config.go index 3f01909e0..4895849fd 100644 --- a/settlement/config.go +++ b/settlement/config.go @@ -5,7 +5,7 @@ import ( "time" ) -// Config for the DymensionLayerClient + type Config struct { KeyringBackend string `mapstructure:"keyring_backend"` NodeAddress string `mapstructure:"settlement_node_address"` @@ -19,9 +19,9 @@ type Config struct { RetryMinDelay time.Duration `mapstructure:"retry_min_delay"` BatchAcceptanceTimeout time.Duration `mapstructure:"batch_acceptance_timeout"` BatchAcceptanceAttempts uint `mapstructure:"batch_acceptance_attempts"` - // For testing only. probably should be refactored + ProposerPubKey string `json:"proposer_pub_key"` - // Config used for sl shared grpc mock + SLGrpc GrpcConfig `mapstructure:",squash"` } diff --git a/settlement/dymension/cosmosclient.go b/settlement/dymension/cosmosclient.go index 3e90eb499..7feaab2f3 100644 --- a/settlement/dymension/cosmosclient.go +++ b/settlement/dymension/cosmosclient.go @@ -17,10 +17,10 @@ import ( sequencertypes "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer" ) -// CosmosClient is an interface for interacting with cosmos client chains. -// It is a wrapper around the cosmos client in order to provide with an interface which can be implemented by -// other clients and can easily be mocked for testing purposes. -// Currently it contains only the methods that are used by the dymension hub client. + + + + type CosmosClient interface { Context() sdkclient.Context StartEventListener() error @@ -41,7 +41,7 @@ type cosmosClient struct { var _ CosmosClient = &cosmosClient{} -// NewCosmosClient creates a new cosmos client + func NewCosmosClient(client cosmosclient.Client) CosmosClient { return &cosmosClient{client} } diff --git a/settlement/dymension/dymension.go b/settlement/dymension/dymension.go index 6a995ef69..101aab439 100644 --- a/settlement/dymension/dymension.go +++ b/settlement/dymension/dymension.go @@ -38,7 +38,7 @@ const ( postBatchSubscriberPrefix = "postBatchSubscriber" ) -// Client is the client for the Dymension Hub. + type Client struct { config *settlement.Config rollappId string @@ -58,7 +58,7 @@ type Client struct { var _ settlement.ClientI = &Client{} -// Init is called once. it initializes the struct members. + func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub.Server, logger types.Logger, options ...settlement.Option) error { interfaceRegistry := cdctypes.NewInterfaceRegistry() cryptocodec.RegisterInterfaces(interfaceRegistry) @@ -76,7 +76,7 @@ func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub c.retryMinDelay = config.RetryMinDelay c.retryMaxDelay = config.RetryMaxDelay - // Apply options + for _, apply := range options { apply(c) } @@ -96,7 +96,7 @@ func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub return nil } -// Start starts the HubClient. 
+ func (c *Client) Start() error { err := c.cosmosClient.StartEventListener() if err != nil { @@ -106,31 +106,31 @@ func (c *Client) Start() error { return nil } -// Stop stops the HubClient. + func (c *Client) Stop() error { return c.cosmosClient.StopEventListener() } -// SubmitBatch posts a batch to the Dymension Hub. it tries to post the batch until it is accepted by the settlement layer. -// it emits success and failure events to the event bus accordingly. + + func (c *Client) SubmitBatch(batch *types.Batch, _ da.Client, daResult *da.ResultSubmitBatch) error { msgUpdateState, err := c.convertBatchToMsgUpdateState(batch, daResult) if err != nil { return fmt.Errorf("convert batch to msg update state: %w", err) } - // TODO: probably should be changed to be a channel, as the eventHandler is also in the HubClient in he produces the event + postBatchSubscriberClient := fmt.Sprintf("%s-%d-%s", postBatchSubscriberPrefix, batch.StartHeight(), uuid.New().String()) subscription, err := c.pubsub.Subscribe(c.ctx, postBatchSubscriberClient, settlement.EventQueryNewSettlementBatchAccepted, 1000) if err != nil { return fmt.Errorf("pub sub subscribe to settlement state updates: %w", err) } - //nolint:errcheck + defer c.pubsub.UnsubscribeAll(c.ctx, postBatchSubscriberClient) for { - // broadcast loop: broadcast the transaction to the blockchain (with infinite retries). + err := c.RunWithRetryInfinitely(func() error { err := c.broadcastBatch(msgUpdateState) if err != nil { @@ -154,7 +154,7 @@ func (c *Client) SubmitBatch(batch *types.Batch, _ da.Client, daResult *da.Resul return fmt.Errorf("broadcast batch: %w", err) } - // Batch was submitted successfully. Wait for it to be accepted by the settlement layer. + timer := time.NewTimer(c.batchAcceptanceTimeout) defer timer.Stop() attempt := uint64(1) @@ -171,20 +171,20 @@ func (c *Client) SubmitBatch(batch *types.Batch, _ da.Client, daResult *da.Resul eventData, _ := event.Data().(*settlement.EventDataNewBatch) if eventData.EndHeight != batch.EndHeight() { c.logger.Debug("Received event for a different batch, ignoring.", "event", eventData) - continue // continue waiting for acceptance of the current batch + continue } c.logger.Info("Batch accepted.", "startHeight", batch.StartHeight(), "endHeight", batch.EndHeight(), "stateIndex", eventData.StateIndex, "dapath", msgUpdateState.DAPath) return nil case <-timer.C: - // Check if the batch was accepted by the settlement layer, and we've just missed the event. + includedBatch, err := c.pollForBatchInclusion(batch.EndHeight()) timer.Reset(c.batchAcceptanceTimeout) - // no error, but still not included + if err == nil && !includedBatch { attempt++ if attempt <= uint64(c.batchAcceptanceAttempts) { - continue // continue waiting for acceptance of the current batch + continue } c.logger.Error( "Timed out waiting for batch inclusion on settlement layer", @@ -193,7 +193,7 @@ func (c *Client) SubmitBatch(batch *types.Batch, _ da.Client, daResult *da.Resul "endHeight", batch.EndHeight(), ) - break // breaks the switch case, and goes back to the broadcast loop + break } if err != nil { c.logger.Error( @@ -205,13 +205,13 @@ func (c *Client) SubmitBatch(batch *types.Batch, _ da.Client, daResult *da.Resul "error", err, ) - continue // continue waiting for acceptance of the current batch + continue } - // all good + c.logger.Info("Batch accepted", "startHeight", batch.StartHeight(), "endHeight", batch.EndHeight()) return nil } - break // failed waiting for acceptance. 
broadcast the batch again + break } } } @@ -237,7 +237,7 @@ func (c *Client) getStateInfo(index, height *uint64) (res *rollapptypes.QueryGet if err != nil { return nil, fmt.Errorf("query state info: %w", err) } - if res == nil { // not supposed to happen + if res == nil { return nil, fmt.Errorf("empty response with nil err: %w", gerrc.ErrUnknown) } return @@ -259,13 +259,13 @@ func (c *Client) getLatestHeight(finalized bool) (res *rollapptypes.QueryGetLate if err != nil { return nil, fmt.Errorf("query state info: %w", err) } - if res == nil { // not supposed to happen + if res == nil { return nil, fmt.Errorf("empty response with nil err: %w", gerrc.ErrUnknown) } return } -// GetLatestBatch returns the latest batch from the Dymension Hub. + func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { res, err := c.getStateInfo(nil, nil) if err != nil { @@ -274,7 +274,7 @@ func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { return convertStateInfoToResultRetrieveBatch(&res.StateInfo) } -// GetBatchAtIndex returns the batch at the given index from the Dymension Hub. + func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, error) { res, err := c.getStateInfo(&index, nil) if err != nil { @@ -283,7 +283,7 @@ func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, return convertStateInfoToResultRetrieveBatch(&res.StateInfo) } -// GetBatchAtHeight returns the batch at the given height from the Dymension Hub. + func (c *Client) GetBatchAtHeight(height uint64) (*settlement.ResultRetrieveBatch, error) { res, err := c.getStateInfo(nil, &height) if err != nil { @@ -292,7 +292,7 @@ func (c *Client) GetBatchAtHeight(height uint64) (*settlement.ResultRetrieveBatc return convertStateInfoToResultRetrieveBatch(&res.StateInfo) } -// GetLatestHeight returns the latest state update height from the settlement layer. + func (c *Client) GetLatestHeight() (uint64, error) { res, err := c.getLatestHeight(false) if err != nil { @@ -301,7 +301,7 @@ func (c *Client) GetLatestHeight() (uint64, error) { return res.Height, nil } -// GetLatestFinalizedHeight returns the latest finalized height from the settlement layer. + func (c *Client) GetLatestFinalizedHeight() (uint64, error) { res, err := c.getLatestHeight(true) if err != nil { @@ -310,16 +310,16 @@ func (c *Client) GetLatestFinalizedHeight() (uint64, error) { return res.Height, nil } -// GetProposerAtHeight return the proposer at height. -// In case of negative height, it will return the latest proposer. + + func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { - // Get all sequencers to find the proposer address + seqs, err := c.GetAllSequencers() if err != nil { return nil, fmt.Errorf("get bonded sequencers: %w", err) } - // Get either latest proposer or proposer at height + var proposerAddr string if height < 0 { proposerAddr, err = c.getLatestProposer() @@ -327,12 +327,12 @@ func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { return nil, fmt.Errorf("get latest proposer: %w", err) } } else { - // Get the state info for the relevant height and get address from there + res, err := c.GetBatchAtHeight(uint64(height)) - // if case of height not found, it may be because it didn't arrive to the hub yet. - // In that case we want to return the current proposer. 
+ + if err != nil { - // If batch not found, fallback to latest proposer + if errors.Is(err, gerrc.ErrNotFound) { proposerAddr, err = c.getLatestProposer() if err != nil { @@ -350,7 +350,7 @@ func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { return nil, fmt.Errorf("proposer is sentinel") } - // Find and return the matching sequencer + for _, seq := range seqs { if seq.SettlementAddress == proposerAddr { return &seq, nil @@ -359,7 +359,7 @@ func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { return nil, fmt.Errorf("proposer not found") } -// GetSequencerByAddress returns a sequencer by its address. + func (c *Client) GetSequencerByAddress(address string) (types.Sequencer, error) { var res *sequencertypes.QueryGetSequencerResponse req := &sequencertypes.QueryGetSequencerRequest{ @@ -402,7 +402,7 @@ func (c *Client) GetSequencerByAddress(address string) (types.Sequencer, error) ), nil } -// GetAllSequencers returns all sequencers of the given rollapp. + func (c *Client) GetAllSequencers() ([]types.Sequencer, error) { var res *sequencertypes.QueryGetSequencersByRollappResponse req := &sequencertypes.QueryGetSequencersByRollappRequest{ @@ -425,7 +425,7 @@ func (c *Client) GetAllSequencers() ([]types.Sequencer, error) { return nil, err } - // not supposed to happen, but just in case + if res == nil { return nil, fmt.Errorf("empty response: %w", gerrc.ErrUnknown) } @@ -455,7 +455,7 @@ func (c *Client) GetAllSequencers() ([]types.Sequencer, error) { return sequencerList, nil } -// GetBondedSequencers returns the bonded sequencers of the given rollapp. + func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { var res *sequencertypes.QueryGetSequencersByRollappByStatusResponse req := &sequencertypes.QueryGetSequencersByRollappByStatusRequest{ @@ -479,7 +479,7 @@ func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { return nil, err } - // not supposed to happen, but just in case + if res == nil { return nil, fmt.Errorf("empty response: %w", gerrc.ErrUnknown) } @@ -508,10 +508,10 @@ func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { return sequencerList, nil } -// GetNextProposer returns the next proposer on the hub. -// In case the current proposer is the next proposer, it returns nil. -// in case there is no next proposer, it returns an empty sequencer struct. -// in case there is a next proposer, it returns the next proposer. + + + + func (c *Client) GetNextProposer() (*types.Sequencer, error) { var ( nextAddr string @@ -577,7 +577,7 @@ func (c *Client) GetRollapp() (*types.Rollapp, error) { return nil, fmt.Errorf("get rollapp: %w", err) } - // not supposed to happen, but just in case + if res == nil { return nil, fmt.Errorf("empty response: %w", gerrc.ErrUnknown) } @@ -586,7 +586,7 @@ func (c *Client) GetRollapp() (*types.Rollapp, error) { return &rollapp, nil } -// GetObsoleteDrs returns the list of deprecated DRS. 
+ func (c *Client) GetObsoleteDrs() ([]uint32, error) { var res *rollapptypes.QueryObsoleteDRSVersionsResponse req := &rollapptypes.QueryObsoleteDRSVersionsRequest{} @@ -606,7 +606,7 @@ func (c *Client) GetObsoleteDrs() ([]uint32, error) { return nil, fmt.Errorf("get rollapp: %w", err) } - // not supposed to happen, but just in case + if res == nil { return nil, fmt.Errorf("empty response: %w", gerrc.ErrUnknown) } @@ -694,7 +694,7 @@ func getCosmosClientOptions(config *settlement.Config) []cosmosclient.Option { return options } -// pollForBatchInclusion polls the hub for the inclusion of a batch with the given end height. + func (c *Client) pollForBatchInclusion(batchEndHeight uint64) (bool, error) { latestBatch, err := c.GetLatestBatch() if err != nil { @@ -768,7 +768,7 @@ func (c *Client) ValidateGenesisBridgeData(data rollapptypes.GenesisBridgeData) return fmt.Errorf("rollapp client: validate genesis bridge: %w", err) } - // not supposed to happen, but just in case + if res == nil { return fmt.Errorf("empty response: %w", gerrc.ErrUnknown) } diff --git a/settlement/dymension/events.go b/settlement/dymension/events.go index ba0a2849e..29280911a 100644 --- a/settlement/dymension/events.go +++ b/settlement/dymension/events.go @@ -12,7 +12,7 @@ import ( ctypes "github.com/tendermint/tendermint/rpc/core/types" ) -// TODO: use types and attributes from dymension proto + const ( eventStateUpdateFmt = "state_update.rollapp_id='%s' AND state_update.status='PENDING'" eventStateUpdateFinalizedFmt = "state_update.rollapp_id='%s' AND state_update.status='FINALIZED'" @@ -42,7 +42,7 @@ func (c *Client) eventHandler() { eventRotationStartedQ := fmt.Sprintf(eventRotationStartedFmt, c.rollappId) eventStateUpdateFinalizedQ := fmt.Sprintf(eventStateUpdateFinalizedFmt, c.rollappId) - // TODO: add validation callback for the event data + eventMap := map[string]string{ eventStateUpdateQ: settlement.EventNewBatchAccepted, eventSequencersListQ: settlement.EventNewBondedSequencer, @@ -66,7 +66,7 @@ func (c *Client) eventHandler() { if err != nil { panic(fmt.Errorf("subscribe to events (%s): %w", eventStateUpdateFinalizedQ, err)) } - defer c.cosmosClient.UnsubscribeAll(c.ctx, subscriber) //nolint:errcheck + defer c.cosmosClient.UnsubscribeAll(c.ctx, subscriber) for { var e ctypes.ResultEvent @@ -74,7 +74,7 @@ func (c *Client) eventHandler() { case <-c.ctx.Done(): return case <-c.cosmosClient.EventListenerQuit(): - // TODO(omritoptix): Fallback to polling + return case e = <-stateUpdatesC: case e = <-sequencersListC: @@ -86,7 +86,7 @@ func (c *Client) eventHandler() { } func (c *Client) handleReceivedEvent(event ctypes.ResultEvent, eventMap map[string]string) { - // Assert value is in map and publish it to the event bus + internalType, ok := eventMap[event.Query] if !ok { c.logger.Error("Ignoring event. 
Type not supported.", "event", event) @@ -105,7 +105,7 @@ func (c *Client) handleReceivedEvent(event ctypes.ResultEvent, eventMap map[stri func convertToNewBatchEvent(rawEventData ctypes.ResultEvent) (*settlement.EventDataNewBatch, error) { var errs []error - // check all expected attributes exists + events := rawEventData.Events if events["state_update.num_blocks"] == nil || events["state_update.start_height"] == nil || events["state_update.state_info_index"] == nil { return nil, fmt.Errorf("missing expected attributes in event") @@ -137,12 +137,12 @@ func convertToNewBatchEvent(rawEventData ctypes.ResultEvent) (*settlement.EventD } func convertToNewSequencerEvent(rawEventData ctypes.ResultEvent) (*settlement.EventDataNewBondedSequencer, error) { - // check all expected attributes exists + events := rawEventData.Events if events["create_sequencer.rollapp_id"] == nil { return nil, fmt.Errorf("missing expected attributes in event") } - // TODO: validate rollappID + if events["create_sequencer.sequencer"] == nil { return nil, fmt.Errorf("missing expected attributes in event") @@ -154,13 +154,13 @@ func convertToNewSequencerEvent(rawEventData ctypes.ResultEvent) (*settlement.Ev } func convertToRotationStartedEvent(rawEventData ctypes.ResultEvent) (*settlement.EventDataRotationStarted, error) { - // check all expected attributes exists + events := rawEventData.Events if events["proposer_rotation_started.rollapp_id"] == nil { return nil, fmt.Errorf("missing expected attributes in event") } - // TODO: validate rollappID + if events["proposer_rotation_started.next_proposer"] == nil { return nil, fmt.Errorf("missing expected attributes in event") diff --git a/settlement/dymension/options.go b/settlement/dymension/options.go index 94ffa07c3..00cc5be2d 100644 --- a/settlement/dymension/options.go +++ b/settlement/dymension/options.go @@ -6,7 +6,7 @@ import ( "github.com/dymensionxyz/dymint/settlement" ) -// WithCosmosClient is an option that sets the CosmosClient. + func WithCosmosClient(cosmosClient CosmosClient) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) @@ -14,7 +14,7 @@ func WithCosmosClient(cosmosClient CosmosClient) settlement.Option { } } -// WithRetryAttempts is an option that sets the number of attempts to retry when interacting with the settlement layer. + func WithRetryAttempts(batchRetryAttempts uint) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) @@ -22,7 +22,7 @@ func WithRetryAttempts(batchRetryAttempts uint) settlement.Option { } } -// WithBatchAcceptanceTimeout is an option that sets the timeout for waiting for a batch to be accepted by the settlement layer. + func WithBatchAcceptanceTimeout(batchAcceptanceTimeout time.Duration) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) @@ -30,7 +30,7 @@ func WithBatchAcceptanceTimeout(batchAcceptanceTimeout time.Duration) settlement } } -// WithBatchAcceptanceAttempts is an option that sets the number of attempts to check if a batch has been accepted by the settlement layer. + func WithBatchAcceptanceAttempts(batchAcceptanceAttempts uint) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) @@ -38,7 +38,7 @@ func WithBatchAcceptanceAttempts(batchAcceptanceAttempts uint) settlement.Option } } -// WithRetryMinDelay is an option that sets the retry function mindelay between hub retry attempts. 
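For context, the option constructors in settlement/dymension/options.go are consumed by Client.Init (shown earlier in this patch), which applies each settlement.Option in order, so later options override earlier ones. A minimal usage sketch, not part of this patch, assuming cfg points at a reachable Hub node and the usual dymint/tendermint import paths; the rollapp ID and durations are illustrative only:

package example

import (
	"time"

	"github.com/dymensionxyz/dymint/settlement"
	"github.com/dymensionxyz/dymint/settlement/dymension"
	"github.com/dymensionxyz/dymint/types"
	"github.com/tendermint/tendermint/libs/pubsub"
)

// newHubClient shows how the With* options above are passed to Init.
// cfg, ps and logger are assumed to be provided by the caller.
func newHubClient(cfg settlement.Config, ps *pubsub.Server, logger types.Logger) (settlement.ClientI, error) {
	c := &dymension.Client{}
	err := c.Init(cfg, "example_1234-1", ps, logger,
		dymension.WithRetryAttempts(5),                      // cap hub retry attempts at 5
		dymension.WithBatchAcceptanceTimeout(2*time.Minute), // how long to wait before re-polling batch inclusion
		dymension.WithRetryMinDelay(1*time.Second),
		dymension.WithRetryMaxDelay(30*time.Second),
	)
	return c, err
}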
+ func WithRetryMinDelay(retryMinDelay time.Duration) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) @@ -46,7 +46,7 @@ func WithRetryMinDelay(retryMinDelay time.Duration) settlement.Option { } } -// WithRetryMaxDelay is an option that sets the retry function max delay between hub retry attempts. + func WithRetryMaxDelay(retryMaxDelay time.Duration) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) diff --git a/settlement/dymension/utils.go b/settlement/dymension/utils.go index def62fb91..6dbbae0a7 100644 --- a/settlement/dymension/utils.go +++ b/settlement/dymension/utils.go @@ -8,8 +8,8 @@ import ( rollapptypes "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp" ) -// RunWithRetry runs the given operation with retry, doing a number of attempts, and taking the last -// error only. It uses the context of the HubClient. + + func (c *Client) RunWithRetry(operation func() error) error { return retry.Do(operation, retry.Context(c.ctx), @@ -20,8 +20,8 @@ func (c *Client) RunWithRetry(operation func() error) error { ) } -// RunWithRetryInfinitely runs the given operation with retry, doing a number of attempts, and taking the last -// error only. It uses the context of the HubClient. + + func (c *Client) RunWithRetryInfinitely(operation func() error) error { return retry.Do(operation, retry.Context(c.ctx), diff --git a/settlement/errors.go b/settlement/errors.go index b2b4073b7..55496c242 100644 --- a/settlement/errors.go +++ b/settlement/errors.go @@ -6,7 +6,7 @@ import ( "github.com/dymensionxyz/gerr-cosmos/gerrc" ) -// ErrBatchNotAccepted is returned when a batch is not accepted by the settlement layer. + var ErrBatchNotAccepted = fmt.Errorf("batch not accepted: %w", gerrc.ErrUnknown) type ErrNextSequencerAddressFraud struct { diff --git a/settlement/events.go b/settlement/events.go index 2ff811410..931df574f 100644 --- a/settlement/events.go +++ b/settlement/events.go @@ -7,17 +7,17 @@ import ( ) const ( - // EventTypeKey is a reserved composite key for event name. + EventTypeKey = "settlement.event" - // Event types + EventNewBatchAccepted = "NewBatchAccepted" EventNewBondedSequencer = "NewBondedSequencer" EventRotationStarted = "RotationStarted" EventNewBatchFinalized = "NewBatchFinalized" ) -// Convenience objects + var ( EventNewBatchAcceptedList = map[string][]string{EventTypeKey: {EventNewBatchAccepted}} EventNewBondedSequencerList = map[string][]string{EventTypeKey: {EventNewBondedSequencer}} @@ -25,7 +25,7 @@ var ( EventNewBatchFinalizedList = map[string][]string{EventTypeKey: {EventNewBatchFinalized}} ) -// Queries + var ( EventQueryNewSettlementBatchAccepted = uevent.QueryFor(EventTypeKey, EventNewBatchAccepted) EventQueryNewSettlementBatchFinalized = uevent.QueryFor(EventTypeKey, EventNewBatchFinalized) @@ -33,13 +33,13 @@ var ( EventQueryRotationStarted = uevent.QueryFor(EventTypeKey, EventRotationStarted) ) -// Data + type EventDataNewBatch struct { StartHeight uint64 - // EndHeight is the height of the last accepted batch + EndHeight uint64 - // StateIndex is the rollapp-specific index the batch was saved in the SL + StateIndex uint64 } diff --git a/settlement/grpc/grpc.go b/settlement/grpc/grpc.go index c09c72798..45c5deef0 100644 --- a/settlement/grpc/grpc.go +++ b/settlement/grpc/grpc.go @@ -36,8 +36,8 @@ const ( addressPrefix = "dym" ) -// Client is an extension of the base settlement layer client -// for usage in tests and local development. 
+ + type Client struct { ctx context.Context rollappID string @@ -59,14 +59,14 @@ func (c *Client) GetRollapp() (*types.Rollapp, error) { }, nil } -// GetObsoleteDrs returns the list of deprecated DRS. + func (c *Client) GetObsoleteDrs() ([]uint32, error) { return []uint32{}, nil } var _ settlement.ClientI = (*Client)(nil) -// Init initializes the mock layer client. + func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub.Server, logger types.Logger, options ...settlement.Option) error { ctx := context.Background() @@ -149,7 +149,7 @@ func initConfig(conf settlement.Config) (proposer string, err error) { return } -// Start starts the mock client + func (c *Client) Start() error { c.logger.Info("Starting grpc mock settlement") @@ -159,7 +159,7 @@ func (c *Client) Start() error { for { select { case <-c.stopchan: - // stop + return case <-tick.C: index, err := c.sl.GetIndex(c.ctx, &slmock.SLGetIndexRequest{}) @@ -185,14 +185,14 @@ func (c *Client) Start() error { return nil } -// Stop stops the mock client + func (c *Client) Stop() error { c.logger.Info("Stopping grpc mock settlement") close(c.stopchan) return nil } -// SubmitBatch saves the batch to the kv store + func (c *Client) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *da.ResultSubmitBatch) error { settlementBatch := c.convertBatchtoSettlementBatch(batch, daResult) err := c.saveBatch(settlementBatch) @@ -200,7 +200,7 @@ func (c *Client) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *d return err } - time.Sleep(10 * time.Millisecond) // mimic a delay in batch acceptance + time.Sleep(10 * time.Millisecond) err = c.pubsub.PublishWithEvents(context.Background(), &settlement.EventDataNewBatch{EndHeight: settlementBatch.EndHeight}, settlement.EventNewBatchAcceptedList) if err != nil { return err @@ -208,7 +208,7 @@ func (c *Client) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *d return nil } -// GetLatestBatch returns the latest batch from the kv store + func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { c.logger.Info("GetLatestBatch grpc", "index", c.slStateIndex) batchResult, err := c.GetBatchAtIndex(atomic.LoadUint64(&c.slStateIndex)) @@ -218,7 +218,7 @@ func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { return batchResult, nil } -// GetBatchAtIndex returns the batch at the given index + func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, error) { batchResult, err := c.retrieveBatchAtStateIndex(index) if err != nil { @@ -230,7 +230,7 @@ func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, } func (c *Client) GetBatchAtHeight(h uint64) (*settlement.ResultRetrieveBatch, error) { - // Binary search implementation + left, right := uint64(1), c.slStateIndex for left <= right { @@ -256,7 +256,7 @@ func (c *Client) GetBatchAtHeight(h uint64) (*settlement.ResultRetrieveBatch, er return nil, gerrc.ErrNotFound } -// GetProposerAtHeight implements settlement.ClientI. + func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { pubKeyBytes, err := hex.DecodeString(c.ProposerPubKey) if err != nil { @@ -279,17 +279,17 @@ func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { ), nil } -// GetSequencerByAddress returns all sequencer information by its address. 
Not implemented since it will not be used in grpc SL + func (c *Client) GetSequencerByAddress(address string) (types.Sequencer, error) { panic("GetSequencerByAddress not implemented in grpc SL") } -// GetAllSequencers implements settlement.ClientI. + func (c *Client) GetAllSequencers() ([]types.Sequencer, error) { return c.GetBondedSequencers() } -// GetBondedSequencers implements settlement.ClientI. + func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { proposer, err := c.GetProposerAtHeight(-1) if err != nil { @@ -298,17 +298,17 @@ func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { return []types.Sequencer{*proposer}, nil } -// GetNextProposer implements settlement.ClientI. + func (c *Client) GetNextProposer() (*types.Sequencer, error) { return nil, nil } -// GetLatestHeight returns the latest state update height from the settlement layer. + func (c *Client) GetLatestHeight() (uint64, error) { return c.latestHeight.Load(), nil } -// GetLatestFinalizedHeight returns the latest finalized height from the settlement layer. + func (c *Client) GetLatestFinalizedHeight() (uint64, error) { return uint64(0), gerrc.ErrNotFound } @@ -320,7 +320,7 @@ func (c *Client) saveBatch(batch *settlement.Batch) error { if err != nil { return err } - // Save the batch to the next state index + c.logger.Debug("Saving batch to grpc settlement layer", "index", c.slStateIndex+1) setBatchReply, err := c.sl.SetBatch(c.ctx, &slmock.SLSetBatchRequest{Index: c.slStateIndex + 1, Batch: b}) if err != nil { @@ -337,7 +337,7 @@ func (c *Client) saveBatch(batch *settlement.Batch) error { return err } c.logger.Debug("Setting grpc SL Index to ", "index", setIndexReply.GetIndex()) - // Save latest height in memory and in store + c.latestHeight.Store(batch.EndHeight) return nil } diff --git a/settlement/local/local.go b/settlement/local/local.go index 4d8a64664..20d3ec8ee 100644 --- a/settlement/local/local.go +++ b/settlement/local/local.go @@ -38,18 +38,18 @@ const ( var ( settlementKVPrefix = []byte{0} - slStateIndexKey = []byte("slStateIndex") // used to recover after reboot + slStateIndexKey = []byte("slStateIndex") ) -// Client is an extension of the base settlement layer client -// for usage in tests and local development. + + type Client struct { rollappID string ProposerPubKey string logger types.Logger pubsub *pubsub.Server - mu sync.Mutex // keep the following in sync with *each other* + mu sync.Mutex slStateIndex uint64 latestHeight uint64 settlementKV store.KV @@ -64,7 +64,7 @@ func (c *Client) GetRollapp() (*types.Rollapp, error) { var _ settlement.ClientI = (*Client)(nil) -// Init initializes the mock layer client. 
+ func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub.Server, logger types.Logger, options ...settlement.Option) error { slstore, proposer, err := initConfig(config) if err != nil { @@ -77,7 +77,7 @@ func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub b, err := settlementKV.Get(slStateIndexKey) if err == nil { slStateIndex = binary.BigEndian.Uint64(b) - // Get the latest height from the stateIndex + var settlementBatch rollapptypes.MsgUpdateState b, err := settlementKV.Get(keyFromIndex(slStateIndex)) if err != nil { @@ -101,9 +101,9 @@ func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub func initConfig(conf settlement.Config) (slstore store.KV, proposer string, err error) { if conf.KeyringHomeDir == "" { - // init store + slstore = store.NewDefaultInMemoryKVStore() - // init proposer pub key + if conf.ProposerPubKey != "" { proposer = conf.ProposerPubKey } else { @@ -135,17 +135,17 @@ func initConfig(conf settlement.Config) (slstore store.KV, proposer string, err return } -// Start starts the mock client + func (c *Client) Start() error { return nil } -// Stop stops the mock client + func (c *Client) Stop() error { return c.settlementKV.Close() } -// PostBatch saves the batch to the kv store + func (c *Client) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *da.ResultSubmitBatch) error { settlementBatch := c.convertBatchToSettlementBatch(batch, daResult) err := c.saveBatch(settlementBatch) @@ -153,14 +153,14 @@ func (c *Client) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *d return err } - time.Sleep(100 * time.Millisecond) // mimic a delay in batch acceptance + time.Sleep(100 * time.Millisecond) ctx := context.Background() uevent.MustPublish(ctx, c.pubsub, settlement.EventDataNewBatch{EndHeight: settlementBatch.EndHeight}, settlement.EventNewBatchAcceptedList) return nil } -// GetLatestBatch returns the latest batch from the kv store + func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { c.mu.Lock() ix := c.slStateIndex @@ -172,17 +172,17 @@ func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { return batchResult, nil } -// GetLatestHeight returns the latest state update height from the settlement layer. + func (c *Client) GetLatestHeight() (uint64, error) { return c.latestHeight, nil } -// GetLatestFinalizedHeight returns the latest finalized height from the settlement layer. + func (c *Client) GetLatestFinalizedHeight() (uint64, error) { return uint64(0), gerrc.ErrNotFound } -// GetBatchAtIndex returns the batch at the given index + func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, error) { batchResult, err := c.retrieveBatchAtStateIndex(index) if err != nil { @@ -196,7 +196,7 @@ func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, func (c *Client) GetBatchAtHeight(h uint64) (*settlement.ResultRetrieveBatch, error) { c.mu.Lock() defer c.mu.Unlock() - // TODO: optimize (binary search, or just make another index) + for i := c.slStateIndex; i > 0; i-- { b, err := c.GetBatchAtIndex(i) if err != nil { @@ -208,10 +208,10 @@ func (c *Client) GetBatchAtHeight(h uint64) (*settlement.ResultRetrieveBatch, er return b, nil } } - return nil, gerrc.ErrNotFound // TODO: need to return a cosmos specific error? + return nil, gerrc.ErrNotFound } -// GetProposerAtHeight implements settlement.ClientI. 
+ func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { pubKeyBytes, err := hex.DecodeString(c.ProposerPubKey) if err != nil { @@ -234,22 +234,22 @@ func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { ), nil } -// GetSequencerByAddress returns all sequencer information by its address. Not implemented since it will not be used in mock SL + func (c *Client) GetSequencerByAddress(address string) (types.Sequencer, error) { panic("GetSequencerByAddress not implemented in local SL") } -// GetAllSequencers implements settlement.ClientI. + func (c *Client) GetAllSequencers() ([]types.Sequencer, error) { return c.GetBondedSequencers() } -// GetObsoleteDrs returns the list of deprecated DRS. + func (c *Client) GetObsoleteDrs() ([]uint32, error) { return []uint32{}, nil } -// GetBondedSequencers implements settlement.ClientI. + func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { proposer, err := c.GetProposerAtHeight(-1) if err != nil { @@ -258,7 +258,7 @@ func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { return []types.Sequencer{*proposer}, nil } -// GetNextProposer implements settlement.ClientI. + func (c *Client) GetNextProposer() (*types.Sequencer, error) { return nil, nil } @@ -274,7 +274,7 @@ func (c *Client) saveBatch(batch *settlement.Batch) error { c.mu.Lock() defer c.mu.Unlock() - // Save the batch to the next state index + c.slStateIndex++ err = c.settlementKV.Set(keyFromIndex(c.slStateIndex), b) if err != nil { diff --git a/settlement/registry/registry.go b/settlement/registry/registry.go index 9649f5c5b..c8bdbe5e5 100644 --- a/settlement/registry/registry.go +++ b/settlement/registry/registry.go @@ -7,26 +7,26 @@ import ( "github.com/dymensionxyz/dymint/settlement/local" ) -// Client represents a settlement layer client + type Client string const ( - // Local is a mock client for the settlement layer + Local Client = "mock" - // Dymension is a client for interacting with dymension settlement layer + Dymension Client = "dymension" - // Mock client using grpc for a shared use + Grpc Client = "grpc" ) -// A central registry for all Settlement Layer Clients + var clients = map[Client]func() settlement.ClientI{ Local: func() settlement.ClientI { return &local.Client{} }, Dymension: func() settlement.ClientI { return &dymension.Client{} }, Grpc: func() settlement.ClientI { return &grpc.Client{} }, } -// GetClient returns client identified by name. + func GetClient(client Client) settlement.ClientI { f, ok := clients[client] if !ok { @@ -35,7 +35,7 @@ func GetClient(client Client) settlement.ClientI { return f() } -// RegisteredClients returns names of all settlement clients in registry. + func RegisteredClients() []Client { registered := make([]Client, 0, len(clients)) for client := range clients { diff --git a/settlement/settlement.go b/settlement/settlement.go index 4b03327a2..fbbbf9a63 100644 --- a/settlement/settlement.go +++ b/settlement/settlement.go @@ -8,10 +8,10 @@ import ( "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp" ) -// StatusCode is a type for settlement layer return status. + type StatusCode uint64 -// settlement layer return codes. + const ( StatusUnknown StatusCode = iota StatusSuccess @@ -20,12 +20,12 @@ const ( ) type ResultBase struct { - // Code is to determine if the action succeeded. 
+ Code StatusCode - // Message may contain settlement layer specific information (like detailed error message, etc) + Message string - // TODO(omritoptix): Move StateIndex to be part of the batch struct - // StateIndex is the rollapp-specific index the batch was saved in the SL + + StateIndex uint64 } @@ -34,16 +34,16 @@ type BatchMetaData struct { } type Batch struct { - // sequencer is the bech32-encoded address of the sequencer sent the update + Sequencer string StartHeight uint64 EndHeight uint64 BlockDescriptors []rollapp.BlockDescriptor NextSequencer string - // MetaData about the batch in the DA layer + MetaData *BatchMetaData - NumBlocks uint64 // FIXME: can be removed. not used and will be deprecated + NumBlocks uint64 } type ResultRetrieveBatch struct { @@ -56,51 +56,51 @@ type State struct { } type ResultGetHeightState struct { - ResultBase // NOTE: the state index of this will not be populated + ResultBase State } -// Option is a function that sets a parameter on the settlement layer. + type Option func(ClientI) -// ClientI defines generic interface for Settlement layer interaction. + type ClientI interface { - // Init is called once for the client initialization + Init(config Config, rollappId string, pubsub *pubsub.Server, logger types.Logger, options ...Option) error - // Start is called once, after Init. It's implementation should start the client service. + Start() error - // Stop is called once, after Start. It should stop the client service. + Stop() error - // SubmitBatch tries submitting the batch in an async way to the settlement layer. This should create a transaction which (potentially) - // triggers a state transition in the settlement layer. Events are emitted on success or failure. + + SubmitBatch(batch *types.Batch, daClient da.Client, daResult *da.ResultSubmitBatch) error - // GetLatestBatch returns the latest batch from the settlement layer. + GetLatestBatch() (*ResultRetrieveBatch, error) - // GetBatchAtIndex returns the batch at the given index. + GetBatchAtIndex(index uint64) (*ResultRetrieveBatch, error) - // GetSequencerByAddress returns all sequencer information by its address. + GetSequencerByAddress(address string) (types.Sequencer, error) - // GetBatchAtHeight returns the batch at the given height. + GetBatchAtHeight(index uint64) (*ResultRetrieveBatch, error) - // GetLatestHeight returns the latest state update height from the settlement layer. + GetLatestHeight() (uint64, error) - // GetLatestFinalizedHeight returns the latest finalized height from the settlement layer. + GetLatestFinalizedHeight() (uint64, error) - // GetAllSequencers returns all sequencers for this rollapp (bonded and not bonded). + GetAllSequencers() ([]types.Sequencer, error) - // GetBondedSequencers returns the list of the bonded sequencers for this rollapp. + GetBondedSequencers() ([]types.Sequencer, error) - // GetProposerAtHeight returns the current proposer for this chain. + GetProposerAtHeight(height int64) (*types.Sequencer, error) - // GetNextProposer returns the next proposer for this chain in case of a rotation. - // If no rotation is in progress, it should return nil. + + GetNextProposer() (*types.Sequencer, error) - // GetRollapp returns the rollapp information. + GetRollapp() (*types.Rollapp, error) - // GetObsoleteDrs returns the list of deprecated DRS. + GetObsoleteDrs() ([]uint32, error) - // GetSignerBalance returns the balance of the signer. + GetSignerBalance() (types.Balance, error) - // ValidateGenesisBridgeData validates the genesis bridge data. 
+ ValidateGenesisBridgeData(data rollapp.GenesisBridgeData) error } diff --git a/store/badger.go b/store/badger.go index 5fbb244f5..6a67526f2 100644 --- a/store/badger.go +++ b/store/badger.go @@ -16,7 +16,7 @@ import ( const ( gcTimeout = 1 * time.Minute - discardRatio = 0.5 // Recommended by badger. Indicates that a file will be rewritten if half the space can be discarded. + discardRatio = 0.5 ) var ( @@ -24,14 +24,14 @@ var ( _ KVBatch = &BadgerBatch{} ) -// BadgerKV is a implementation of KVStore using Badger v3. + type BadgerKV struct { db *badger.DB closing chan struct{} closeOnce sync.Once } -// NewDefaultInMemoryKVStore builds KVStore that works in-memory (without accessing disk). + func NewDefaultInMemoryKVStore() KV { db, err := badger.Open(badger.DefaultOptions("").WithInMemory(true)) if err != nil { @@ -58,12 +58,12 @@ func NewKVStore(rootDir, dbPath, dbName string, syncWrites bool, logger types.Lo return b } -// NewDefaultKVStore creates instance of default key-value store. + func NewDefaultKVStore(rootDir, dbPath, dbName string) KV { return NewKVStore(rootDir, dbPath, dbName, true, log.NewNopLogger()) } -// Rootify is helper function to make config creation independent of root dir + func Rootify(rootDir, dbPath string) string { if filepath.IsAbs(dbPath) { return dbPath @@ -71,7 +71,7 @@ func Rootify(rootDir, dbPath string) string { return filepath.Join(rootDir, dbPath) } -// Close implements KVStore. + func (b *BadgerKV) Close() error { b.closeOnce.Do(func() { close(b.closing) @@ -85,7 +85,7 @@ func (b *BadgerKV) gc(period time.Duration, discardRatio float64, logger types.L for { select { case <-b.closing: - // Exit the periodic garbage collector function when store is closed + return case <-ticker.C: err := b.db.RunValueLogGC(discardRatio) @@ -97,7 +97,7 @@ func (b *BadgerKV) gc(period time.Duration, discardRatio float64, logger types.L } } -// Get returns value for given key, or error. + func (b *BadgerKV) Get(key []byte) ([]byte, error) { txn := b.db.NewTransaction(false) defer txn.Discard() @@ -111,7 +111,7 @@ func (b *BadgerKV) Get(key []byte) ([]byte, error) { return item.ValueCopy(nil) } -// Set saves key-value mapping in store. + func (b *BadgerKV) Set(key []byte, value []byte) error { txn := b.db.NewTransaction(true) defer txn.Discard() @@ -122,7 +122,7 @@ func (b *BadgerKV) Set(key []byte, value []byte) error { return txn.Commit() } -// Delete removes key and corresponding value from store. + func (b *BadgerKV) Delete(key []byte) error { txn := b.db.NewTransaction(true) defer txn.Discard() @@ -133,20 +133,20 @@ func (b *BadgerKV) Delete(key []byte) error { return txn.Commit() } -// NewBatch creates new batch. -// Note: badger batches should be short lived as they use extra resources. 
+ + func (b *BadgerKV) NewBatch() KVBatch { return &BadgerBatch{ txn: b.db.NewTransaction(true), } } -// BadgerBatch encapsulates badger transaction + type BadgerBatch struct { txn *badger.Txn } -// Set accumulates key-value entries in a transaction + func (bb *BadgerBatch) Set(key, value []byte) error { if err := bb.txn.Set(key, value); err != nil { return err @@ -155,24 +155,24 @@ func (bb *BadgerBatch) Set(key, value []byte) error { return nil } -// Delete removes the key and associated value from store + func (bb *BadgerBatch) Delete(key []byte) error { return bb.txn.Delete(key) } -// Commit commits a transaction + func (bb *BadgerBatch) Commit() error { return bb.txn.Commit() } -// Discard cancels a transaction + func (bb *BadgerBatch) Discard() { bb.txn.Discard() } var _ KVIterator = &BadgerIterator{} -// PrefixIterator returns instance of prefix Iterator for BadgerKV. + func (b *BadgerKV) PrefixIterator(prefix []byte) KVIterator { txn := b.db.NewTransaction(false) iter := txn.NewIterator(badger.DefaultIteratorOptions) @@ -185,7 +185,7 @@ func (b *BadgerKV) PrefixIterator(prefix []byte) KVIterator { } } -// BadgerIterator encapsulates prefix iterator for badger kv store. + type BadgerIterator struct { txn *badger.Txn iter *badger.Iterator @@ -193,22 +193,22 @@ type BadgerIterator struct { lastError error } -// Valid returns true if iterator is inside its prefix, false otherwise. + func (i *BadgerIterator) Valid() bool { return i.iter.ValidForPrefix(i.prefix) } -// Next progresses iterator to the next key-value pair. + func (i *BadgerIterator) Next() { i.iter.Next() } -// Key returns key pointed by iterator. + func (i *BadgerIterator) Key() []byte { return i.iter.Item().KeyCopy(nil) } -// Value returns value pointer by iterator. + func (i *BadgerIterator) Value() []byte { val, err := i.iter.Item().ValueCopy(nil) if err != nil { @@ -217,45 +217,45 @@ func (i *BadgerIterator) Value() []byte { return val } -// Error returns last error that occurred during iteration. + func (i *BadgerIterator) Error() error { return i.lastError } -// Discard has to be called to free iterator resources. + func (i *BadgerIterator) Discard() { i.iter.Close() i.txn.Discard() } -// memoryEfficientBadgerConfig sets badger configuration parameters to reduce memory usage, specially during compactions to avoid memory spikes that causes OOM. -// based on https://github.com/celestiaorg/celestia-node/issues/2905 + + func memoryEfficientBadgerConfig(path string, syncWrites bool) *badger.Options { - opts := badger.DefaultOptions(path) // this must be copied - // SyncWrites is a configuration option in Badger that determines whether writes are immediately synced to disk or no. - // If set to true it writes to the write-ahead log (value log) are synced to disk before being applied to the LSM tree. + opts := badger.DefaultOptions(path) + + opts.SyncWrites = syncWrites - // default 64mib => 0 - disable block cache - // BlockCacheSize specifies how much data cache should hold in memory. - // It improves lookup performance but increases memory consumption. 
- // Not really necessary if disabling compression + + + + opts.BlockCacheSize = 0 - // compressions reduces storage usage but increases memory consumption, specially during compaction + opts.Compression = options.None - // MemTables: maximum size of in-memory data structures before they are flushed to disk - // default 64mib => 16mib - decreases memory usage and makes compaction more often + + opts.MemTableSize = 16 << 20 - // NumMemtables is a configuration option in Badger that sets the maximum number of memtables to keep in memory before stalling - // default 5 => 3 + + opts.NumMemtables = 3 - // NumLevelZeroTables sets the maximum number of Level 0 tables before compaction starts - // default 5 => 3 + + opts.NumLevelZeroTables = 3 - // default 15 => 5 - this prevents memory growth on CPU constraint systems by blocking all writers + opts.NumLevelZeroTablesStall = 5 - // reducing number compactors, makes it slower but reduces memory usage during compaction + opts.NumCompactors = 2 - // makes sure badger is always compacted on shutdown + opts.CompactL0OnClose = true return &opts diff --git a/store/prefix.go b/store/prefix.go index 23842dff3..e0f4f77d6 100644 --- a/store/prefix.go +++ b/store/prefix.go @@ -5,18 +5,18 @@ var ( _ KVBatch = &PrefixKVBatch{} ) -// PrefixKV is a key-value store that prepends all keys with given prefix. + type PrefixKV struct { kv KV prefix []byte } -// Close implements KVStore. + func (p *PrefixKV) Close() error { return p.kv.Close() } -// NewPrefixKV creates new PrefixKV on top of other KVStore. + func NewPrefixKV(kv KV, prefix []byte) *PrefixKV { return &PrefixKV{ kv: kv, @@ -24,22 +24,22 @@ func NewPrefixKV(kv KV, prefix []byte) *PrefixKV { } } -// Get returns value for given key. + func (p *PrefixKV) Get(key []byte) ([]byte, error) { return p.kv.Get(append(p.prefix, key...)) } -// Set updates the value for given key. + func (p *PrefixKV) Set(key []byte, value []byte) error { return p.kv.Set(append(p.prefix, key...), value) } -// Delete deletes key-value pair for given key. + func (p *PrefixKV) Delete(key []byte) error { return p.kv.Delete(append(p.prefix, key...)) } -// NewBatch creates a new batch. + func (p *PrefixKV) NewBatch() KVBatch { return &PrefixKVBatch{ b: p.kv.NewBatch(), @@ -47,33 +47,33 @@ func (p *PrefixKV) NewBatch() KVBatch { } } -// PrefixIterator creates iterator to traverse given prefix. + func (p *PrefixKV) PrefixIterator(prefix []byte) KVIterator { return p.kv.PrefixIterator(append(p.prefix, prefix...)) } -// PrefixKVBatch enables batching of operations on PrefixKV. + type PrefixKVBatch struct { b KVBatch prefix []byte } -// Set adds key-value pair to batch. + func (pb *PrefixKVBatch) Set(key, value []byte) error { return pb.b.Set(append(pb.prefix, key...), value) } -// Delete adds delete operation to batch. + func (pb *PrefixKVBatch) Delete(key []byte) error { return pb.b.Delete(append(pb.prefix, key...)) } -// Commit applies all operations in the batch atomically. + func (pb *PrefixKVBatch) Commit() error { return pb.b.Commit() } -// Discard discards all operations in the batch. + func (pb *PrefixKVBatch) Discard() { pb.b.Discard() } diff --git a/store/pruning.go b/store/pruning.go index 5940f8ae9..5d3ee3ed3 100644 --- a/store/pruning.go +++ b/store/pruning.go @@ -8,7 +8,7 @@ import ( "github.com/dymensionxyz/gerr-cosmos/gerrc" ) -// PruneStore removes blocks up to (but not including) a height. It returns number of blocks pruned. 
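A small usage sketch, not part of this patch, showing how the PrefixKV wrapper from store/prefix.go just above composes with the in-memory Badger store from store/badger.go; the prefix and key bytes are illustrative:

package example

import (
	"fmt"

	"github.com/dymensionxyz/dymint/store"
)

func prefixExample() error {
	kv := store.NewDefaultInMemoryKVStore() // Badger-backed, in-memory
	defer kv.Close()

	// Every key written through this wrapper is transparently prepended with 0x01,
	// so several PrefixKV instances can share one underlying KV without collisions.
	blocks := store.NewPrefixKV(kv, []byte{1})

	if err := blocks.Set([]byte("h/7"), []byte("blockhash")); err != nil {
		return err
	}
	v, err := blocks.Get([]byte("h/7")) // reads back "blockhash"
	if err != nil {
		return err
	}
	fmt.Println(string(v))
	return nil
}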
+ func (s *DefaultStore) PruneStore(to uint64, logger types.Logger) (uint64, error) { pruned := uint64(0) from, err := s.LoadBaseHeight() @@ -29,7 +29,7 @@ func (s *DefaultStore) PruneStore(to uint64, logger types.Logger) (uint64, error return pruned, nil } -// pruneHeights prunes all store entries that are stored along blocks (blocks,commit,proposer, etc) + func (s *DefaultStore) pruneHeights(from, to uint64, logger types.Logger) (uint64, error) { pruneBlocks := func(batch KVBatch, height uint64) error { hash, err := s.loadHashFromIndex(height) @@ -64,7 +64,7 @@ func (s *DefaultStore) pruneHeights(from, to uint64, logger types.Logger) (uint6 return pruned, err } -// prune is the function that iterates through all heights and prunes according to the pruning function set + func (s *DefaultStore) prune(from, to uint64, prune func(batch KVBatch, height uint64) error, logger types.Logger) (uint64, error) { pruned := uint64(0) batch := s.db.NewBatch() @@ -86,7 +86,7 @@ func (s *DefaultStore) prune(from, to uint64, prune func(batch KVBatch, height u } pruned++ - // flush every 1000 blocks to avoid batches becoming too large + if pruned%1000 == 0 && pruned > 0 { err := flush(batch, h) if err != nil { diff --git a/store/store.go b/store/store.go index f0be24df9..a0ee6dbd8 100644 --- a/store/store.go +++ b/store/store.go @@ -30,33 +30,33 @@ var ( lastBlockSequencerSetPrefix = [1]byte{14} ) -// DefaultStore is a default store implementation. + type DefaultStore struct { db KV } var _ Store = &DefaultStore{} -// New returns new, default store. + func New(kv KV) Store { return &DefaultStore{ db: kv, } } -// Close implements Store. + func (s *DefaultStore) Close() error { return s.db.Close() } -// NewBatch creates a new db batch. + func (s *DefaultStore) NewBatch() KVBatch { return s.db.NewBatch() } -// SaveBlock adds block to the store along with corresponding commit. -// Stored height is updated if block height is greater than stored value. -// In case a batch is provided, the block and commit are added to the batch and not saved. + + + func (s *DefaultStore) SaveBlock(block *types.Block, commit *types.Commit, batch KVBatch) (KVBatch, error) { hash := block.Header.Hash() blockBlob, err := block.MarshalBinary() @@ -69,7 +69,7 @@ func (s *DefaultStore) SaveBlock(block *types.Block, commit *types.Commit, batch return batch, fmt.Errorf("marshal Commit to binary: %w", err) } - // Not sure it's neeeded, as it's not used anywhere + if batch != nil { err = multierr.Append(err, batch.Set(getBlockKey(hash), blockBlob)) err = multierr.Append(err, batch.Set(getCommitKey(hash), commitBlob)) @@ -94,10 +94,10 @@ func (s *DefaultStore) SaveBlock(block *types.Block, commit *types.Commit, batch return nil, nil } -// LoadBlock returns block at given height, or error if it's not found in Store. -// TODO(tzdybal): what is more common access pattern? by height or by hash? -// currently, we're indexing height->hash, and store blocks by hash, but we might as well store by height -// and index hash->height + + + + func (s *DefaultStore) LoadBlock(height uint64) (*types.Block, error) { h, err := s.loadHashFromIndex(height) if err != nil { @@ -106,7 +106,7 @@ func (s *DefaultStore) LoadBlock(height uint64) (*types.Block, error) { return s.LoadBlockByHash(h) } -// LoadBlockByHash returns block with given block header hash, or error if it's not found in Store. 
+ func (s *DefaultStore) LoadBlockByHash(hash [32]byte) (*types.Block, error) { blockData, err := s.db.Get(getBlockKey(hash)) if err != nil { @@ -121,7 +121,7 @@ func (s *DefaultStore) LoadBlockByHash(hash [32]byte) (*types.Block, error) { return block, nil } -// SaveBlockSource saves block validation in Store. + func (s *DefaultStore) SaveBlockSource(height uint64, source types.BlockSource, batch KVBatch) (KVBatch, error) { b := make([]byte, 8) binary.LittleEndian.PutUint64(b, uint64(source)) @@ -132,7 +132,7 @@ func (s *DefaultStore) SaveBlockSource(height uint64, source types.BlockSource, return batch, err } -// LoadBlockSource returns block validation in Store. + func (s *DefaultStore) LoadBlockSource(height uint64) (types.BlockSource, error) { source, err := s.db.Get(getSourceKey(height)) if err != nil { @@ -141,7 +141,7 @@ func (s *DefaultStore) LoadBlockSource(height uint64) (types.BlockSource, error) return types.BlockSource(binary.LittleEndian.Uint64(source)), nil } -// SaveBlockResponses saves block responses (events, tx responses, etc) in Store. + func (s *DefaultStore) SaveBlockResponses(height uint64, responses *tmstate.ABCIResponses, batch KVBatch) (KVBatch, error) { data, err := responses.Marshal() if err != nil { @@ -154,7 +154,7 @@ func (s *DefaultStore) SaveBlockResponses(height uint64, responses *tmstate.ABCI return batch, err } -// LoadBlockResponses returns block results at given height, or error if it's not found in Store. + func (s *DefaultStore) LoadBlockResponses(height uint64) (*tmstate.ABCIResponses, error) { data, err := s.db.Get(getResponsesKey(height)) if err != nil { @@ -168,7 +168,7 @@ func (s *DefaultStore) LoadBlockResponses(height uint64) (*tmstate.ABCIResponses return &responses, nil } -// LoadCommit returns commit for a block at given height, or error if it's not found in Store. + func (s *DefaultStore) LoadCommit(height uint64) (*types.Commit, error) { hash, err := s.loadHashFromIndex(height) if err != nil { @@ -177,7 +177,7 @@ func (s *DefaultStore) LoadCommit(height uint64) (*types.Commit, error) { return s.LoadCommitByHash(hash) } -// LoadCommitByHash returns commit for a block with given block header hash, or error if it's not found in Store. + func (s *DefaultStore) LoadCommitByHash(hash [32]byte) (*types.Commit, error) { commitData, err := s.db.Get(getCommitKey(hash)) if err != nil { @@ -191,8 +191,8 @@ func (s *DefaultStore) LoadCommitByHash(hash [32]byte) (*types.Commit, error) { return commit, nil } -// SaveState updates state saved in Store. Only one State is stored. -// If there is no State in Store, state will be saved. + + func (s *DefaultStore) SaveState(state *types.State, batch KVBatch) (KVBatch, error) { pbState, err := state.ToProto() if err != nil { @@ -210,7 +210,7 @@ func (s *DefaultStore) SaveState(state *types.State, batch KVBatch) (KVBatch, er return batch, err } -// LoadState returns last state saved with UpdateState. + func (s *DefaultStore) LoadState() (*types.State, error) { blob, err := s.db.Get(getStateKey()) if err != nil { @@ -231,7 +231,7 @@ func (s *DefaultStore) LoadState() (*types.State, error) { return &state, nil } -// SaveProposer stores the proposer for given block height in store. 
+ func (s *DefaultStore) SaveProposer(height uint64, proposer types.Sequencer, batch KVBatch) (KVBatch, error) { pbProposer, err := proposer.ToProto() if err != nil { @@ -249,7 +249,7 @@ func (s *DefaultStore) SaveProposer(height uint64, proposer types.Sequencer, bat return batch, err } -// LoadProposer loads proposer at given block height from store. + func (s *DefaultStore) LoadProposer(height uint64) (types.Sequencer, error) { blob, err := s.db.Get(getProposerKey(height)) if err != nil { diff --git a/store/storeIface.go b/store/storeIface.go index 8220b25ad..4cdd2265b 100644 --- a/store/storeIface.go +++ b/store/storeIface.go @@ -7,27 +7,27 @@ import ( "github.com/dymensionxyz/dymint/types" ) -// KV encapsulates key-value store abstraction, in minimalistic interface. -// -// KV MUST be thread safe. + + + type KV interface { - Get(key []byte) ([]byte, error) // Get gets the value for a key. - Set(key []byte, value []byte) error // Set updates the value for a key. - Delete(key []byte) error // Delete deletes a key. - NewBatch() KVBatch // NewBatch creates a new batch. - PrefixIterator(prefix []byte) KVIterator // PrefixIterator creates iterator to traverse given prefix. - Close() error // Close closes the store. + Get(key []byte) ([]byte, error) + Set(key []byte, value []byte) error + Delete(key []byte) error + NewBatch() KVBatch + PrefixIterator(prefix []byte) KVIterator + Close() error } -// KVBatch enables batching of transactions. + type KVBatch interface { - Set(key, value []byte) error // Accumulates KV entries in a transaction. - Delete(key []byte) error // Deletes the given key. - Commit() error // Commits the transaction. - Discard() // Discards the transaction. + Set(key, value []byte) error + Delete(key []byte) error + Commit() error + Discard() } -// KVIterator enables traversal over a given prefix. + type KVIterator interface { Valid() bool Next() @@ -37,37 +37,37 @@ type KVIterator interface { Discard() } -// Store is minimal interface for storing and retrieving blocks, commits and state. + type Store interface { - // NewBatch creates a new db batch. + NewBatch() KVBatch - // SaveBlock saves block along with its seen commit (which will be included in the next block). + SaveBlock(block *types.Block, commit *types.Commit, batch KVBatch) (KVBatch, error) - // LoadBlock returns block at given height, or error if it's not found in Store. + LoadBlock(height uint64) (*types.Block, error) - // LoadBlockByHash returns block with given block header hash, or error if it's not found in Store. + LoadBlockByHash(hash [32]byte) (*types.Block, error) - // SaveBlockResponses saves block responses (events, tx responses, validator set updates, etc) in Store. + SaveBlockResponses(height uint64, responses *tmstate.ABCIResponses, batch KVBatch) (KVBatch, error) - // LoadBlockResponses returns block results at given height, or error if it's not found in Store. + LoadBlockResponses(height uint64) (*tmstate.ABCIResponses, error) - // LoadCommit returns commit for a block at given height, or error if it's not found in Store. + LoadCommit(height uint64) (*types.Commit, error) - // LoadCommitByHash returns commit for a block with given block header hash, or error if it's not found in Store. + LoadCommitByHash(hash [32]byte) (*types.Commit, error) - // SaveState updates state saved in Store. Only one State is stored. - // If there is no State in Store, state will be saved. + + SaveState(state *types.State, batch KVBatch) (KVBatch, error) - // LoadState returns last state saved with UpdateState. 
+ LoadState() (*types.State, error) SaveProposer(height uint64, proposer types.Sequencer, batch KVBatch) (KVBatch, error) diff --git a/test/loadtime/cmd/load/main.go b/test/loadtime/cmd/load/main.go index 456f78b1d..ef45d2c3b 100644 --- a/test/loadtime/cmd/load/main.go +++ b/test/loadtime/cmd/load/main.go @@ -10,20 +10,20 @@ import ( "github.com/dymensionxyz/dymint/test/pb/loadtime" ) -// Ensure all of the interfaces are correctly satisfied. + var ( _ loadtest.ClientFactory = (*ClientFactory)(nil) _ loadtest.Client = (*TxGenerator)(nil) ) -// ClientFactory implements the loadtest.ClientFactory interface. + type ClientFactory struct { ID []byte } -// TxGenerator is responsible for generating transactions. -// TxGenerator holds the set of information that will be used to generate -// each transaction. + + + type TxGenerator struct { id []byte conns uint64 @@ -32,7 +32,7 @@ type TxGenerator struct { } func main() { - u := [16]byte(uuid.New()) // generate run ID on startup + u := [16]byte(uuid.New()) if err := loadtest.RegisterClientFactory("loadtime-client", &ClientFactory{ID: u[:]}); err != nil { panic(err) } @@ -44,7 +44,7 @@ func main() { }) } -// ValidateConfig validates the configuration for the load test. + func (f *ClientFactory) ValidateConfig(cfg loadtest.Config) error { psb, err := payload.MaxUnpaddedSize() if err != nil { @@ -56,9 +56,9 @@ func (f *ClientFactory) ValidateConfig(cfg loadtest.Config) error { return nil } -// NewClient creates a new client for the load test. -// -//nolint:gosec // params are always positive and fall in uint64 + + + func (f *ClientFactory) NewClient(cfg loadtest.Config) (loadtest.Client, error) { return &TxGenerator{ id: f.ID, @@ -68,7 +68,7 @@ func (f *ClientFactory) NewClient(cfg loadtest.Config) (loadtest.Client, error) }, nil } -// GenerateTx generates a new transactions for the load test. + func (c *TxGenerator) GenerateTx() ([]byte, error) { return payload.NewBytes(&loadtime.Payload{ Connections: c.conns, diff --git a/test/loadtime/cmd/report/main.go b/test/loadtime/cmd/report/main.go index 4fd90ebe3..1f17e6f17 100644 --- a/test/loadtime/cmd/report/main.go +++ b/test/loadtime/cmd/report/main.go @@ -19,19 +19,19 @@ const ( var mainPrefix = [1]byte{0} -// BlockStore is a thin wrapper around the DefaultStore which will be used for inspecting the blocks + type BlockStore struct { *store.DefaultStore base uint64 height uint64 } -// Height implements report.BlockStore. + func (b *BlockStore) Height() uint64 { return b.height } -// Base will be used to get the block height of the first block we want to generate the report for + func (b *BlockStore) Base() uint64 { return b.base } diff --git a/test/loadtime/payload/payload.go b/test/loadtime/payload/payload.go index ba538a1df..06f8d30b9 100644 --- a/test/loadtime/payload/payload.go +++ b/test/loadtime/payload/payload.go @@ -16,9 +16,9 @@ const ( maxPayloadSize = 4 * 1024 * 1024 ) -// NewBytes generates a new payload and returns the encoded representation of -// the payload as a slice of bytes. NewBytes uses the fields on the Options -// to create the payload. 
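The loadtime payload helpers changed just below (payload.NewBytes and payload.FromBytes) round-trip a timestamped, padded transaction. A rough sketch, not part of this patch, assuming the import paths used elsewhere in these files and the gogoproto field name Size_ implied by the GetSize_() getter; the sizes are illustrative:

package example

import (
	"time"

	"github.com/dymensionxyz/dymint/test/loadtime/payload"
	"github.com/dymensionxyz/dymint/test/pb/loadtime"
)

func payloadRoundTrip() (*loadtime.Payload, error) {
	// Smallest encoded size a payload can have before any padding is applied.
	minSize, err := payload.MaxUnpaddedSize()
	if err != nil {
		return nil, err
	}

	// Build a transaction roughly 1 KiB larger than the minimum; NewBytes pads it,
	// hex-encodes it, and prepends the single storage key used by the load test.
	tx, err := payload.NewBytes(&loadtime.Payload{
		Time:        time.Now(),
		Size_:       uint64(minSize + 1024),
		Connections: 1,
	})
	if err != nil {
		return nil, err
	}

	// Recover the original payload; padding is left for the caller to discard.
	return payload.FromBytes(tx)
}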
+ + + func NewBytes(p *loadtime.Payload) ([]byte, error) { p.Padding = make([]byte, 1) nullTime := time.Time{} @@ -32,12 +32,12 @@ func NewBytes(p *loadtime.Payload) ([]byte, error) { if p.Size() > maxPayloadSize { return nil, fmt.Errorf("configured size %d is too large (>%d)", p.Size(), maxPayloadSize) } - pSize := int(p.GetSize_()) // #nosec -- The "if" above makes this cast safe + pSize := int(p.GetSize_()) if pSize < us { return nil, fmt.Errorf("configured size %d not large enough to fit unpadded transaction of size %d", pSize, us) } - // We halve the padding size because we transform the TX to hex + p.Padding = make([]byte, (pSize-us)/2) _, err = rand.Read(p.Padding) if err != nil { @@ -49,14 +49,14 @@ func NewBytes(p *loadtime.Payload) ([]byte, error) { } h := []byte(hex.EncodeToString(b)) - // prepend a single key so that the kv store only ever stores a single - // transaction instead of storing all tx and ballooning in size. + + return append([]byte(keyPrefix), h...), nil } -// FromBytes extracts a paylod from the byte representation of the payload. -// FromBytes leaves the padding untouched, returning it to the caller to handle -// or discard per their preference. + + + func FromBytes(b []byte) (*loadtime.Payload, error) { trH := bytes.TrimPrefix(b, []byte(keyPrefix)) if bytes.Equal(b, trH) { @@ -75,8 +75,8 @@ func FromBytes(b []byte) (*loadtime.Payload, error) { return p, nil } -// MaxUnpaddedSize returns the maximum size that a payload may be if no padding -// is included. + + func MaxUnpaddedSize() (int, error) { p := &loadtime.Payload{ Time: time.Now(), @@ -88,9 +88,9 @@ func MaxUnpaddedSize() (int, error) { return CalculateUnpaddedSize(p) } -// CalculateUnpaddedSize calculates the size of the passed in payload for the -// purpose of determining how much padding to add to reach the target size. -// CalculateUnpaddedSize returns an error if the payload Padding field is longer than 1. + + + func CalculateUnpaddedSize(p *loadtime.Payload) (int, error) { if len(p.Padding) != 1 { return 0, fmt.Errorf("expected length of padding to be 1, received %d", len(p.Padding)) diff --git a/test/loadtime/report/report.go b/test/loadtime/report/report.go index 0a8746d7d..f38865ecf 100644 --- a/test/loadtime/report/report.go +++ b/test/loadtime/report/report.go @@ -13,66 +13,66 @@ import ( "github.com/dymensionxyz/dymint/types" ) -// BlockStore defines the set of methods needed by the report generator from -// Tendermint's store.Blockstore type. Using an interface allows for tests to -// more easily simulate the required behavior without having to use the more -// complex real API. + + + + type BlockStore interface { Height() uint64 Base() uint64 LoadBlock(uint64) (*types.Block, error) } -// DataPoint contains the set of data collected for each transaction. + type DataPoint struct { Duration time.Duration BlockTime time.Time Hash []byte } -// Report contains the data calculated from reading the timestamped transactions -// of each block found in the blockstore. + + type Report struct { ID uuid.UUID Rate, Connections, Size uint64 Max, Min, Avg, StdDev time.Duration - // NegativeCount is the number of negative durations encountered while - // reading the transaction data. A negative duration means that - // a transaction timestamp was greater than the timestamp of the block it - // was included in and likely indicates an issue with the experimental - // setup. 
+ + + + + NegativeCount int - // TPS is calculated by taking the highest averaged TPS over all consecutive blocks + TPS uint64 - // All contains all data points gathered from all valid transactions. - // The order of the contents of All is not guaranteed to be match the order of transactions - // in the chain. + + + All []DataPoint - // used for calculating average during report creation. + sum int64 } -// Reports is a collection of Report objects. + type Reports struct { s map[uuid.UUID]Report l []Report - // errorCount is the number of parsing errors encountered while reading the - // transaction data. Parsing errors may occur if a transaction not generated - // by the payload package is submitted to the chain. + + + errorCount int } -// List returns a slice of all reports. + func (rs *Reports) List() []Report { return rs.l } -// ErrorCount returns the number of erronous transactions encountered while creating the report + func (rs *Reports) ErrorCount() int { return rs.errorCount } @@ -100,9 +100,9 @@ func (rs *Reports) addDataPoint(id uuid.UUID, l time.Duration, bt time.Time, has if int64(l) < 0 { r.NegativeCount++ } - // Using an int64 here makes an assumption about the scale and quantity of the data we are processing. - // If all latencies were 2 seconds, we would need around 4 billion records to overflow this. - // We are therefore assuming that the data does not exceed these bounds. + + + r.sum += int64(l) rs.s[id] = r } @@ -122,14 +122,14 @@ func (rs *Reports) calculateAll() { } } -// calculateTPS calculates the TPS by calculating a average moving window with a minimum size of 1 second over all consecutive blocks + func calculateTPS(in []DataPoint) uint64 { - // create a map of block times to the number of transactions in that block + blocks := make(map[time.Time]int) for _, v := range in { blocks[v.BlockTime]++ } - // sort the blocks by time + var blockTimes []time.Time for k := range blocks { blockTimes = append(blockTimes, k) @@ -137,7 +137,7 @@ func calculateTPS(in []DataPoint) uint64 { sort.Slice(blockTimes, func(i, j int) bool { return blockTimes[i].Before(blockTimes[j]) }) - // Iterave over the blocks and calculate the tps starting from each block + TPS := uint64(0) for index, blockTime := range blockTimes { currentTx := blocks[blockTime] @@ -160,8 +160,8 @@ func (rs *Reports) addError() { rs.errorCount++ } -// GenerateFromBlockStore creates a Report using the data in the provided -// BlockStore. + + func GenerateFromBlockStore(s BlockStore) (*Reports, error) { type payloadData struct { id uuid.UUID @@ -179,11 +179,11 @@ func GenerateFromBlockStore(s BlockStore) (*Reports, error) { s: make(map[uuid.UUID]Report), } - // Deserializing to proto can be slow but does not depend on other data - // and can therefore be done in parallel. - // Deserializing in parallel does mean that the resulting data is - // not guaranteed to be delivered in the same order it was given to the - // worker pool. 
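The removed calculateTPS comment above describes a moving-window average with a minimum window size of one second over consecutive blocks. The sketch below re-creates that idea over plain timestamps so it can run on its own; it is an illustration of the described approach, not the exact dymint implementation.

    package main

    import (
        "fmt"
        "sort"
        "time"
    )

    // maxWindowTPS groups transaction counts by block time and, starting from each
    // block, grows the window over consecutive blocks, keeping the best
    // transactions-per-second average over windows spanning at least one second.
    func maxWindowTPS(txTimes []time.Time) uint64 {
        counts := map[time.Time]int{}
        for _, t := range txTimes {
            counts[t]++
        }
        var times []time.Time
        for t := range counts {
            times = append(times, t)
        }
        sort.Slice(times, func(i, j int) bool { return times[i].Before(times[j]) })

        best := uint64(0)
        for i := range times {
            total := 0
            for j := i; j < len(times); j++ {
                total += counts[times[j]]
                span := times[j].Sub(times[i])
                if span < time.Second {
                    span = time.Second // minimum window of one second
                }
                tps := uint64(float64(total) / span.Seconds())
                if tps > best {
                    best = tps
                }
            }
        }
        return best
    }

    func main() {
        base := time.Now()
        fmt.Println(maxWindowTPS([]time.Time{base, base, base.Add(time.Second)}))
    }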
+ + + + + const poolSize = 16 txc := make(chan txData) diff --git a/testutil/block.go b/testutil/block.go index f60257055..a07944d9e 100644 --- a/testutil/block.go +++ b/testutil/block.go @@ -37,14 +37,14 @@ const ( DefaultTestBatchSize = 5 ) -/* -------------------------------------------------------------------------- */ -/* utils */ -/* -------------------------------------------------------------------------- */ + + + func GetManagerWithProposerKey(conf config.BlockManagerConfig, proposerKey crypto.PrivKey, settlementlc settlement.ClientI, genesisHeight, storeInitialHeight, storeLastBlockHeight int64, proxyAppConns proxy.AppConns, mockStore store.Store) (*block.Manager, error) { genesis := GenerateGenesis(genesisHeight) - // Change the LastBlockHeight to avoid calling InitChainSync within the manager - // And updating the state according to the genesis. + + raw, _ := proposerKey.GetPublic().Raw() pubkey := ed25519.PubKey(raw) @@ -67,7 +67,7 @@ func GetManagerWithProposerKey(conf config.BlockManagerConfig, proposerKey crypt return nil, err } - // Init the settlement layer mock + if settlementlc == nil { settlementlc = slregistry.GetClient(slregistry.Local) } @@ -96,7 +96,7 @@ func GetManagerWithProposerKey(conf config.BlockManagerConfig, proposerKey crypt mp := mempoolv1.NewTxMempool(logger, tmcfg.DefaultMempoolConfig(), proxyApp.Mempool(), 0) mpIDs := nodemempool.NewMempoolIDs() - // Init p2p client and validator + p2pKey, _, _ := crypto.GenerateEd25519Key(rand.Reader) p2pClient, err := p2p.NewClient(config.P2PConfig{ GossipSubCacheSize: 50, diff --git a/testutil/logger.go b/testutil/logger.go index 3ef7a902d..c71789897 100644 --- a/testutil/logger.go +++ b/testutil/logger.go @@ -6,15 +6,15 @@ import ( "testing" ) -// TODO(tzdybal): move to some common place -// Logger is a simple, yet thread-safe, logger intended for use in unit tests. + + type Logger struct { mtx *sync.Mutex T *testing.T } -// NewLogger create a Logger that outputs data using given testing.T instance. + func NewLogger(t *testing.T) *Logger { return &Logger{ mtx: new(sync.Mutex), @@ -22,7 +22,7 @@ func NewLogger(t *testing.T) *Logger { } } -// Debug prints a debug message. + func (t *Logger) Debug(msg string, keyvals ...interface{}) { t.T.Helper() t.mtx.Lock() @@ -30,7 +30,7 @@ func (t *Logger) Debug(msg string, keyvals ...interface{}) { t.T.Log(append([]interface{}{"DEBUG: " + msg}, keyvals...)...) } -// Info prints an info message. + func (t *Logger) Info(msg string, keyvals ...interface{}) { t.T.Helper() t.mtx.Lock() @@ -38,7 +38,7 @@ func (t *Logger) Info(msg string, keyvals ...interface{}) { t.T.Log(append([]interface{}{"INFO: " + msg}, keyvals...)...) } -// Error prints an error message. + func (t *Logger) Error(msg string, keyvals ...interface{}) { t.T.Helper() t.mtx.Lock() @@ -46,24 +46,24 @@ func (t *Logger) Error(msg string, keyvals ...interface{}) { t.T.Log(append([]interface{}{"ERROR: " + msg}, keyvals...)...) } -// MockLogger is a fake logger that accumulates all the inputs. -// -// It can be used in tests to ensure that certain messages was logged with correct severity. + + + type MockLogger struct { DebugLines, InfoLines, ErrLines []string } -// Debug saves a debug message. + func (t *MockLogger) Debug(msg string, keyvals ...interface{}) { t.DebugLines = append(t.DebugLines, fmt.Sprint(append([]interface{}{msg}, keyvals...)...)) } -// Info saves an info message. 
+ func (t *MockLogger) Info(msg string, keyvals ...interface{}) { t.InfoLines = append(t.InfoLines, fmt.Sprint(append([]interface{}{msg}, keyvals...)...)) } -// Error saves an error message. + func (t *MockLogger) Error(msg string, keyvals ...interface{}) { t.ErrLines = append(t.ErrLines, fmt.Sprint(append([]interface{}{msg}, keyvals...)...)) } diff --git a/testutil/mocks.go b/testutil/mocks.go index 176f8d6d7..e750c081b 100644 --- a/testutil/mocks.go +++ b/testutil/mocks.go @@ -29,27 +29,27 @@ import ( rollapptypes "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp" ) -// ABCIMethod is a string representing an ABCI method + type ABCIMethod string const ( - // InitChain is the string representation of the InitChain ABCI method + InitChain ABCIMethod = "InitChain" - // CheckTx is the string representation of the CheckTx ABCI method + CheckTx ABCIMethod = "CheckTx" - // BeginBlock is the string representation of the BeginBlockMethod ABCI method + BeginBlock ABCIMethod = "BeginBlock" - // DeliverTx is the string representation of the DeliverTx ABCI method + DeliverTx ABCIMethod = "DeliverTx" - // EndBlock is the string representation of the EndBlock ABCI method + EndBlock ABCIMethod = "EndBlock" - // Commit is the string representation of the Commit ABCI method + Commit ABCIMethod = "Commit" - // Info is the string representation of the Info ABCI method + Info ABCIMethod = "Info" ) -// GetABCIProxyAppMock returns a dummy abci proxy app mock for testing + func GetABCIProxyAppMock(logger log.Logger) proxy.AppConns { app := GetAppMock() @@ -60,7 +60,7 @@ func GetABCIProxyAppMock(logger log.Logger) proxy.AppConns { return proxyApp } -// GetAppMock returns a dummy abci app mock for testing + func GetAppMock(excludeMethods ...ABCIMethod) *tmmocks.MockApplication { app := &tmmocks.MockApplication{} gbdBz, _ := tmjson.Marshal(rollapptypes.GenesisBridgeData{}) @@ -72,7 +72,7 @@ func GetAppMock(excludeMethods ...ABCIMethod) *tmmocks.MockApplication { app.On("Commit", mock.Anything).Return(abci.ResponseCommit{}) app.On("Info", mock.Anything).Return(abci.ResponseInfo{LastBlockHeight: 0, LastBlockAppHash: []byte{0}}) - // iterate exclude methods and unset the mock + for _, method := range excludeMethods { UnsetMockFn(app.On(string(method))) } @@ -92,7 +92,7 @@ var UnsetMockFn = func(call *mock.Call) { } } -// CountMockCalls returns the number of times a mock specific function was called + func CountMockCalls(totalCalls []mock.Call, methodName string) int { var count int for _, call := range totalCalls { @@ -103,7 +103,7 @@ func CountMockCalls(totalCalls []mock.Call, methodName string) int { return count } -// MockStore is a mock store for testing + type MockStore struct { ShoudFailSaveState bool ShouldFailUpdateStateWithBatch bool @@ -111,8 +111,8 @@ type MockStore struct { height uint64 } -// SetHeight sets the height of the mock store -// Don't set the height to mock failure in setting the height + + func (m *MockStore) SetHeight(height uint64) { m.height = height } @@ -125,7 +125,7 @@ func (m *MockStore) NextHeight() uint64 { return m.height + 1 } -// UpdateState updates the state of the mock store + func (m *MockStore) SaveState(state *types.State, batch store.KVBatch) (store.KVBatch, error) { if batch != nil && m.ShouldFailUpdateStateWithBatch || m.ShoudFailSaveState && batch == nil { return nil, errors.New("failed to update state") @@ -133,7 +133,7 @@ func (m *MockStore) SaveState(state *types.State, batch store.KVBatch) (store.KV return m.DefaultStore.SaveState(state, batch) } 
-// NewMockStore returns a new mock store + func NewMockStore() *MockStore { defaultStore := store.New(store.NewDefaultInMemoryKVStore()) return &MockStore{ @@ -148,27 +148,27 @@ const ( connectionRefusedErrorMessage = "connection refused" ) -// DALayerClientSubmitBatchError is a mock data availability layer client that can be used to test error handling + type DALayerClientSubmitBatchError struct { localda.DataAvailabilityLayerClient } -// SubmitBatch submits a batch to the data availability layer + func (s *DALayerClientSubmitBatchError) SubmitBatch(_ *types.Batch) da.ResultSubmitBatch { return da.ResultSubmitBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: connectionRefusedErrorMessage, Error: errors.New(connectionRefusedErrorMessage)}} } -// DALayerClientRetrieveBatchesError is a mock data availability layer client that can be used to test error handling + type DALayerClientRetrieveBatchesError struct { localda.DataAvailabilityLayerClient } -// RetrieveBatches retrieves batches from the data availability layer + func (m *DALayerClientRetrieveBatchesError) RetrieveBatches(_ *da.DASubmitMetaData) da.ResultRetrieveBatch { return da.ResultRetrieveBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: batchNotFoundErrorMessage, Error: da.ErrBlobNotFound}} } -// SubscribeMock is a mock to provide a subscription like behavior for testing + type SubscribeMock struct { messageCh chan interface{} } @@ -195,8 +195,8 @@ type MockDA struct { func NewMockDA(t *testing.T) (*MockDA, error) { mockDA := &MockDA{} - // Create DA - // init celestia DA with mock RPC client + + mockDA.DaClient = registry.GetClient("celestia") config := celestia.Config{ @@ -233,7 +233,7 @@ func NewMockDA(t *testing.T) (*MockDA, error) { nIDSize := 1 tree := exampleNMT(nIDSize, true, 1, 2, 3, 4) - // build a proof for an NID that is within the namespace range of the tree + proof, _ := tree.ProveNamespace(mockDA.NID) mockDA.BlobProof = blob.Proof([]*nmt.Proof{&proof}) @@ -244,7 +244,7 @@ func NewMockDA(t *testing.T) (*MockDA, error) { return mockDA, nil } -// exampleNMT creates a new NamespacedMerkleTree with the given namespace ID size and leaf namespace IDs. Each byte in the leavesNIDs parameter corresponds to one leaf's namespace ID. If nidSize is greater than 1, the function repeats each NID in leavesNIDs nidSize times before prepending it to the leaf data. 
+ func exampleNMT(nidSize int, ignoreMaxNamespace bool, leavesNIDs ...byte) *nmt.NamespacedMerkleTree { tree := nmt.New(sha256.New(), nmt.NamespaceIDSize(nidSize), nmt.IgnoreMaxNamespace(ignoreMaxNamespace)) for i, nid := range leavesNIDs { diff --git a/testutil/node.go b/testutil/node.go index 1f7f0955f..ac7e294b1 100644 --- a/testutil/node.go +++ b/testutil/node.go @@ -24,7 +24,7 @@ import ( func CreateNode(isSequencer bool, blockManagerConfig *config.BlockManagerConfig, genesis *types.GenesisDoc) (*node.Node, error) { app := GetAppMock(EndBlock) - // Create proxy app + clientCreator := proxy.NewLocalClientCreator(app) proxyApp := proxy.NewAppConns(clientCreator) err := proxyApp.Start() @@ -48,7 +48,7 @@ func CreateNode(isSequencer bool, blockManagerConfig *config.BlockManagerConfig, signingKey, pubkey, _ := crypto.GenerateEd25519Key(rand.Reader) pubkeyBytes, _ := pubkey.Raw() - // Node config + nodeConfig := config.DefaultNodeConfig if blockManagerConfig == nil { @@ -62,7 +62,7 @@ func CreateNode(isSequencer bool, blockManagerConfig *config.BlockManagerConfig, } nodeConfig.BlockManagerConfig = *blockManagerConfig - // SL config + nodeConfig.SettlementConfig = settlement.Config{ProposerPubKey: hex.EncodeToString(pubkeyBytes)} node, err := node.NewNode( diff --git a/testutil/p2p.go b/testutil/p2p.go index 8dd88fbe8..318abb499 100644 --- a/testutil/p2p.go +++ b/testutil/p2p.go @@ -45,10 +45,10 @@ type HostDescr struct { RealKey bool } -// copied from libp2p net/mock + var blackholeIP6 = net.ParseIP("100::") -// copied from libp2p net/mock + func getAddr(sk crypto.PrivKey) (multiaddr.Multiaddr, error) { id, err := peer.IDFromPrivateKey(sk) if err != nil { @@ -92,7 +92,7 @@ func StartTestNetwork(ctx context.Context, t *testing.T, n int, conf map[int]Hos err := mnet.LinkAll() require.NoError(err) - // prepare seed node lists + seeds := make([]string, n) for src, descr := range conf { require.Less(src, n) diff --git a/testutil/rpc.go b/testutil/rpc.go index 80b31c1e6..bbf17dae0 100644 --- a/testutil/rpc.go +++ b/testutil/rpc.go @@ -13,13 +13,13 @@ import ( ) func CreateLocalServer(t *testing.T) (*rpc.Server, net.Listener) { - // Create a new local listener + listener, err := nettest.NewLocalListener("tcp") require.NoError(t, err) serverReadyCh := make(chan bool, 1) var server *rpc.Server - // Start server with listener + go func() { node, err := CreateNode(true, nil, GenerateGenesis(0)) require.NoError(t, err) diff --git a/testutil/types.go b/testutil/types.go index 7f04ddd4d..70ce267db 100644 --- a/testutil/types.go +++ b/testutil/types.go @@ -21,9 +21,9 @@ import ( ) const ( - // BlockVersion is the default block version for testing + BlockVersion = 1 - // AppVersion is the default app version for testing + AppVersion = 0 SettlementAccountPrefix = "dym" @@ -63,7 +63,7 @@ func GenerateSettlementAddress() string { return addr } -// generateBlock generates random blocks. + func generateBlock(height uint64, proposerHash []byte, lastHeaderHash [32]byte) *types.Block { h := createRandomHashes() @@ -135,7 +135,7 @@ func GenerateBlocksWithTxs(startHeight uint64, num uint64, proposerKey crypto.Pr return blocks, nil } -// GenerateBlocks generates random blocks. 
+ func GenerateBlocks(startHeight uint64, num uint64, proposerKey crypto.PrivKey, lastBlockHeader [32]byte) ([]*types.Block, error) { r, _ := proposerKey.Raw() seq := types.NewSequencerFromValidator(*tmtypes.NewValidator(ed25519.PrivKey(r).PubKey(), 1)) @@ -163,7 +163,7 @@ func GenerateBlocks(startHeight uint64, num uint64, proposerKey crypto.PrivKey, return blocks, nil } -// GenerateCommits generates commits based on passed blocks. + func GenerateCommits(blocks []*types.Block, proposerKey crypto.PrivKey) ([]*types.Commit, error) { commits := make([]*types.Commit, len(blocks)) @@ -205,7 +205,7 @@ func generateSignature(proposerKey crypto.PrivKey, header *types.Header) ([]byte return sign, nil } -// GenerateBatch generates a batch out of random blocks + func GenerateBatch(startHeight uint64, endHeight uint64, proposerKey crypto.PrivKey, lastBlockHeader [32]byte) (*types.Batch, error) { blocks, err := GenerateBlocks(startHeight, endHeight-startHeight+1, proposerKey, lastBlockHeader) if err != nil { @@ -223,7 +223,7 @@ func GenerateBatch(startHeight uint64, endHeight uint64, proposerKey crypto.Priv return batch, nil } -// GenerateLastBatch generates a final batch with LastBatch flag set to true and different NextSequencerHash + func GenerateLastBatch(startHeight uint64, endHeight uint64, proposerKey crypto.PrivKey, nextSequencerKey crypto.PrivKey, lastHeaderHash [32]byte) (*types.Batch, error) { nextSequencerRaw, _ := nextSequencerKey.Raw() nextSeq := types.NewSequencerFromValidator(*tmtypes.NewValidator(ed25519.PrivKey(nextSequencerRaw).PubKey(), 1)) @@ -248,7 +248,7 @@ func GenerateLastBatch(startHeight uint64, endHeight uint64, proposerKey crypto. return batch, nil } -// GenerateLastBlocks es similar a GenerateBlocks pero incluye el NextSequencerHash + func GenerateLastBlocks(startHeight uint64, num uint64, proposerKey crypto.PrivKey, lastHeaderHash [32]byte, nextSequencerHash [32]byte) ([]*types.Block, error) { r, _ := proposerKey.Raw() seq := types.NewSequencerFromValidator(*tmtypes.NewValidator(ed25519.PrivKey(r).PubKey(), 1)) @@ -304,7 +304,7 @@ func MustGenerateBatchAndKey(startHeight uint64, endHeight uint64) *types.Batch return MustGenerateBatch(startHeight, endHeight, proposerKey) } -// GenerateRandomValidatorSet generates random validator sets + func GenerateRandomValidatorSet() *tmtypes.ValidatorSet { return tmtypes.NewValidatorSet([]*tmtypes.Validator{ tmtypes.NewValidator(ed25519.GenPrivKey().PubKey(), 1), @@ -320,11 +320,11 @@ func GenerateSequencer() types.Sequencer { ) } -// GenerateStateWithSequencer generates an initial state for testing. + func GenerateStateWithSequencer(initialHeight int64, lastBlockHeight int64, pubkey tmcrypto.PubKey) *types.State { s := &types.State{ ChainID: "test-chain", - InitialHeight: uint64(initialHeight), //nolint:gosec // height is non-negative and falls in int64 + InitialHeight: uint64(initialHeight), AppHash: [32]byte{}, LastResultsHash: GetEmptyLastResultsHash(), Version: tmstate.Version{ @@ -350,11 +350,11 @@ func GenerateStateWithSequencer(initialHeight int64, lastBlockHeight int64, pubk GenerateSettlementAddress(), []string{GenerateSettlementAddress()}, )) - s.SetHeight(uint64(lastBlockHeight)) //nolint:gosec // height is non-negative and falls in int64 + s.SetHeight(uint64(lastBlockHeight)) return s } -// GenerateGenesis generates a genesis for testing. 
+ func GenerateGenesis(initialHeight int64) *tmtypes.GenesisDoc { return &tmtypes.GenesisDoc{ ChainID: "test-chain", diff --git a/types/batch.go b/types/batch.go index 14d486539..ecfadd20f 100644 --- a/types/batch.go +++ b/types/batch.go @@ -1,21 +1,21 @@ package types const ( - MaxBlockSizeAdjustment = 0.9 // have a safety margin of 10% in regard of MaxBlockBatchSizeBytes + MaxBlockSizeAdjustment = 0.9 ) -// Batch defines a struct for block aggregation for support of batching. -// TODO: maybe change to BlockBatch + + type Batch struct { Blocks []*Block Commits []*Commit - // LastBatch is true if this is the last batch of the sequencer (i.e completes it's rotation flow). + LastBatch bool DRSVersion []uint32 Revision uint64 } -// StartHeight is the height of the first block in the batch. + func (b Batch) StartHeight() uint64 { if len(b.Blocks) == 0 { return 0 @@ -23,7 +23,7 @@ func (b Batch) StartHeight() uint64 { return b.Blocks[0].Header.Height } -// EndHeight is the height of the last block in the batch + func (b Batch) EndHeight() uint64 { if len(b.Blocks) == 0 { return 0 @@ -31,14 +31,14 @@ func (b Batch) EndHeight() uint64 { return b.Blocks[len(b.Blocks)-1].Header.Height } -// NumBlocks is the number of blocks in the batch + func (b Batch) NumBlocks() uint64 { return uint64(len(b.Blocks)) } -// SizeBlockAndCommitBytes returns the sum of the size of bytes of the blocks and commits -// The actual size of the batch may be different due to additional metadata and protobuf -// optimizations. + + + func (b Batch) SizeBlockAndCommitBytes() int { cnt := 0 for _, block := range b.Blocks { diff --git a/types/block.go b/types/block.go index e6d2c1673..153eb3333 100644 --- a/types/block.go +++ b/types/block.go @@ -8,40 +8,40 @@ import ( tmtypes "github.com/tendermint/tendermint/types" ) -// Header defines the structure of Dymint block header. + type Header struct { - // Block and App version + Version Version Height uint64 - Time int64 // UNIX time in nanoseconds. Use int64 as Golang stores UNIX nanoseconds in int64. + Time int64 - // prev block info + LastHeaderHash [32]byte - // hashes of block data - LastCommitHash [32]byte // commit from sequencer(s) from the last block - DataHash [32]byte // Block.Data root aka Transactions - ConsensusHash [32]byte // consensus params for current block - AppHash [32]byte // state after applying txs from height-1 + + LastCommitHash [32]byte + DataHash [32]byte + ConsensusHash [32]byte + AppHash [32]byte - // Root hash of all results from the txs from the previous block. - // This is ABCI specific but smart-contract chains require some way of committing - // to transaction receipts/results. + + + LastResultsHash [32]byte - // Note that the address can be derived from the pubkey which can be derived - // from the signature when using secp256k. - // We keep this in case users choose another signature format where the - // pubkey can't be recovered by the signature (e.g. ed25519). - ProposerAddress []byte // original proposer of the block + + + + + ProposerAddress []byte - // Hash of proposer validatorSet (compatible with tendermint) + SequencerHash [32]byte - // Hash of the next proposer validatorSet (compatible with tendermint) + NextSequencersHash [32]byte - // The Chain ID + ChainID string } @@ -54,16 +54,16 @@ var ( _ encoding.BinaryUnmarshaler = &Header{} ) -// Version captures the consensus rules for processing a block in the blockchain, -// including all blockchain data structures and the rules of the application's -// state transition machine. 
-// This is equivalent to the tmversion.Consensus type in Tendermint. + + + + type Version struct { Block uint64 App uint64 } -// Block defines the structure of Dymint block. + type Block struct { Header Header Data Data @@ -83,7 +83,7 @@ var ( _ encoding.BinaryUnmarshaler = &Block{} ) -// Data defines Dymint block data. + type Data struct { Txs Txs IntermediateStateRoots IntermediateStateRoots @@ -91,16 +91,16 @@ type Data struct { ConsensusMessages []*proto.Any } -// EvidenceData defines how evidence is stored in block. + type EvidenceData struct { Evidence []Evidence } -// Commit contains evidence of block creation. + type Commit struct { Height uint64 HeaderHash [32]byte - // TODO(omritoptix): Change from []Signature to Signature as it should be one signature per block + Signatures []Signature TMSignature tmtypes.CommitSig } @@ -109,11 +109,11 @@ func (c Commit) SizeBytes() int { return c.ToProto().Size() } -// Signature represents signature of block creator. + type Signature []byte -// IntermediateStateRoots describes the state between transactions. -// They are required for fraud proofs. + + type IntermediateStateRoots struct { RawRootsList [][]byte } @@ -123,7 +123,7 @@ func GetLastCommitHash(lastCommit *Commit, header *Header) []byte { return lastABCICommit.Hash() } -// GetDataHash returns the hash of the block data to be set in the block header. + func GetDataHash(block *Block) []byte { abciData := tmtypes.Data{ Txs: ToABCIBlockDataTxs(&block.Data), diff --git a/types/block_source.go b/types/block_source.go index e6304c524..43a2a0be5 100644 --- a/types/block_source.go +++ b/types/block_source.go @@ -24,7 +24,7 @@ var AllSources = []string{"none", "produced", "gossip", "blocksync", "da", "loca type BlockMetaData struct { Source BlockSource DAHeight uint64 - SequencerSet Sequencers // The set of Rollapp sequencers that were present in the Hub while producing this block + SequencerSet Sequencers } type CachedBlock struct { diff --git a/types/conv.go b/types/conv.go index afbfc94a6..37f66eceb 100644 --- a/types/conv.go +++ b/types/conv.go @@ -6,22 +6,22 @@ import ( tmtypes "github.com/tendermint/tendermint/types" ) -// ToABCIHeaderPB converts Dymint header to Header format defined in ABCI. -// Caller should fill all the fields that are not available in Dymint header (like ChainID). + + func ToABCIHeaderPB(header *Header) types.Header { tmheader := ToABCIHeader(header) return *tmheader.ToProto() } -// ToABCIHeader converts Dymint header to Header format defined in ABCI. -// Caller should fill all the fields that are not available in Dymint header (like ChainID). + + func ToABCIHeader(header *Header) tmtypes.Header { return tmtypes.Header{ Version: version.Consensus{ Block: header.Version.Block, App: header.Version.App, }, - Height: int64(header.Height), //nolint:gosec // height is non-negative and falls in int64 + Height: int64(header.Height), Time: header.GetTimestamp(), LastBlockID: tmtypes.BlockID{ Hash: header.LastHeaderHash[:], @@ -43,12 +43,12 @@ func ToABCIHeader(header *Header) tmtypes.Header { } } -// ToABCIBlock converts Dymint block into block format defined by ABCI. -// Returned block should pass `ValidateBasic`. 
+ + func ToABCIBlock(block *Block) (*tmtypes.Block, error) { abciHeader := ToABCIHeader(&block.Header) abciCommit := ToABCICommit(&block.LastCommit, &block.Header) - // This assumes that we have only one signature + if len(abciCommit.Signatures) == 1 { abciCommit.Signatures[0].ValidatorAddress = block.Header.ProposerAddress } @@ -65,7 +65,7 @@ func ToABCIBlock(block *Block) (*tmtypes.Block, error) { return &abciBlock, nil } -// ToABCIBlockDataTxs converts Dymint block-data into block-data format defined by ABCI. + func ToABCIBlockDataTxs(data *Data) []tmtypes.Tx { txs := make([]tmtypes.Tx, len(data.Txs)) for i := range data.Txs { @@ -74,7 +74,7 @@ func ToABCIBlockDataTxs(data *Data) []tmtypes.Tx { return txs } -// ToABCIBlockMeta converts Dymint block into BlockMeta format defined by ABCI + func ToABCIBlockMeta(block *Block) (*tmtypes.BlockMeta, error) { tmblock, err := ToABCIBlock(block) if err != nil { @@ -90,13 +90,13 @@ func ToABCIBlockMeta(block *Block) (*tmtypes.BlockMeta, error) { }, nil } -// ToABCICommit converts Dymint commit into commit format defined by ABCI. -// This function only converts fields that are available in Dymint commit. -// Other fields (especially ValidatorAddress and Timestamp of Signature) has to be filled by caller. + + + func ToABCICommit(commit *Commit, header *Header) *tmtypes.Commit { headerHash := header.Hash() tmCommit := tmtypes.Commit{ - Height: int64(commit.Height), //nolint:gosec // height is non-negative and falls in int64 + Height: int64(commit.Height), Round: 0, BlockID: tmtypes.BlockID{ Hash: headerHash[:], @@ -106,7 +106,7 @@ func ToABCICommit(commit *Commit, header *Header) *tmtypes.Commit { }, }, } - // Check if TMSignature exists. if not use the previous dymint signature for backwards compatibility. + if len(commit.TMSignature.Signature) == 0 { for _, sig := range commit.Signatures { commitSig := tmtypes.CommitSig{ @@ -115,7 +115,7 @@ func ToABCICommit(commit *Commit, header *Header) *tmtypes.Commit { } tmCommit.Signatures = append(tmCommit.Signatures, commitSig) } - // This assumes that we have only one signature + if len(commit.Signatures) == 1 { tmCommit.Signatures[0].ValidatorAddress = header.ProposerAddress tmCommit.Signatures[0].Timestamp = header.GetTimestamp() diff --git a/types/errors.go b/types/errors.go index 033c5bd80..418e5e5a6 100644 --- a/types/errors.go +++ b/types/errors.go @@ -24,11 +24,11 @@ var ( ErrEmptyProposerAddress = errors.New("no proposer address") ) -// TimeFraudMaxDrift is the maximum allowed time drift between the block time and the local time. + var TimeFraudMaxDrift = 10 * time.Minute -// ErrFraudHeightMismatch is the fraud that occurs when the height of the block is different from the expected -// next height of the state. + + type ErrFraudHeightMismatch struct { Expected uint64 Actual uint64 @@ -37,7 +37,7 @@ type ErrFraudHeightMismatch struct { Proposer []byte } -// NewErrFraudHeightMismatch creates a new ErrFraudHeightMismatch error. + func NewErrFraudHeightMismatch(expected uint64, header *Header) error { return &ErrFraudHeightMismatch{ Expected: expected, @@ -56,7 +56,7 @@ func (e ErrFraudHeightMismatch) Unwrap() error { return gerrc.ErrFault } -// ErrFraudAppHashMismatch is the fraud that occurs when the AppHash of the block is different from the expected AppHash. + type ErrFraudAppHashMismatch struct { Expected [32]byte @@ -66,7 +66,7 @@ type ErrFraudAppHashMismatch struct { Proposer []byte } -// NewErrFraudAppHashMismatch creates a new ErrFraudAppHashMismatch error. 
+ func NewErrFraudAppHashMismatch(expected [32]byte, header *Header) error { return &ErrFraudAppHashMismatch{ Expected: expected, @@ -86,7 +86,7 @@ func (e ErrFraudAppHashMismatch) Unwrap() error { return gerrc.ErrFault } -// ErrLastResultsHashMismatch indicates a potential fraud when the LastResultsHash of a block does not match the expected value. + type ErrLastResultsHashMismatch struct { Expected [32]byte @@ -96,7 +96,7 @@ type ErrLastResultsHashMismatch struct { LastResultHash [32]byte } -// NewErrLastResultsHashMismatch creates a new ErrLastResultsHashMismatch error. + func NewErrLastResultsHashMismatch(expected [32]byte, header *Header) error { return &ErrLastResultsHashMismatch{ Expected: expected, @@ -116,7 +116,7 @@ func (e ErrLastResultsHashMismatch) Unwrap() error { return gerrc.ErrFault } -// ErrTimeFraud represents an error indicating a possible fraud due to time drift. + type ErrTimeFraud struct { Drift time.Duration ProposerAddress []byte @@ -153,7 +153,7 @@ func (e ErrTimeFraud) Unwrap() error { return gerrc.ErrFault } -// ErrLastHeaderHashMismatch is the error that occurs when the last header hash does not match the expected value. + type ErrLastHeaderHashMismatch struct { Expected [32]byte LastHeaderHash [32]byte @@ -174,7 +174,7 @@ func (e ErrLastHeaderHashMismatch) Unwrap() error { return gerrc.ErrFault } -// ErrInvalidChainID is the fraud that occurs when the chain ID of the block is different from the expected chain ID. + type ErrInvalidChainID struct { Expected string Block *Block @@ -200,8 +200,8 @@ func (e ErrInvalidChainID) Unwrap() error { return gerrc.ErrFault } -// ErrInvalidBlockHeightFraud is the fraud that happens when the height that is on the commit header is -// different from the height of the block. + + type ErrInvalidBlockHeightFraud struct { Expected uint64 Header *Header @@ -227,7 +227,7 @@ func (e ErrInvalidBlockHeightFraud) Unwrap() error { return gerrc.ErrFault } -// ErrInvalidHeaderHashFraud indicates a potential fraud when the Header Hash does not match the expected value. + type ErrInvalidHeaderHashFraud struct { ExpectedHash [32]byte Header *Header @@ -253,7 +253,7 @@ func (e ErrInvalidHeaderHashFraud) Unwrap() error { return gerrc.ErrFault } -// ErrInvalidSignatureFraud indicates a potential fraud due to an invalid signature in the block. + type ErrInvalidSignatureFraud struct { Err error Header *Header @@ -280,7 +280,7 @@ func (e ErrInvalidSignatureFraud) Unwrap() error { return gerrc.ErrFault } -// ErrInvalidProposerAddressFraud indicates a potential fraud when the proposer's address is invalid. + type ErrInvalidProposerAddressFraud struct { ExpectedAddress []byte ActualAddress tmcrypto.Address @@ -308,7 +308,7 @@ func (e ErrInvalidProposerAddressFraud) Unwrap() error { return gerrc.ErrFault } -// ErrInvalidSequencerHashFraud indicates a potential fraud when the sequencer's hash is invalid. + type ErrInvalidSequencerHashFraud struct { ExpectedHash [32]byte ActualHash []byte @@ -336,7 +336,7 @@ func (e ErrInvalidSequencerHashFraud) Unwrap() error { return gerrc.ErrFault } -// ErrInvalidNextSequencersHashFraud indicates a potential fraud when the NextSequencersHash does not match the expected value. + type ErrInvalidNextSequencersHashFraud struct { ExpectedHash [32]byte Header Header @@ -361,7 +361,7 @@ func (e ErrInvalidNextSequencersHashFraud) Unwrap() error { return gerrc.ErrFault } -// ErrInvalidHeaderDataHashFraud indicates a potential fraud when the Header Data Hash does not match the expected value. 
+ type ErrInvalidHeaderDataHashFraud struct { Expected [32]byte Actual [32]byte @@ -390,7 +390,7 @@ func (e ErrInvalidHeaderDataHashFraud) Unwrap() error { return gerrc.ErrFault } -// ErrStateUpdateNumBlocksNotMatchingFraud represents an error where the number of blocks in the state update does not match the expected number. + type ErrStateUpdateNumBlocksNotMatchingFraud struct { StateIndex uint64 SLNumBlocks uint64 @@ -418,8 +418,8 @@ func (e ErrStateUpdateNumBlocksNotMatchingFraud) Unwrap() error { return gerrc.ErrFault } -// ErrStateUpdateHeightNotMatchingFraud is the fraud that happens when the height that is on the commit header is -// different from the height of the block. + + type ErrStateUpdateHeightNotMatchingFraud struct { StateIndex uint64 SLBeginHeight uint64 @@ -449,7 +449,7 @@ func (e ErrStateUpdateHeightNotMatchingFraud) Unwrap() error { return gerrc.ErrFault } -// ErrStateUpdateStateRootNotMatchingFraud represents an error where the state roots do not match in the state update. + type ErrStateUpdateStateRootNotMatchingFraud struct { StateIndex uint64 Height uint64 @@ -478,7 +478,7 @@ func (e ErrStateUpdateStateRootNotMatchingFraud) Unwrap() error { return gerrc.ErrFault } -// ErrStateUpdateTimestampNotMatchingFraud represents an error where the timestamps do not match in the state update. + type ErrStateUpdateTimestampNotMatchingFraud struct { StateIndex uint64 Height uint64 @@ -506,7 +506,7 @@ func (e ErrStateUpdateTimestampNotMatchingFraud) Unwrap() error { return gerrc.ErrFault } -// ErrStateUpdateDoubleSigningFraud indicates a potential fraud due to double signing detected between DA and P2P blocks. + type ErrStateUpdateDoubleSigningFraud struct { DABlock *Block P2PBlock *Block @@ -571,7 +571,7 @@ func getJsonFromBlock(block *Block) ([]byte, error) { return jsonBlock, nil } -// ErrStateUpdateBlobNotAvailableFraud represents an error where a blob is not available in DA. + type ErrStateUpdateBlobNotAvailableFraud struct { StateIndex uint64 DA string @@ -599,7 +599,7 @@ func (e ErrStateUpdateBlobNotAvailableFraud) Unwrap() error { return gerrc.ErrFault } -// ErrStateUpdateBlobCorruptedFraud represents an error where a blob is corrupted in DA. + type ErrStateUpdateBlobCorruptedFraud struct { StateIndex uint64 DA string @@ -627,7 +627,7 @@ func (e ErrStateUpdateBlobCorruptedFraud) Unwrap() error { return gerrc.ErrFault } -// ErrStateUpdateDRSVersionFraud represents an error where the DRS versions do not match in the state update. + type ErrStateUpdateDRSVersionFraud struct { StateIndex uint64 Height uint64 diff --git a/types/evidence.go b/types/evidence.go index ba17e1b0b..8aff5b04f 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -3,19 +3,19 @@ package types import ( "time" - // TODO: either copy the vanilla abci types (or the protos) into this repo - // or, import the vanilla tendermint types instead. + + abci "github.com/tendermint/tendermint/abci/types" ) -// Evidence represents any provable malicious activity by a validator. -// Verification logic for each evidence is part of the evidence module. 
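All of the fraud error types above follow one pattern: a rich error struct whose Unwrap returns the gerrc.ErrFault sentinel, so a single errors.Is check catches every fraud variant. A compressed standalone sketch of that pattern (errFault and heightMismatch are local stand-ins):

    package main

    import (
        "errors"
        "fmt"
    )

    // errFault is a local stand-in for the gerrc.ErrFault sentinel used above.
    var errFault = errors.New("fault")

    // heightMismatch mimics the fraud-error shape in the hunks above: a concrete
    // error type whose Unwrap returns the fault sentinel.
    type heightMismatch struct{ expected, actual uint64 }

    func (e heightMismatch) Error() string {
        return fmt.Sprintf("possible fraud: height mismatch, expected %d got %d", e.expected, e.actual)
    }

    func (e heightMismatch) Unwrap() error { return errFault }

    func main() {
        var err error = heightMismatch{expected: 10, actual: 12}
        fmt.Println(errors.Is(err, errFault)) // true
    }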
+ + type Evidence interface { - ABCI() []abci.Evidence // forms individual evidence to be sent to the application - Bytes() []byte // bytes which comprise the evidence - Hash() []byte // hash of the evidence - Height() int64 // height of the infraction - String() string // string format of the evidence - Time() time.Time // time of the infraction - ValidateBasic() error // basic consistency check + ABCI() []abci.Evidence + Bytes() []byte + Hash() []byte + Height() int64 + String() string + Time() time.Time + ValidateBasic() error } diff --git a/types/hashing.go b/types/hashing.go index 17162ee0e..931df948a 100644 --- a/types/hashing.go +++ b/types/hashing.go @@ -1,6 +1,6 @@ package types -// Hash returns ABCI-compatible hash of a header. + func (h *Header) Hash() [32]byte { var hash [32]byte abciHeader := ToABCIHeader(h) @@ -8,7 +8,7 @@ func (h *Header) Hash() [32]byte { return hash } -// Hash returns ABCI-compatible hash of a block. + func (b *Block) Hash() [32]byte { return b.Header.Hash() } diff --git a/types/instruction.go b/types/instruction.go index ebae50aa5..8f735f6d1 100644 --- a/types/instruction.go +++ b/types/instruction.go @@ -33,7 +33,7 @@ func LoadInstructionFromDisk(dir string) (Instruction, error) { var instruction Instruction filePath := filepath.Join(dir, instructionFileName) - data, err := os.ReadFile(filePath) // nolint:gosec + data, err := os.ReadFile(filePath) if err != nil { return Instruction{}, err } diff --git a/types/logger.go b/types/logger.go index e2c8fcdac..dfc89d708 100644 --- a/types/logger.go +++ b/types/logger.go @@ -1,6 +1,6 @@ package types -// Logger interface is compatible with Tendermint logger + type Logger interface { Debug(msg string, keyvals ...interface{}) Info(msg string, keyvals ...interface{}) diff --git a/types/pb/dymensionxyz/dymension/rollapp/errors.go b/types/pb/dymensionxyz/dymension/rollapp/errors.go index 2caa18964..1d9d3c05c 100644 --- a/types/pb/dymensionxyz/dymension/rollapp/errors.go +++ b/types/pb/dymensionxyz/dymension/rollapp/errors.go @@ -1,13 +1,13 @@ package rollapp -// DONTCOVER + import ( errorsmod "cosmossdk.io/errors" "github.com/dymensionxyz/gerr-cosmos/gerrc" ) -// x/rollapp module sentinel errors + var ( ErrRollappExists = errorsmod.Register(ModuleName, 1000, "rollapp already exists") ErrInvalidInitialSequencer = errorsmod.Register(ModuleName, 1001, "empty initial sequencer") @@ -44,7 +44,7 @@ var ( ErrInvalidRequest = errorsmod.Wrap(gerrc.ErrInvalidArgument, "invalid request") ErrInvalidVMType = errorsmod.Wrap(gerrc.ErrInvalidArgument, "invalid vm type") - /* ------------------------------ fraud related ----------------------------- */ + ErrDisputeAlreadyFinalized = errorsmod.Register(ModuleName, 2000, "disputed height already finalized") ErrDisputeAlreadyReverted = errorsmod.Register(ModuleName, 2001, "disputed height already reverted") ErrWrongClientId = errorsmod.Register(ModuleName, 2002, "client id does not match the rollapp") diff --git a/types/pb/dymensionxyz/dymension/rollapp/events.go b/types/pb/dymensionxyz/dymension/rollapp/events.go index ae0f6e3d1..259a12f03 100644 --- a/types/pb/dymensionxyz/dymension/rollapp/events.go +++ b/types/pb/dymensionxyz/dymension/rollapp/events.go @@ -11,12 +11,12 @@ const ( AttributeKeyDAPath = "da_path" AttributeKeyStatus = "status" - // EventTypeFraud is emitted when a fraud evidence is submitted + EventTypeFraud = "fraud_proposal" AttributeKeyFraudHeight = "fraud_height" AttributeKeyFraudSequencer = "fraud_sequencer" AttributeKeyClientID = "client_id" - // 
EventTypeTransferGenesisTransfersEnabled is when the bridge is enabled + EventTypeTransferGenesisTransfersEnabled = "transfer_genesis_transfers_enabled" ) diff --git a/types/pb/dymensionxyz/dymension/rollapp/keys.go b/types/pb/dymensionxyz/dymension/rollapp/keys.go index ca4e7b64c..61858ca0d 100644 --- a/types/pb/dymensionxyz/dymension/rollapp/keys.go +++ b/types/pb/dymensionxyz/dymension/rollapp/keys.go @@ -1,19 +1,19 @@ package rollapp const ( - // ModuleName defines the module name + ModuleName = "rollapp" - // StoreKey defines the primary module store key + StoreKey = ModuleName - // RouterKey is the message route for slashing + RouterKey = ModuleName - // QuerierRoute defines the module's query routing key + QuerierRoute = ModuleName - // MemStoreKey defines the in-memory store key + MemStoreKey = "mem_rollapp" ) diff --git a/types/pb/dymensionxyz/dymension/rollapp/message_update_state.go b/types/pb/dymensionxyz/dymension/rollapp/message_update_state.go index 11b1c7f3c..0dec4fc93 100644 --- a/types/pb/dymensionxyz/dymension/rollapp/message_update_state.go +++ b/types/pb/dymensionxyz/dymension/rollapp/message_update_state.go @@ -25,7 +25,7 @@ func (msg *MsgUpdateState) ValidateBasic() error { return errorsmod.Wrapf(ErrInvalidAddress, "invalid creator address (%s)", err) } - // an update can't be with no BDs + if msg.NumBlocks == uint64(0) { return errorsmod.Wrap(ErrInvalidNumBlocks, "number of blocks can not be zero") } @@ -34,22 +34,22 @@ func (msg *MsgUpdateState) ValidateBasic() error { return errorsmod.Wrapf(ErrInvalidNumBlocks, "numBlocks(%d) + startHeight(%d) exceeds max uint64", msg.NumBlocks, msg.StartHeight) } - // check to see that update contains all BDs + if uint64(len(msg.BDs.BD)) != msg.NumBlocks { return errorsmod.Wrapf(ErrInvalidNumBlocks, "number of blocks (%d) != number of block descriptors(%d)", msg.NumBlocks, len(msg.BDs.BD)) } - // check to see that startHeight is not zaro + if msg.StartHeight == 0 { return errorsmod.Wrapf(ErrWrongBlockHeight, "StartHeight must be greater than zero") } - // check that the blocks are sequential by height + for bdIndex := uint64(0); bdIndex < msg.NumBlocks; bdIndex += 1 { if msg.BDs.BD[bdIndex].Height != msg.StartHeight+bdIndex { return ErrInvalidBlockSequence } - // check to see stateRoot is a 32 byte array + if len(msg.BDs.BD[bdIndex].StateRoot) != 32 { return errorsmod.Wrapf(ErrInvalidStateRoot, "StateRoot of block high (%d) must be 32 byte array. But received (%d) bytes", msg.BDs.BD[bdIndex].Height, len(msg.BDs.BD[bdIndex].StateRoot)) diff --git a/types/pb/dymensionxyz/dymension/rollapp/params.go b/types/pb/dymensionxyz/dymension/rollapp/params.go index f12bb0f0b..64c9ad818 100644 --- a/types/pb/dymensionxyz/dymension/rollapp/params.go +++ b/types/pb/dymensionxyz/dymension/rollapp/params.go @@ -2,7 +2,7 @@ package rollapp import "gopkg.in/yaml.v2" -// String implements the Stringer interface. + func (p Params) String() string { out, _ := yaml.Marshal(p) return string(out) diff --git a/types/pb/dymensionxyz/dymension/sequencer/events.go b/types/pb/dymensionxyz/dymension/sequencer/events.go index eb93ddc7a..01fd6ea51 100644 --- a/types/pb/dymensionxyz/dymension/sequencer/events.go +++ b/types/pb/dymensionxyz/dymension/sequencer/events.go @@ -1,27 +1,27 @@ package sequencer -// Incentive module event types. 
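The MsgUpdateState.ValidateBasic hunk above enforces that an update carries at least one block descriptor, starts above height zero, lists descriptors at strictly sequential heights, and carries 32-byte state roots. A standalone sketch of those checks, with a local BD type standing in for the generated protobuf descriptor:

    package main

    import (
        "errors"
        "fmt"
    )

    // BD is a local stand-in for the generated block descriptor type.
    type BD struct {
        Height    uint64
        StateRoot []byte
    }

    // validateBDs mirrors the descriptor checks in MsgUpdateState.ValidateBasic
    // above: heights must be sequential from startHeight and state roots 32 bytes.
    func validateBDs(startHeight uint64, bds []BD) error {
        if len(bds) == 0 {
            return errors.New("number of blocks can not be zero")
        }
        if startHeight == 0 {
            return errors.New("StartHeight must be greater than zero")
        }
        for i, bd := range bds {
            if bd.Height != startHeight+uint64(i) {
                return fmt.Errorf("block %d out of sequence", bd.Height)
            }
            if len(bd.StateRoot) != 32 {
                return fmt.Errorf("state root of height %d must be 32 bytes, got %d", bd.Height, len(bd.StateRoot))
            }
        }
        return nil
    }

    func main() {
        fmt.Println(validateBDs(5, []BD{{Height: 5, StateRoot: make([]byte, 32)}})) // <nil>
    }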
+ const ( - // EventTypeCreateSequencer is emitted when a sequencer is created + EventTypeCreateSequencer = "create_sequencer" AttributeKeyRollappId = "rollapp_id" AttributeKeySequencer = "sequencer" AttributeKeyBond = "bond" AttributeKeyProposer = "proposer" - // EventTypeUnbonding is emitted when a sequencer is unbonding + EventTypeUnbonding = "unbonding" AttributeKeyCompletionTime = "completion_time" - // EventTypeNoBondedSequencer is emitted when no bonded sequencer is found for a rollapp + EventTypeNoBondedSequencer = "no_bonded_sequencer" - // EventTypeProposerRotated is emitted when a proposer is rotated + EventTypeProposerRotated = "proposer_rotated" - // EventTypeUnbonded is emitted when a sequencer is unbonded + EventTypeUnbonded = "unbonded" - // EventTypeSlashed is emitted when a sequencer is slashed + EventTypeSlashed = "slashed" ) diff --git a/types/pb/dymensionxyz/dymension/sequencer/keys.go b/types/pb/dymensionxyz/dymension/sequencer/keys.go index c4b84447f..8bae0a8a5 100644 --- a/types/pb/dymensionxyz/dymension/sequencer/keys.go +++ b/types/pb/dymensionxyz/dymension/sequencer/keys.go @@ -11,63 +11,63 @@ import ( var _ binary.ByteOrder const ( - // ModuleName defines the module name + ModuleName = "sequencer" - // StoreKey defines the primary module store key + StoreKey = ModuleName - // RouterKey is the message route for slashing + RouterKey = ModuleName - // QuerierRoute defines the module's query routing key + QuerierRoute = ModuleName - // MemStoreKey defines the in-memory store key + MemStoreKey = "mem_sequencer" ) var ( - // KeySeparator defines the separator for keys + KeySeparator = "/" - // SequencersKeyPrefix is the prefix to retrieve all Sequencers by their address - SequencersKeyPrefix = []byte{0x00} // prefix/seqAddr + + SequencersKeyPrefix = []byte{0x00} - // SequencersByRollappKeyPrefix is the prefix to retrieve all SequencersByRollapp - SequencersByRollappKeyPrefix = []byte{0x01} // prefix/rollappId + + SequencersByRollappKeyPrefix = []byte{0x01} BondedSequencersKeyPrefix = []byte{0xa1} UnbondedSequencersKeyPrefix = []byte{0xa2} UnbondingSequencersKeyPrefix = []byte{0xa3} - UnbondingQueueKey = []byte{0x41} // prefix for the timestamps in unbonding queue + UnbondingQueueKey = []byte{0x41} ) -/* --------------------- specific sequencer address keys -------------------- */ + func SequencerKey(sequencerAddress string) []byte { sequencerAddrBytes := []byte(sequencerAddress) return []byte(fmt.Sprintf("%s%s%s", SequencersKeyPrefix, KeySeparator, sequencerAddrBytes)) } -// SequencerByRollappByStatusKey returns the store key to retrieve a SequencersByRollapp from the index fields + func SequencerByRollappByStatusKey(rollappId, seqAddr string, status OperatingStatus) []byte { return append(SequencersByRollappByStatusKey(rollappId, status), []byte(seqAddr)...) 
} -/* ------------------------- multiple sequencers keys ------------------------ */ + func SequencersKey() []byte { return SequencersKeyPrefix } -// SequencersByRollappKey returns the store key to retrieve a SequencersByRollapp from the index fields + func SequencersByRollappKey(rollappId string) []byte { rollappIdBytes := []byte(rollappId) return []byte(fmt.Sprintf("%s%s%s", SequencersByRollappKeyPrefix, KeySeparator, rollappIdBytes)) } -// SequencersByRollappByStatusKey returns the store key to retrieve a SequencersByRollappByStatus from the index fields + func SequencersByRollappByStatusKey(rollappId string, status OperatingStatus) []byte { - // Get the relevant key prefix based on the packet status + var prefix []byte switch status { case Bonded: @@ -81,16 +81,16 @@ func SequencersByRollappByStatusKey(rollappId string, status OperatingStatus) [] return []byte(fmt.Sprintf("%s%s%s", SequencersByRollappKey(rollappId), KeySeparator, prefix)) } -/* -------------------------- unbonding queue keys -------------------------- */ + func UnbondingQueueByTimeKey(endTime time.Time) []byte { timeBz := sdk.FormatTimeBytes(endTime) prefixL := len(UnbondingQueueKey) bz := make([]byte, prefixL+len(timeBz)) - // copy the prefix + copy(bz[:prefixL], UnbondingQueueKey) - // copy the encoded time bytes + copy(bz[prefixL:prefixL+len(timeBz)], timeBz) return bz diff --git a/types/pb/dymensionxyz/dymension/sequencer/params.go b/types/pb/dymensionxyz/dymension/sequencer/params.go index 5bf8971f0..de39b13dc 100644 --- a/types/pb/dymensionxyz/dymension/sequencer/params.go +++ b/types/pb/dymensionxyz/dymension/sequencer/params.go @@ -4,7 +4,7 @@ import ( "gopkg.in/yaml.v2" ) -// String implements the Stringer interface. + func (p Params) String() string { out, _ := yaml.Marshal(p) return string(out) diff --git a/types/rollapp.go b/types/rollapp.go index f6fcd1d14..87951daf5 100644 --- a/types/rollapp.go +++ b/types/rollapp.go @@ -14,7 +14,7 @@ type Revision struct { func (r Rollapp) LatestRevision() Revision { if len(r.Revisions) == 0 { - // Revision 0 if no revisions exist. + return Revision{} } return r.Revisions[len(r.Revisions)-1] diff --git a/types/sequencer_set.go b/types/sequencer_set.go index 6d40142e7..1d294b2c7 100644 --- a/types/sequencer_set.go +++ b/types/sequencer_set.go @@ -13,18 +13,18 @@ import ( "github.com/tendermint/tendermint/types" ) -// Sequencer is a struct that holds the sequencer's information and tendermint validator. -// It is populated from the Hub on start and is periodically updated from the Hub polling. -// Uses tendermint's validator types for compatibility. + + + type Sequencer struct { - // SettlementAddress is the address of the sequencer in the settlement layer (bech32 string) + SettlementAddress string - // RewardAddr is the bech32-encoded sequencer's reward address + RewardAddr string - // WhitelistedRelayers is a list of the whitelisted relayer addresses. Addresses are bech32-encoded strings. + WhitelistedRelayers []string - // val is a tendermint validator type for compatibility. Holds the public key and cons address. 
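The sequencer key helpers above build store keys by concatenating a fixed prefix with an encoded payload; UnbondingQueueByTimeKey in particular appends a sortable timestamp so a prefix scan walks the queue in time order. A standalone sketch of that layout, using a fixed-width time layout as a stand-in for sdk.FormatTimeBytes:

    package main

    import (
        "fmt"
        "time"
    )

    var unbondingQueuePrefix = []byte{0x41}

    // unbondingQueueKey concatenates the queue prefix with a fixed-width,
    // lexicographically sortable encoding of the completion time, so iterating
    // over the prefix range visits entries in chronological order.
    func unbondingQueueKey(endTime time.Time) []byte {
        timeBz := []byte(endTime.UTC().Format("2006-01-02T15:04:05.000000000")) // stand-in for sdk.FormatTimeBytes
        bz := make([]byte, len(unbondingQueuePrefix)+len(timeBz))
        copy(bz, unbondingQueuePrefix)
        copy(bz[len(unbondingQueuePrefix):], timeBz)
        return bz
    }

    func main() {
        fmt.Printf("%x\n", unbondingQueueKey(time.Now()))
    }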
+ val types.Validator } @@ -45,8 +45,8 @@ func NewSequencer( } } -// IsEmpty returns true if the sequencer is empty -// we check if the pubkey is nil + + func (s Sequencer) IsEmpty() bool { return s.val.PubKey == nil } @@ -71,7 +71,7 @@ func (s Sequencer) TMValset() (*types.ValidatorSet, error) { return types.ValidatorSetFromExistingValidators(s.TMValidators()) } -// Hash returns tendermint compatible hash of the sequencer + func (s Sequencer) Hash() ([]byte, error) { vs, err := s.TMValset() if err != nil { @@ -80,7 +80,7 @@ func (s Sequencer) Hash() ([]byte, error) { return vs.Hash(), nil } -// MustHash returns tendermint compatible hash of the sequencer + func (s Sequencer) MustHash() []byte { h, err := s.Hash() if err != nil { @@ -89,7 +89,7 @@ func (s Sequencer) MustHash() []byte { return h } -// AnyConsPubKey returns sequencer's consensus public key represented as Cosmos proto.Any. + func (s Sequencer) AnyConsPubKey() (*codectypes.Any, error) { val := s.TMValidator() pubKey, err := cryptocodec.FromTmPubKeyInterface(val.PubKey) @@ -103,7 +103,7 @@ func (s Sequencer) AnyConsPubKey() (*codectypes.Any, error) { return anyPK, nil } -// MustFullHash returns a "full" hash of the sequencer that includes all fields of the Sequencer type. + func (s Sequencer) MustFullHash() []byte { h := sha256.New() h.Write([]byte(s.SettlementAddress)) @@ -115,14 +115,14 @@ func (s Sequencer) MustFullHash() []byte { return h.Sum(nil) } -// SequencerListRightOuterJoin returns a set of sequencers that are in B but not in A. -// Sequencer is identified by a hash of all of it's fields. -// -// Example 1: -// -// s1 = {seq1, seq2, seq3} -// s2 = { seq2, seq3, seq4} -// s1 * s2 = { seq4} + + + + + + + + func SequencerListRightOuterJoin(A, B Sequencers) Sequencers { lhsSet := make(map[string]struct{}) for _, s := range A { @@ -141,13 +141,13 @@ func (s Sequencer) String() string { return fmt.Sprintf("Sequencer{SettlementAddress: %s RewardAddr: %s WhitelistedRelayers: %v Validator: %s}", s.SettlementAddress, s.RewardAddr, s.WhitelistedRelayers, s.val.String()) } -// Sequencers is a list of sequencers. + type Sequencers []Sequencer -// SequencerSet is a set of rollapp sequencers. It holds the entire set of sequencers -// that were ever associated with the rollapp (including bonded/unbonded/unbonding). -// It is populated from the Hub on start and is periodically updated from the Hub polling. -// This type is thread-safe. + + + + type SequencerSet struct { mu sync.RWMutex sequencers Sequencers @@ -160,7 +160,7 @@ func NewSequencerSet(s ...Sequencer) *SequencerSet { } } -// Set sets the sequencers of the sequencer set. + func (s *SequencerSet) Set(sequencers Sequencers) { s.mu.Lock() defer s.mu.Unlock() @@ -173,7 +173,7 @@ func (s *SequencerSet) GetAll() Sequencers { return slices.Clone(s.sequencers) } -// GetByHash gets the sequencer by hash. It returns an error if the hash is not found in the sequencer set. + func (s *SequencerSet) GetByHash(hash []byte) (Sequencer, bool) { s.mu.RLock() defer s.mu.RUnlock() @@ -185,8 +185,8 @@ func (s *SequencerSet) GetByHash(hash []byte) (Sequencer, bool) { return Sequencer{}, false } -// GetByAddress returns the sequencer with the given settlement address. 
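SequencerListRightOuterJoin above keeps exactly the members of B whose full-field hash does not appear in A, as the removed doc comment's example shows (s1 = {seq1, seq2, seq3}, s2 = {seq2, seq3, seq4} yields {seq4}). The same shape over plain strings, so it runs standalone:

    package main

    import "fmt"

    // rightOuterJoin returns the elements of b whose key is not present in a,
    // mirroring the hash-keyed set difference used for sequencer lists above.
    func rightOuterJoin(a, b []string) []string {
        seen := make(map[string]struct{}, len(a))
        for _, s := range a {
            seen[s] = struct{}{}
        }
        var out []string
        for _, s := range b {
            if _, ok := seen[s]; !ok {
                out = append(out, s)
            }
        }
        return out
    }

    func main() {
        fmt.Println(rightOuterJoin(
            []string{"seq1", "seq2", "seq3"},
            []string{"seq2", "seq3", "seq4"},
        )) // [seq4]
    }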
-// used when handling events from the settlement, where the settlement address is used + + func (s *SequencerSet) GetByAddress(settlementAddress string) (Sequencer, bool) { s.mu.RLock() defer s.mu.RUnlock() @@ -198,7 +198,7 @@ func (s *SequencerSet) GetByAddress(settlementAddress string) (Sequencer, bool) return Sequencer{}, false } -// GetByConsAddress returns the sequencer with the given consensus address. + func (s *SequencerSet) GetByConsAddress(consAddr []byte) (Sequencer, bool) { s.mu.RLock() defer s.mu.RUnlock() @@ -214,9 +214,9 @@ func (s *SequencerSet) String() string { return fmt.Sprintf("SequencerSet: %v", s.sequencers) } -/* -------------------------- backward compatibility ------------------------- */ -// old dymint version used tendermint.ValidatorSet for sequencers -// these methods are used for backward compatibility + + + func NewSequencerFromValidator(val types.Validator) *Sequencer { return &Sequencer{ diff --git a/types/serialization.go b/types/serialization.go index a4e79bb8e..14965e6a0 100644 --- a/types/serialization.go +++ b/types/serialization.go @@ -12,17 +12,17 @@ import ( pb "github.com/dymensionxyz/dymint/types/pb/dymint" ) -// MarshalBinary encodes Block into binary form and returns it. + func (b *Block) MarshalBinary() ([]byte, error) { return b.ToProto().Marshal() } -// MarshalBinary encodes Batch into binary form and returns it. + func (b *Batch) MarshalBinary() ([]byte, error) { return b.ToProto().Marshal() } -// UnmarshalBinary decodes binary form of Block into object. + func (b *Block) UnmarshalBinary(data []byte) error { var pBlock pb.Block err := pBlock.Unmarshal(data) @@ -33,7 +33,7 @@ func (b *Block) UnmarshalBinary(data []byte) error { return err } -// UnmarshalBinary decodes binary form of Batch into object. + func (b *Batch) UnmarshalBinary(data []byte) error { var pBatch pb.Batch err := pBatch.Unmarshal(data) @@ -44,12 +44,12 @@ func (b *Batch) UnmarshalBinary(data []byte) error { return err } -// MarshalBinary encodes Header into binary form and returns it. + func (h *Header) MarshalBinary() ([]byte, error) { return h.ToProto().Marshal() } -// UnmarshalBinary decodes binary form of Header into object. + func (h *Header) UnmarshalBinary(data []byte) error { var pHeader pb.Header err := pHeader.Unmarshal(data) @@ -60,17 +60,17 @@ func (h *Header) UnmarshalBinary(data []byte) error { return err } -// MarshalBinary encodes Data into binary form and returns it. + func (d *Data) MarshalBinary() ([]byte, error) { return d.ToProto().Marshal() } -// MarshalBinary encodes Commit into binary form and returns it. + func (c *Commit) MarshalBinary() ([]byte, error) { return c.ToProto().Marshal() } -// UnmarshalBinary decodes binary form of Commit into object. + func (c *Commit) UnmarshalBinary(data []byte) error { var pCommit pb.Commit err := pCommit.Unmarshal(data) @@ -81,7 +81,7 @@ func (c *Commit) UnmarshalBinary(data []byte) error { return err } -// ToProto converts Header into protobuf representation and returns it. + func (h *Header) ToProto() *pb.Header { return &pb.Header{ Version: &pb.Version{Block: h.Version.Block, App: h.Version.App}, @@ -101,7 +101,7 @@ func (h *Header) ToProto() *pb.Header { } } -// FromProto fills Header with data from its protobuf representation. 
+ func (h *Header) FromProto(other *pb.Header) error { h.Version.Block = other.Version.Block h.Version.App = other.Version.App @@ -140,8 +140,8 @@ func (h *Header) FromProto(other *pb.Header) error { return nil } -// safeCopy copies bytes from src slice into dst slice if both have same size. -// It returns true if sizes of src and dst are the same. + + func safeCopy(dst, src []byte) bool { if len(src) != len(dst) { return false @@ -150,7 +150,7 @@ func safeCopy(dst, src []byte) bool { return true } -// ToProto converts Block into protobuf representation and returns it. + func (b *Block) ToProto() *pb.Block { return &pb.Block{ Header: b.Header.ToProto(), @@ -159,7 +159,7 @@ func (b *Block) ToProto() *pb.Block { } } -// ToProto converts Batch into protobuf representation and returns it. + func (b *Batch) ToProto() *pb.Batch { return &pb.Batch{ StartHeight: b.StartHeight(), @@ -169,7 +169,7 @@ func (b *Batch) ToProto() *pb.Batch { } } -// ToProto converts Data into protobuf representation and returns it. + func (d *Data) ToProto() *pb.Data { return &pb.Data{ Txs: txsToByteSlices(d.Txs), @@ -179,7 +179,7 @@ func (d *Data) ToProto() *pb.Data { } } -// FromProto fills Block with data from its protobuf representation. + func (b *Block) FromProto(other *pb.Block) error { err := b.Header.FromProto(other.Header) if err != nil { @@ -199,7 +199,7 @@ func (b *Block) FromProto(other *pb.Block) error { return nil } -// FromProto fills Batch with data from its protobuf representation. + func (b *Batch) FromProto(other *pb.Batch) error { n := len(other.Blocks) start := other.StartHeight @@ -215,7 +215,7 @@ func (b *Batch) FromProto(other *pb.Batch) error { return nil } -// ToProto converts Commit into protobuf representation and returns it. + func (c *Commit) ToProto() *pb.Commit { return &pb.Commit{ Height: c.Height, @@ -230,14 +230,14 @@ func (c *Commit) ToProto() *pb.Commit { } } -// FromProto fills Commit with data from its protobuf representation. + func (c *Commit) FromProto(other *pb.Commit) error { c.Height = other.Height if !safeCopy(c.HeaderHash[:], other.HeaderHash) { return errors.New("invalid length of HeaderHash") } c.Signatures = byteSlicesToSignatures(other.Signatures) - // For backwards compatibility with old state files that don't have this field. + if other.TmSignature != nil { c.TMSignature = types.CommitSig{ BlockIDFlag: types.BlockIDFlag(other.TmSignature.BlockIdFlag), @@ -250,7 +250,7 @@ func (c *Commit) FromProto(other *pb.Commit) error { return nil } -// ToProto converts State into protobuf representation and returns it. + func (s *State) ToProto() (*pb.State, error) { var proposerProto *pb.Sequencer proposer := s.GetProposer() @@ -265,25 +265,25 @@ func (s *State) ToProto() (*pb.State, error) { return &pb.State{ Version: &s.Version, ChainId: s.ChainID, - InitialHeight: int64(s.InitialHeight), //nolint:gosec // height is non-negative and falls in int64 - LastBlockHeight: int64(s.Height()), //nolint:gosec // height is non-negative and falls in int64 + InitialHeight: int64(s.InitialHeight), + LastBlockHeight: int64(s.Height()), ConsensusParams: s.ConsensusParams, LastResultsHash: s.LastResultsHash[:], LastHeaderHash: s.LastHeaderHash[:], AppHash: s.AppHash[:], RollappParams: s.RollappParams, Proposer: proposerProto, - RevisionStartHeight: int64(s.RevisionStartHeight), //nolint:gosec // height is non-negative and falls in int64 + RevisionStartHeight: int64(s.RevisionStartHeight), }, nil } -// FromProto fills State with data from its protobuf representation. 
+ func (s *State) FromProto(other *pb.State) error { s.Version = *other.Version s.ChainID = other.ChainId - s.InitialHeight = uint64(other.InitialHeight) //nolint:gosec // height is non-negative and falls in int64 - s.SetHeight(uint64(other.LastBlockHeight)) //nolint:gosec // height is non-negative and falls in int64 - s.RevisionStartHeight = uint64(other.RevisionStartHeight) //nolint:gosec // height is non-negative and falls in int64 + s.InitialHeight = uint64(other.InitialHeight) + s.SetHeight(uint64(other.LastBlockHeight)) + s.RevisionStartHeight = uint64(other.RevisionStartHeight) if other.Proposer != nil { proposer, err := SequencerFromProto(other.Proposer) if err != nil { @@ -291,7 +291,7 @@ func (s *State) FromProto(other *pb.State) error { } s.SetProposer(proposer) } else { - // proposer may be nil in the state + s.SetProposer(nil) } @@ -303,7 +303,7 @@ func (s *State) FromProto(other *pb.State) error { return nil } -// ToProto converts Sequencer into protobuf representation and returns it. + func (s *Sequencer) ToProto() (*pb.Sequencer, error) { if s == nil { return nil, fmt.Errorf("nil sequencer") @@ -320,7 +320,7 @@ func (s *Sequencer) ToProto() (*pb.Sequencer, error) { }, nil } -// SequencerFromProto fills Sequencer with data from its protobuf representation. + func SequencerFromProto(seq *pb.Sequencer) (*Sequencer, error) { if seq == nil { return nil, fmt.Errorf("nil sequencer") @@ -337,7 +337,7 @@ func SequencerFromProto(seq *pb.Sequencer) (*Sequencer, error) { }, nil } -// ToProto converts Sequencers into protobuf representation and returns it. + func (s Sequencers) ToProto() (*pb.SequencerSet, error) { seqs := make([]pb.Sequencer, len(s)) for i, seq := range s { @@ -350,7 +350,7 @@ func (s Sequencers) ToProto() (*pb.SequencerSet, error) { return &pb.SequencerSet{Sequencers: seqs}, nil } -// SequencersFromProto fills Sequencers with data from its protobuf representation. + func SequencersFromProto(s *pb.SequencerSet) (Sequencers, error) { if s == nil { return Sequencers{}, fmt.Errorf("nil sequencer set") @@ -389,7 +389,7 @@ func evidenceToProto(evidence EvidenceData) []*abci.Evidence { var ret []*abci.Evidence for _, e := range evidence.Evidence { for _, ae := range e.ABCI() { - ret = append(ret, &ae) //#nosec + ret = append(ret, &ae) } } return ret @@ -397,7 +397,7 @@ func evidenceToProto(evidence EvidenceData) []*abci.Evidence { func evidenceFromProto([]*abci.Evidence) EvidenceData { var ret EvidenceData - // TODO(tzdybal): right now Evidence is just an interface without implementations + return ret } @@ -423,7 +423,7 @@ func byteSlicesToSignatures(bytes [][]byte) []Signature { return sigs } -// Convert a list of blocks to a list of protobuf blocks. + func blocksToProto(blocks []*Block) []*pb.Block { pbBlocks := make([]*pb.Block, len(blocks)) for i, b := range blocks { @@ -432,7 +432,7 @@ func blocksToProto(blocks []*Block) []*pb.Block { return pbBlocks } -// protoToBlocks converts a list of protobuf blocks to a list of go struct blocks. + func protoToBlocks(pbBlocks []*pb.Block) []*Block { blocks := make([]*Block, len(pbBlocks)) for i, b := range pbBlocks { @@ -445,7 +445,7 @@ func protoToBlocks(pbBlocks []*pb.Block) []*Block { return blocks } -// commitsToProto converts a list of commits to a list of protobuf commits. 
+ func commitsToProto(commits []*Commit) []*pb.Commit { pbCommits := make([]*pb.Commit, len(commits)) for i, c := range commits { @@ -454,7 +454,7 @@ func commitsToProto(commits []*Commit) []*pb.Commit { return pbCommits } -// protoToCommits converts a list of protobuf commits to a list of go struct commits. + func protoToCommits(pbCommits []*pb.Commit) []*Commit { commits := make([]*Commit, len(pbCommits)) for i, c := range pbCommits { diff --git a/types/state.go b/types/state.go index aa96bc985..cf1442b18 100644 --- a/types/state.go +++ b/types/state.go @@ -5,7 +5,7 @@ import ( "fmt" "sync/atomic" - // TODO(tzdybal): copy to local project? + tmcrypto "github.com/tendermint/tendermint/crypto" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" @@ -16,34 +16,34 @@ import ( const rollappparams_modulename = "rollappparams" -// State contains information about current state of the blockchain. + type State struct { Version tmstate.Version RevisionStartHeight uint64 - // immutable + ChainID string - InitialHeight uint64 // should be 1, not 0, when starting from height 1 + InitialHeight uint64 - // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) + LastBlockHeight atomic.Uint64 - // Proposer is a sequencer that acts as a proposer. Can be nil if no proposer is set. + Proposer atomic.Pointer[Sequencer] - // Consensus parameters used for validating blocks. - // Changes returned by EndBlock and updated after Commit. + + ConsensusParams tmproto.ConsensusParams - // Merkle root of the results from executing prev block + LastResultsHash [32]byte - // the latest AppHash we've received from calling abci.Commit() + AppHash [32]byte - // New rollapp parameters . + RollappParams dymint.RollappParams - // LastHeaderHash is the hash of the last block header. + LastHeaderHash [32]byte } @@ -59,7 +59,7 @@ func (s *State) GetProposerPubKey() tmcrypto.PubKey { return proposer.PubKey() } -// GetProposerHash returns the hash of the proposer + func (s *State) GetProposerHash() []byte { proposer := s.Proposer.Load() if proposer == nil { @@ -68,7 +68,7 @@ func (s *State) GetProposerHash() []byte { return proposer.MustHash() } -// SetProposer sets the proposer. It may set the proposer to nil. + func (s *State) SetProposer(proposer *Sequencer) { s.Proposer.Store(proposer) } @@ -81,18 +81,18 @@ type RollappParams struct { Params *dymint.RollappParams } -// SetHeight sets the height saved in the Store if it is higher than the existing height -// returns OK if the value was updated successfully or did not need to be updated + + func (s *State) SetHeight(height uint64) { s.LastBlockHeight.Store(height) } -// Height returns height of the highest block saved in the Store. + func (s *State) Height() uint64 { return s.LastBlockHeight.Load() } -// NextHeight returns the next height that expected to be stored in store. + func (s *State) NextHeight() uint64 { if s.IsGenesis() { return s.InitialHeight @@ -100,7 +100,7 @@ func (s *State) NextHeight() uint64 { return s.Height() + 1 } -// SetRollappParamsFromGenesis sets the rollapp consensus params from genesis + func (s *State) SetRollappParamsFromGenesis(appState json.RawMessage) error { var objmap map[string]json.RawMessage err := json.Unmarshal(appState, &objmap) diff --git a/types/tx.go b/types/tx.go index 0565c5a47..fe4d1f6fa 100644 --- a/types/tx.go +++ b/types/tx.go @@ -6,20 +6,20 @@ import ( tmbytes "github.com/tendermint/tendermint/libs/bytes" ) -// Tx represents transaction. + type Tx []byte -// Txs represents a slice of transactions. 
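// Illustrative sketch, not part of dymint: in types/state.go above, LastBlockHeight lives
// in an atomic.Uint64 so Height()/SetHeight() need no lock, and NextHeight() falls back to
// InitialHeight before the first block. miniState is an assumption for illustration, and
// IsGenesis is assumed to mean "no block stored yet".
package statesketch

import "sync/atomic"

type miniState struct {
	InitialHeight   uint64
	LastBlockHeight atomic.Uint64
}

func (s *miniState) IsGenesis() bool { return s.LastBlockHeight.Load() == 0 }

func (s *miniState) SetHeight(h uint64) { s.LastBlockHeight.Store(h) }

func (s *miniState) Height() uint64 { return s.LastBlockHeight.Load() }

// NextHeight mirrors the logic above: the first block to produce or apply is
// InitialHeight at genesis, otherwise the stored height plus one.
func (s *miniState) NextHeight() uint64 {
	if s.IsGenesis() {
		return s.InitialHeight
	}
	return s.Height() + 1
}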
+ type Txs []Tx -// Hash computes the TMHASH hash of the wire encoded transaction. + func (tx Tx) Hash() []byte { return tmhash.Sum(tx) } -// Proof returns a simple merkle proof for this node. -// Panics if i < 0 or i >= len(txs) -// TODO: optimize this! + + + func (txs Txs) Proof(i int) TxProof { l := len(txs) bzs := make([][]byte, l) @@ -35,7 +35,7 @@ func (txs Txs) Proof(i int) TxProof { } } -// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. + type TxProof struct { RootHash tmbytes.HexBytes `json:"root_hash"` Data Tx `json:"data"` diff --git a/types/validation.go b/types/validation.go index aa5bedae4..3b7c37f48 100644 --- a/types/validation.go +++ b/types/validation.go @@ -21,7 +21,7 @@ func ValidateProposedTransition(state *State, block *Block, commit *Commit, prop return nil } -// ValidateBasic performs basic validation of a block. + func (b *Block) ValidateBasic() error { err := b.Header.ValidateBasic() if err != nil { @@ -93,7 +93,7 @@ func (b *Block) ValidateWithState(state *State) error { return nil } -// ValidateBasic performs basic validation of a header. + func (h *Header) ValidateBasic() error { if len(h.ProposerAddress) == 0 { return ErrEmptyProposerAddress @@ -102,13 +102,13 @@ func (h *Header) ValidateBasic() error { return nil } -// ValidateBasic performs basic validation of block data. -// Actually it's a placeholder, because nothing is checked. + + func (d *Data) ValidateBasic() error { return nil } -// ValidateBasic performs basic validation of a commit. + func (c *Commit) ValidateBasic() error { if c.Height > 0 { if len(c.Signatures) != 1 { @@ -133,7 +133,7 @@ func (c *Commit) ValidateWithHeader(proposerPubKey tmcrypto.PubKey, header *Head return err } - // commit is validated to have single signature + if !proposerPubKey.VerifySignature(abciHeaderBytes, c.Signatures[0]) { return NewErrInvalidSignatureFraud(ErrInvalidSignature, header, c) } diff --git a/utils/atomic/funcs.go b/utils/atomic/funcs.go index 1812d0959..d6cca097e 100644 --- a/utils/atomic/funcs.go +++ b/utils/atomic/funcs.go @@ -4,12 +4,10 @@ import ( "sync/atomic" ) -/* -TODO: move to sdk-utils -*/ -// Uint64Sub does x := x-y and returns the new value of x + + func Uint64Sub(x *atomic.Uint64, y uint64) uint64 { - // Uses math + return x.Add(^(y - 1)) } diff --git a/utils/channel/funcs.go b/utils/channel/funcs.go index 614414a3f..2513314dd 100644 --- a/utils/channel/funcs.go +++ b/utils/channel/funcs.go @@ -1,7 +1,7 @@ package channel -// DrainForever will drain the channels in separate go routines in a loop forever -// Intended for tests only + + func DrainForever[T any](chs ...<-chan T) { for _, ch := range chs { go func() { @@ -12,17 +12,17 @@ func DrainForever[T any](chs ...<-chan T) { } } -// Nudger can be used to make a goroutine ('A') sleep, and have another goroutine ('B') wake him up -// A will not block if B is not asleep. + + type Nudger struct { - C chan struct{} // Receive on C to sleep + C chan struct{} } func NewNudger() *Nudger { return &Nudger{make(chan struct{})} } -// Nudge wakes up the waiting thread if any. Non blocking. 
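// Illustrative sketch, not part of dymint: Uint64Sub above relies on two's complement —
// ^(y-1) equals -y modulo 2^64 — so x.Add(^(y-1)) performs an atomic x -= y even though
// atomic.Uint64.Add only accepts a uint64 delta. A self-contained check of the identity.
package main

import (
	"fmt"
	"sync/atomic"
)

func uint64Sub(x *atomic.Uint64, y uint64) uint64 {
	return x.Add(^(y - 1)) // adding the two's complement of y subtracts y (mod 2^64)
}

func main() {
	var x atomic.Uint64
	x.Store(100)
	fmt.Println(uint64Sub(&x, 30)) // 70
	fmt.Println(x.Load())          // 70
}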
+ func (w Nudger) Nudge() { select { case w.C <- struct{}{}: diff --git a/utils/errors/err_group.go b/utils/errors/err_group.go index c4d82409a..6ddb5d414 100644 --- a/utils/errors/err_group.go +++ b/utils/errors/err_group.go @@ -5,14 +5,12 @@ import ( "golang.org/x/sync/errgroup" ) -/* -TODO: move to sdk-utils -*/ - -// ErrGroupGoLog calls eg.Go on the errgroup but it will log the error immediately when it occurs -// instead of waiting for all goroutines in the group to finish first. This has the advantage of making sure all -// errors are logged, not just the first one, and it is more immediate. Also, it is guaranteed, in case that -// of the goroutines is not properly context aware. + + + + + + func ErrGroupGoLog(eg *errgroup.Group, logger types.Logger, fn func() error) { eg.Go(func() error { err := fn() diff --git a/utils/event/funcs.go b/utils/event/funcs.go index 8b76b7ce0..000cbf3f0 100644 --- a/utils/event/funcs.go +++ b/utils/event/funcs.go @@ -12,9 +12,9 @@ import ( tmquery "github.com/tendermint/tendermint/libs/pubsub/query" ) -// MustSubscribe subscribes to events and sends back a callback -// clientID is essentially the subscriber id, see https://pkg.go.dev/github.com/tendermint/tendermint/libs/pubsub#pkg-overview -// - will not panic on context cancel or deadline exceeded + + + func MustSubscribe( ctx context.Context, pubsubServer *pubsub.Server, @@ -46,7 +46,7 @@ func MustSubscribe( } } -// MustPublish submits an event or panics - will not panic on context cancel or deadline exceeded + func MustPublish(ctx context.Context, pubsubServer *pubsub.Server, msg interface{}, events map[string][]string) { err := pubsubServer.PublishWithEvents(ctx, msg, events) if err != nil && !errors.Is(err, context.Canceled) { @@ -54,7 +54,7 @@ func MustPublish(ctx context.Context, pubsubServer *pubsub.Server, msg interface } } -// QueryFor returns a query for the given event. + func QueryFor(eventTypeKey, eventType string) tmpubsub.Query { return tmquery.MustParse(fmt.Sprintf("%s='%s'", eventTypeKey, eventType)) } diff --git a/utils/queue/queue.go b/utils/queue/queue.go index 17b760ecd..4600ddd86 100644 --- a/utils/queue/queue.go +++ b/utils/queue/queue.go @@ -5,40 +5,40 @@ import ( "strings" ) -// Queue holds elements in an array-list. -// This implementation is NOT thread-safe! + + type Queue[T any] struct { elements []T } -// FromSlice instantiates a new queue from the given slice. + func FromSlice[T any](s []T) *Queue[T] { return &Queue[T]{elements: s} } -// New instantiates a new empty queue + func New[T any]() *Queue[T] { return &Queue[T]{elements: make([]T, 0)} } -// Enqueue adds a value to the end of the queue + func (q *Queue[T]) Enqueue(values ...T) { q.elements = append(q.elements, values...) } -// DequeueAll returns all queued elements (FIFO order) and cleans the entire queue. + func (q *Queue[T]) DequeueAll() []T { values := q.elements q.elements = make([]T, 0) return values } -// Size returns number of elements within the queue. + func (q *Queue[T]) Size() int { return len(q.elements) } -// String returns a string representation. 
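// Illustrative sketch, not part of dymint: usage of the Nudger shown above. One goroutine
// sleeps by receiving on C; another wakes it with Nudge(), which the contract above says
// does not block when nobody is waiting. The import alias matches how manager.go refers
// to utils/channel; the example flow itself is an assumption.
package main

import (
	"fmt"
	"time"

	uchannel "github.com/dymensionxyz/dymint/utils/channel"
)

func main() {
	nudger := uchannel.NewNudger()

	go func() {
		<-nudger.C // goroutine 'A' sleeps here until nudged
		fmt.Println("woken up")
	}()

	time.Sleep(10 * time.Millisecond) // give 'A' time to start waiting
	nudger.Nudge()                    // goroutine 'B' wakes 'A'; a no-op if 'A' were not asleep
	time.Sleep(10 * time.Millisecond) // let the print run before exiting
}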
+ func (q *Queue[T]) String() string { str := "Queue[" values := []string{} diff --git a/utils/retry/backoff.go b/utils/retry/backoff.go index 05d7ac53d..b9276edaa 100644 --- a/utils/retry/backoff.go +++ b/utils/retry/backoff.go @@ -10,14 +10,14 @@ const ( defaultBackoffFactor = 2 ) -// BackoffConfig is a configuration for a backoff, it's used to create new instances + type BackoffConfig struct { InitialDelay time.Duration `json:"initial_delay"` MaxDelay time.Duration `json:"max_delay"` GrowthFactor float64 `json:"growth_factor"` } -// Backoff creates a new Backoff instance with the configuration (starting at 0 attempts made so far) + func (c BackoffConfig) Backoff() Backoff { return Backoff{ delay: c.InitialDelay, @@ -40,16 +40,16 @@ func WithInitialDelay(d time.Duration) BackoffOption { } } -// WithMaxDelay sets the maximum delay for the backoff. The delay will not exceed this value. -// Set 0 to disable the maximum delay. + + func WithMaxDelay(d time.Duration) BackoffOption { return func(b *BackoffConfig) { b.MaxDelay = d } } -// WithGrowthFactor sets the growth factor for the backoff. The delay will be multiplied by this factor on each call to Delay. -// The factor should be greater than 1.0 + + func WithGrowthFactor(x float64) BackoffOption { return func(b *BackoffConfig) { b.GrowthFactor = x @@ -68,7 +68,7 @@ func NewBackoffConfig(opts ...BackoffOption) BackoffConfig { return ret } -// Delay returns the current delay. The subsequent delay will be increased by the growth factor up to the maximum. + func (b *Backoff) Delay() time.Duration { ret := b.delay b.delay = time.Duration(float64(b.delay) * b.growthFactor) @@ -78,7 +78,7 @@ func (b *Backoff) Delay() time.Duration { return ret } -// Sleep sleeps for the current delay. The subsequent delay will be increased by the growth factor up to the maximum. + func (b *Backoff) Sleep() { time.Sleep(b.Delay()) } diff --git a/utils/retry/doc.go b/utils/retry/doc.go index fe69a7266..6d41b0f16 100644 --- a/utils/retry/doc.go +++ b/utils/retry/doc.go @@ -1,4 +1,4 @@ -// Package retry shall be used alongside "github.com/avast/retry-go/v4" for simple retry patterns -// which the avast package makes difficult. -// Methods in here should be simple and not warrant another dependency. 
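// Illustrative sketch, not part of dymint: usage of utils/retry as shown above.
// NewBackoffConfig layers the With* options over the defaults, Backoff() produces a fresh
// instance starting at the initial delay, and each Delay() call grows the next delay by
// the growth factor, capped at MaxDelay. The loop and printed values are an assumption.
package main

import (
	"fmt"
	"time"

	"github.com/dymensionxyz/dymint/utils/retry"
)

func main() {
	cfg := retry.NewBackoffConfig(
		retry.WithInitialDelay(100*time.Millisecond),
		retry.WithGrowthFactor(2),
		retry.WithMaxDelay(time.Second),
	)

	b := cfg.Backoff()
	for attempt := 0; attempt < 5; attempt++ {
		// roughly 100ms, 200ms, 400ms, 800ms, then capped at 1s
		fmt.Println("attempt", attempt, "next delay", b.Delay())
	}
}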
+ + + package retry diff --git a/version/version.go b/version/version.go index acbae16e8..d461e5b9b 100644 --- a/version/version.go +++ b/version/version.go @@ -15,5 +15,5 @@ func GetDRSVersion() (uint32, error) { if err != nil { return uint32(0), fmt.Errorf("converting DRS version to int: %v", err) } - return uint32(currentDRS), nil //nolint:gosec // DRS is uint32 + return uint32(currentDRS), nil } From 522cf1cde9b2949d96b6a8e87b2664fce0d9d070 Mon Sep 17 00:00:00 2001 From: danwt <30197399+danwt@users.noreply.github.com> Date: Thu, 12 Dec 2024 17:51:35 +0000 Subject: [PATCH 2/4] restore --- block/balance.go | 2 +- block/block.go | 110 ++--- block/block_cache.go | 2 +- block/consensus.go | 2 +- block/executor.go | 44 +- block/fork.go | 98 ++-- block/fraud.go | 12 +- block/initchain.go | 12 +- block/manager.go | 146 +++--- block/modes.go | 40 +- block/p2p.go | 16 +- block/produce.go | 126 ++--- block/pruning.go | 16 +- block/retriever.go | 32 +- block/sequencers.go | 58 +-- block/slvalidator.go | 70 +-- block/state.go | 50 +- block/submit.go | 94 ++-- block/sync.go | 44 +- block/validate.go | 12 +- cmd/dymint/commands/init.go | 8 +- cmd/dymint/commands/root.go | 8 +- cmd/dymint/commands/show_node_id.go | 4 +- cmd/dymint/commands/show_sequencer.go | 4 +- cmd/dymint/commands/start.go | 12 +- cmd/dymint/main.go | 2 +- config/config.go | 62 +-- config/defaults.go | 10 +- config/flags.go | 10 +- config/p2p.go | 20 +- config/rpc.go | 46 +- config/toml.go | 16 +- conv/config.go | 15 +- conv/crypto.go | 2 +- da/avail/avail.go | 68 +-- da/celestia/celestia.go | 68 +-- da/celestia/config.go | 6 +- da/celestia/mock/messages.go | 22 +- da/celestia/mock/server.go | 8 +- da/celestia/rpc.go | 18 +- da/celestia/types/rpc.go | 6 +- da/celestia/types/types.go | 66 +-- da/da.go | 126 ++--- da/errors.go | 22 +- da/grpc/grpc.go | 30 +- da/grpc/mockserv/mockserv.go | 2 +- da/local/local.go | 36 +- da/registry/registry.go | 6 +- indexers/blockindexer/block.go | 14 +- indexers/blockindexer/kv/kv.go | 140 +++--- indexers/blockindexer/null/null.go | 2 +- indexers/blockindexer/query_range.go | 30 +- indexers/txindex/indexer.go | 26 +- indexers/txindex/indexer_service.go | 30 +- indexers/txindex/kv/kv.go | 206 ++++---- indexers/txindex/kv/utils.go | 2 +- indexers/txindex/null/null.go | 8 +- mempool/cache.go | 32 +- mempool/clist/clist.go | 148 +++--- mempool/ids.go | 2 +- mempool/mempool.go | 128 ++--- mempool/metrics.go | 40 +- mempool/mock/mempool.go | 2 +- mempool/tx.go | 12 +- mempool/v1/mempool.go | 448 +++++++++--------- mempool/v1/tx.go | 38 +- .../dymint/block/mock_ExecutorI.go | 124 ++--- .../dymint/block/mock_FraudHandler.go | 18 +- .../dymint/da/avail/mock_SubstrateApiI.go | 380 +++++++-------- .../celestia/types/mock_CelestiaRPCClient.go | 94 ++-- .../da/mock_DataAvailabilityLayerClient.go | 76 +-- .../dymint/p2p/mock_ProposerGetter.go | 20 +- .../dymint/p2p/mock_StateGetter.go | 20 +- .../settlement/dymension/mock_CosmosClient.go | 98 ++-- .../dymint/settlement/mock_ClientI.go | 142 +++--- .../dymensionxyz/dymint/store/mock_Store.go | 272 +++++------ .../sequencer/types/mock_QueryClient.go | 92 ++-- .../dymension/rollapp/mock_QueryClient.go | 128 ++--- .../dymension/sequencer/mock_QueryClient.go | 104 ++-- .../tendermint/abci/types/mock_Application.go | 110 ++--- .../tendermint/proxy/mock_AppConnConsensus.go | 60 +-- .../tendermint/proxy/mock_AppConns.go | 94 ++-- node/events/types.go | 12 +- node/mempool/mempool.go | 18 +- node/node.go | 46 +- p2p/block.go | 22 +- p2p/block_sync.go | 42 +- 
p2p/block_sync_dag.go | 30 +- p2p/blocks_received.go | 12 +- p2p/client.go | 140 +++--- p2p/events.go | 18 +- p2p/gossip.go | 26 +- p2p/validator.go | 22 +- rpc/client/client.go | 233 ++++----- rpc/client/utils.go | 12 +- rpc/json/handler.go | 30 +- rpc/json/service.go | 22 +- rpc/json/types.go | 22 +- rpc/json/ws.go | 8 +- rpc/middleware/client.go | 8 +- rpc/middleware/registry.go | 12 +- rpc/middleware/status.go | 2 +- rpc/server.go | 30 +- settlement/config.go | 6 +- settlement/dymension/cosmosclient.go | 10 +- settlement/dymension/dymension.go | 98 ++-- settlement/dymension/events.go | 20 +- settlement/dymension/options.go | 12 +- settlement/dymension/utils.go | 8 +- settlement/errors.go | 2 +- settlement/events.go | 14 +- settlement/grpc/grpc.go | 42 +- settlement/local/local.go | 50 +- settlement/registry/registry.go | 14 +- settlement/settlement.go | 64 +-- store/badger.go | 88 ++-- store/prefix.go | 26 +- store/pruning.go | 8 +- store/store.go | 48 +- store/storeIface.go | 54 +-- test/loadtime/cmd/load/main.go | 22 +- test/loadtime/cmd/report/main.go | 6 +- test/loadtime/payload/payload.go | 30 +- test/loadtime/report/report.go | 74 +-- testutil/block.go | 14 +- testutil/logger.go | 24 +- testutil/mocks.go | 52 +- testutil/node.go | 6 +- testutil/p2p.go | 6 +- testutil/rpc.go | 4 +- testutil/types.go | 26 +- types/batch.go | 20 +- types/block.go | 66 +-- types/block_source.go | 2 +- types/conv.go | 32 +- types/errors.go | 56 +-- types/evidence.go | 22 +- types/hashing.go | 4 +- types/instruction.go | 2 +- types/logger.go | 2 +- .../dymensionxyz/dymension/rollapp/errors.go | 6 +- .../dymensionxyz/dymension/rollapp/events.go | 4 +- .../pb/dymensionxyz/dymension/rollapp/keys.go | 10 +- .../dymension/rollapp/message_update_state.go | 10 +- .../dymensionxyz/dymension/rollapp/params.go | 2 +- .../dymension/sequencer/events.go | 14 +- .../dymensionxyz/dymension/sequencer/keys.go | 40 +- .../dymension/sequencer/params.go | 2 +- types/rollapp.go | 2 +- types/sequencer_set.go | 68 +-- types/serialization.go | 80 ++-- types/state.go | 38 +- types/tx.go | 14 +- types/validation.go | 12 +- utils/atomic/funcs.go | 8 +- utils/channel/funcs.go | 12 +- utils/errors/err_group.go | 14 +- utils/event/funcs.go | 10 +- utils/queue/queue.go | 16 +- utils/retry/backoff.go | 16 +- utils/retry/doc.go | 6 +- version/version.go | 2 +- 162 files changed, 3530 insertions(+), 3486 deletions(-) diff --git a/block/balance.go b/block/balance.go index f77b518f3..9c0e301fe 100644 --- a/block/balance.go +++ b/block/balance.go @@ -14,7 +14,7 @@ import ( const CheckBalancesInterval = 3 * time.Minute - +// MonitorBalances checks the balances of the node and updates the gauges for prometheus func (m *Manager) MonitorBalances(ctx context.Context) error { ticker := time.NewTicker(CheckBalancesInterval) defer ticker.Stop() diff --git a/block/block.go b/block/block.go index b8a6f3913..4b4562794 100644 --- a/block/block.go +++ b/block/block.go @@ -11,12 +11,12 @@ import ( "github.com/dymensionxyz/dymint/types" ) - +// applyBlockWithFraudHandling calls applyBlock and validateBlockBeforeApply with fraud handling. func (m *Manager) applyBlockWithFraudHandling(block *types.Block, commit *types.Commit, blockMetaData types.BlockMetaData) error { validateWithFraud := func() error { if err := m.validateBlockBeforeApply(block, commit); err != nil { m.blockCache.Delete(block.Header.Height) - + // TODO: can we take an action here such as dropping the peer / reducing their reputation? 
return fmt.Errorf("block not valid at height %d, dropping it: err:%w", block.Header.Height, err) } @@ -29,27 +29,27 @@ func (m *Manager) applyBlockWithFraudHandling(block *types.Block, commit *types. err := validateWithFraud() if errors.Is(err, gerrc.ErrFault) { - - - - + // Here we handle the fault by calling the fraud handler. + // FraudHandler is an interface that defines a method to handle faults. Implement this interface to handle faults + // in specific ways. For example, once a fault is detected, it publishes a DataHealthStatus event to the + // pubsub which sets the node in a frozen state. m.FraudHandler.HandleFault(m.Ctx, err) } return err } - - - - - - +// applyBlock applies the block to the store and the abci app. +// Contract: block and commit must be validated before calling this function! +// steps: save block -> execute block with app -> update state -> commit block to app -> update state's height and commit result. +// As the entire process can't be atomic we need to make sure the following condition apply before +// - block height is the expected block height on the store (height + 1). +// - block height is the expected block height on the app (last block height + 1). func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMetaData types.BlockMetaData) error { var retainHeight int64 - - + // TODO: add switch case to have defined behavior for each case. + // validate block height if block.Header.Height != m.State.NextHeight() { return types.ErrInvalidBlockHeight } @@ -58,13 +58,13 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta m.logger.Debug("Applying block", "height", block.Header.Height, "source", blockMetaData.Source.String()) - + // Check if the app's last block height is the same as the currently produced block height isBlockAlreadyApplied, err := m.isHeightAlreadyApplied(block.Header.Height) if err != nil { return fmt.Errorf("check if block is already applied: %w", err) } - - + // In case the following true, it means we crashed after the app commit but before updating the state + // In that case we'll want to align the state with the app commit result, as if the block was applied. if isBlockAlreadyApplied { err := m.UpdateStateFromApp(block.Header.Hash()) if err != nil { @@ -73,7 +73,7 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta m.logger.Info("updated state from app commit", "height", block.Header.Height) } else { var appHash []byte - + // Start applying the block assuming no inconsistency was found. _, err = m.Store.SaveBlock(block, commit, nil) if err != nil { return fmt.Errorf("save block: %w", err) @@ -104,15 +104,15 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta return fmt.Errorf("add drs version: %w", err) } - + // Commit block to app appHash, retainHeight, err = m.Executor.Commit(m.State, block, responses) if err != nil { return fmt.Errorf("commit block: %w", err) } - - - + // Prune old heights, if requested by ABCI app. + // retainHeight is determined by currentHeight - min-retain-blocks (app.toml config). + // Unless max_age_num_blocks in consensus params is higher than min-retain-block, then max_age_num_blocks will be used instead of min-retain-blocks. if 0 < retainHeight { select { @@ -121,25 +121,25 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta m.logger.Debug("pruning channel full. 
skipping pruning", "retainHeight", retainHeight) } } - - + // Update the state with the new app hash, and store height from the commit. + // Every one of those, if happens before commit, prevents us from re-executing the block in case failed during commit. m.Executor.UpdateStateAfterCommit(m.State, responses, appHash, block.Header.Height, block.Header.Hash()) } - + // save last block time used to calculate batch skew time m.LastBlockTime.Store(block.Header.GetTimestamp().UTC().UnixNano()) - - - - - - - - - - - + // Update the store: + // 1. Save the proposer for the current height to the store. + // 2. Update the proposer in the state in case of rotation. + // 3. Save the state to the store (independently of the height). Here the proposer might differ from (1). + // 4. Save the last block sequencer set to the store if it's present (only applicable in the sequencer mode). + // here, (3) helps properly handle reboots (specifically when there's rotation). + // If reboot happens after block H (which rotates seqA -> seqB): + // - Block H+1 will be signed by seqB. + // - The state must have seqB as proposer. + + // Proposer cannot be empty while applying the block proposer := m.State.GetProposer() if proposer == nil { return fmt.Errorf("logic error: got nil proposer while applying block") @@ -147,28 +147,28 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta batch := m.Store.NewBatch() - - + // 1. Save the proposer for the current height to the store. + // Proposer in the store is used for RPC queries. batch, err = m.Store.SaveProposer(block.Header.Height, *proposer, batch) if err != nil { return fmt.Errorf("save proposer: %w", err) } - + // 2. Update the proposer in the state in case of rotation happened on the rollapp level (not necessarily on the hub yet). isProposerUpdated := m.Executor.UpdateProposerFromBlock(m.State, m.Sequencers, block) - + // 3. Save the state to the store (independently of the height). Here the proposer might differ from (1). batch, err = m.Store.SaveState(m.State, batch) if err != nil { return fmt.Errorf("update state: %w", err) } - - - - - - + // 4. Save the last block sequencer set to the store if it's present (only applicable in the sequencer mode). + // The set from the state is dumped to memory on reboots. It helps to avoid sending unnecessary + // UspertSequencer consensus messages on reboots. This is not a 100% solution, because the sequencer set + // is not persisted in the store in full node mode. It's only used in the proposer mode. Therefore, + // on rotation from the full node to the proposer, the sequencer set is duplicated as consensus msgs. + // Though single-time duplication it's not a big deal. if len(blockMetaData.SequencerSet) != 0 { batch, err = m.Store.SaveLastBlockSequencerSet(blockMetaData.SequencerSet, batch) if err != nil { @@ -185,16 +185,16 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta m.blockCache.Delete(block.Header.Height) - + // validate whether configuration params and rollapp consensus params keep in line, after rollapp params are updated from the responses received in the block execution err = m.ValidateConfigWithRollappParams() if err != nil { return err } - - - - + // Check if there was an Update for the proposer and if I am the new proposer. + // If so, restart so I can start as the proposer. + // For current proposer, we don't want to restart because we still need to send the last batch. + // This will be done as part of the `rotate` function. 
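// Illustrative sketch, not part of dymint: the retain-height hand-off above is a
// non-blocking send into the buffered pruningC channel — when the pruning loop is busy
// and the buffer is full, the height is dropped and retried on a later block instead of
// stalling applyBlock. Names below are assumptions for illustration.
package main

import "fmt"

func signalPruning(pruningC chan int64, retainHeight int64) {
	select {
	case pruningC <- retainHeight:
		// the background pruning loop will pick this up
	default:
		fmt.Println("pruning channel full. skipping pruning", "retainHeight", retainHeight)
	}
}

func main() {
	pruningC := make(chan int64, 1) // small buffer, as with the manager's pruningC
	signalPruning(pruningC, 100)    // buffer has room: accepted
	signalPruning(pruningC, 200)    // buffer full: skipped without blocking the caller
}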
if isProposerUpdated && m.AmIProposerOnRollapp() { panic("I'm the new Proposer now. restarting as a proposer") } @@ -202,16 +202,16 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta return nil } - +// isHeightAlreadyApplied checks if the block height is already applied to the app. func (m *Manager) isHeightAlreadyApplied(blockHeight uint64) (bool, error) { proxyAppInfo, err := m.Executor.GetAppInfo() if err != nil { return false, errorsmod.Wrap(err, "get app info") } - isBlockAlreadyApplied := uint64(proxyAppInfo.LastBlockHeight) == blockHeight + isBlockAlreadyApplied := uint64(proxyAppInfo.LastBlockHeight) == blockHeight //nolint:gosec // LastBlockHeight is always positive - + // TODO: add switch case to validate better the current app state return isBlockAlreadyApplied, nil } @@ -240,7 +240,7 @@ func (m *Manager) attemptApplyCachedBlocks() error { return nil } - +// This function validates the block and commit against the state before applying it. func (m *Manager) validateBlockBeforeApply(block *types.Block, commit *types.Commit) error { return types.ValidateProposedTransition(m.State, block, commit, m.State.GetProposerPubKey()) } diff --git a/block/block_cache.go b/block/block_cache.go index b74176d9e..b224f69fc 100644 --- a/block/block_cache.go +++ b/block/block_cache.go @@ -5,7 +5,7 @@ import ( ) type Cache struct { - + // concurrency managed by Manager.retrieverMu mutex cache map[uint64]types.CachedBlock } diff --git a/block/consensus.go b/block/consensus.go index 87cc6c39d..94ce55f15 100644 --- a/block/consensus.go +++ b/block/consensus.go @@ -47,7 +47,7 @@ func ConsensusMsgSigner(m proto.Message) (sdk.AccAddress, error) { } } - +// ConsensusMsgsOnSequencerSetUpdate forms a list of consensus messages to handle the sequencer set update. func ConsensusMsgsOnSequencerSetUpdate(newSequencers []types.Sequencer) ([]proto.Message, error) { msgs := make([]proto.Message, 0, len(newSequencers)) for _, s := range newSequencers { diff --git a/block/executor.go b/block/executor.go index 9f7d72f8b..f3a1421c5 100644 --- a/block/executor.go +++ b/block/executor.go @@ -19,7 +19,7 @@ import ( protoutils "github.com/dymensionxyz/dymint/utils/proto" ) - +// default minimum block max size allowed. not specific reason to set it to 10K, but we need to avoid no transactions can be included in a block. const minBlockMaxBytes = 10000 type ExecutorI interface { @@ -33,7 +33,7 @@ type ExecutorI interface { UpdateStateAfterCommit(s *types.State, resp *tmstate.ABCIResponses, appHash []byte, height uint64, lastHeaderHash [32]byte) UpdateProposerFromBlock(s *types.State, seqSet *types.SequencerSet, block *types.Block) bool - + /* Consensus Messages */ AddConsensusMsgs(...proto2.Message) GetConsensusMsgs() []proto2.Message @@ -41,7 +41,7 @@ type ExecutorI interface { var _ ExecutorI = new(Executor) - +// Executor creates and applies blocks and maintains state. type Executor struct { localAddress []byte chainID string @@ -55,8 +55,8 @@ type Executor struct { logger types.Logger } - - +// NewExecutor creates new instance of BlockExecutor. +// localAddress will be used in sequencer mode only. func NewExecutor( localAddress []byte, chainID string, @@ -79,23 +79,23 @@ func NewExecutor( return &be, nil } - - +// AddConsensusMsgs adds new consensus msgs to the queue. +// The method is thread-safe. func (e *Executor) AddConsensusMsgs(msgs ...proto2.Message) { e.consensusMsgQueue.Add(msgs...) } - - +// GetConsensusMsgs dequeues consensus msgs from the queue. +// The method is thread-safe. 
func (e *Executor) GetConsensusMsgs() []proto2.Message { return e.consensusMsgQueue.Get() } - +// InitChain calls InitChainSync using consensus connection to app. func (e *Executor) InitChain(genesis *tmtypes.GenesisDoc, genesisChecksum string, valset []*tmtypes.Validator) (*abci.ResponseInitChain, error) { valUpdates := abci.ValidatorUpdates{} - + // prepare the validator updates as expected by the ABCI app for _, validator := range valset { tmkey, err := tmcrypto.PubKeyToProto(validator.PubKey) if err != nil { @@ -136,7 +136,7 @@ func (e *Executor) InitChain(genesis *tmtypes.GenesisDoc, genesisChecksum string }) } - +// CreateBlock reaps transactions from mempool and builds a block. func (e *Executor) CreateBlock( height uint64, lastCommit *types.Commit, @@ -144,8 +144,8 @@ func (e *Executor) CreateBlock( state *types.State, maxBlockDataSizeBytes uint64, ) *types.Block { - maxBlockDataSizeBytes = min(maxBlockDataSizeBytes, uint64(max(minBlockMaxBytes, state.ConsensusParams.Block.MaxBytes))) - mempoolTxs := e.mempool.ReapMaxBytesMaxGas(int64(maxBlockDataSizeBytes), state.ConsensusParams.Block.MaxGas) + maxBlockDataSizeBytes = min(maxBlockDataSizeBytes, uint64(max(minBlockMaxBytes, state.ConsensusParams.Block.MaxBytes))) //nolint:gosec // MaxBytes is always positive + mempoolTxs := e.mempool.ReapMaxBytesMaxGas(int64(maxBlockDataSizeBytes), state.ConsensusParams.Block.MaxGas) //nolint:gosec // size is always positive and falls in int64 block := &types.Block{ Header: types.Header{ @@ -178,7 +178,7 @@ func (e *Executor) CreateBlock( return block } - +// Commit commits the block func (e *Executor) Commit(state *types.State, block *types.Block, resp *tmstate.ABCIResponses) ([]byte, int64, error) { appHash, retainHeight, err := e.commit(state, block, resp.DeliverTxs) if err != nil { @@ -193,7 +193,7 @@ func (e *Executor) Commit(state *types.State, block *types.Block, resp *tmstate. return appHash, retainHeight, nil } - +// GetAppInfo returns the latest AppInfo from the proxyApp. func (e *Executor) GetAppInfo() (*abci.ResponseInfo, error) { return e.proxyAppQueryConn.InfoSync(abci.RequestInfo{}) } @@ -214,7 +214,7 @@ func (e *Executor) commit(state *types.State, block *types.Block, deliverTxs []* maxBytes := state.ConsensusParams.Block.MaxBytes maxGas := state.ConsensusParams.Block.MaxGas - err = e.mempool.Update(int64(block.Header.Height), fromDymintTxs(block.Data.Txs), deliverTxs) + err = e.mempool.Update(int64(block.Header.Height), fromDymintTxs(block.Data.Txs), deliverTxs) //nolint:gosec // height is non-negative and falls in int64 if err != nil { return nil, 0, err } @@ -224,7 +224,7 @@ func (e *Executor) commit(state *types.State, block *types.Block, deliverTxs []* return resp.Data, resp.RetainHeight, err } - +// ExecuteBlock executes the block and returns the ABCIResponses. Block should be valid (passed validation checks). 
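// Illustrative sketch, not part of dymint: CreateBlock above clamps the requested data
// size to the consensus-param Block.MaxBytes while never letting the limit fall below
// minBlockMaxBytes (10000), so some transactions can always fit in a block.
// clampBlockSize is an assumption that isolates just that expression.
package main

import "fmt"

const minBlockMaxBytes = 10000

func clampBlockSize(requestedBytes uint64, consensusMaxBytes int64) uint64 {
	return min(requestedBytes, uint64(max(minBlockMaxBytes, consensusMaxBytes)))
}

func main() {
	fmt.Println(clampBlockSize(500_000, 1_000_000)) // bounded by the caller's request: 500000
	fmt.Println(clampBlockSize(500_000, 1_000))     // tiny consensus param is floored: 10000
}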
func (e *Executor) ExecuteBlock(block *types.Block) (*tmstate.ABCIResponses, error) { abciResponses := new(tmstate.ABCIResponses) abciResponses.DeliverTxs = make([]*abci.ResponseDeliverTx, len(block.Data.Txs)) @@ -273,7 +273,7 @@ func (e *Executor) ExecuteBlock(block *types.Block) (*tmstate.ABCIResponses, err } } - abciResponses.EndBlock, err = e.proxyAppConsensusConn.EndBlockSync(abci.RequestEndBlock{Height: int64(block.Header.Height)}) + abciResponses.EndBlock, err = e.proxyAppConsensusConn.EndBlockSync(abci.RequestEndBlock{Height: int64(block.Header.Height)}) //nolint:gosec // height is non-negative and falls in int64 if err != nil { return nil, err } @@ -305,14 +305,14 @@ func (e *Executor) publishEvents(resp *tmstate.ABCIResponses, block *types.Block for _, ev := range abciBlock.Evidence.Evidence { err = multierr.Append(err, e.eventBus.PublishEventNewEvidence(tmtypes.EventDataNewEvidence{ Evidence: ev, - Height: int64(block.Header.Height), + Height: int64(block.Header.Height), //nolint:gosec // height is non-negative and falls in int64 })) } for i, dtx := range resp.DeliverTxs { err = multierr.Append(err, e.eventBus.PublishEventTx(tmtypes.EventDataTx{ TxResult: abci.TxResult{ - Height: int64(block.Header.Height), - Index: uint32(i), + Height: int64(block.Header.Height), //nolint:gosec // block height is within int64 range + Index: uint32(i), //nolint:gosec // num of deliver txs is less than 2^32 Tx: abciBlock.Data.Txs[i], Result: *dtx, }, diff --git a/block/fork.go b/block/fork.go index 5f1ff5878..c559ba132 100644 --- a/block/fork.go +++ b/block/fork.go @@ -20,9 +20,9 @@ const ( ForkMessage = "rollapp fork detected. please rollback to height previous to rollapp_revision_start_height." ) - +// MonitorForkUpdateLoop monitors the hub for fork updates in a loop func (m *Manager) MonitorForkUpdateLoop(ctx context.Context) error { - ticker := time.NewTicker(ForkMonitorInterval) + ticker := time.NewTicker(ForkMonitorInterval) // TODO make this configurable defer ticker.Stop() for { @@ -37,7 +37,7 @@ func (m *Manager) MonitorForkUpdateLoop(ctx context.Context) error { } } - +// checkForkUpdate checks if the hub has a fork update func (m *Manager) checkForkUpdate(msg string) error { defer m.forkMu.Unlock() m.forkMu.Lock() @@ -69,7 +69,7 @@ func (m *Manager) checkForkUpdate(msg string) error { return nil } - +// createInstruction returns instruction with fork information func (m *Manager) createInstruction(expectedRevision types.Revision) (types.Instruction, error) { obsoleteDrs, err := m.SLClient.GetObsoleteDrs() if err != nil { @@ -85,11 +85,11 @@ func (m *Manager) createInstruction(expectedRevision types.Revision) (types.Inst return instruction, nil } - - - - - +// shouldStopNode determines if a rollapp node should be stopped based on revision criteria. +// +// This method checks two conditions to decide if a node should be stopped: +// 1. If the next state height is greater than or equal to the rollapp's revision start height. +// 2. 
If the block's app version (equivalent to revision) is less than the rollapp's revision func shouldStopNode( expectedRevision types.Revision, nextHeight uint64, @@ -98,7 +98,7 @@ func shouldStopNode( return nextHeight >= expectedRevision.StartHeight && actualRevisionNumber < expectedRevision.Number } - +// getRevisionFromSL returns revision data for the specific height func (m *Manager) getRevisionFromSL(height uint64) (types.Revision, error) { rollapp, err := m.SLClient.GetRollapp() if err != nil { @@ -107,26 +107,26 @@ func (m *Manager) getRevisionFromSL(height uint64) (types.Revision, error) { return rollapp.GetRevisionForHeight(height), nil } - +// doFork creates fork blocks and submits a new batch with them func (m *Manager) doFork(instruction types.Instruction) error { - + // if fork (two) blocks are not produced and applied yet, produce them if m.State.Height() < instruction.RevisionStartHeight+1 { - + // add consensus msgs to upgrade DRS to running node version (msg is created in all cases and RDK will upgrade if necessary). If returns error if running version is deprecated. consensusMsgs, err := m.prepareDRSUpgradeMessages(instruction.FaultyDRS) if err != nil { return fmt.Errorf("prepare DRS upgrade messages: %v", err) } - + // add consensus msg to bump the account sequences in all fork cases consensusMsgs = append(consensusMsgs, &sequencers.MsgBumpAccountSequences{Authority: authtypes.NewModuleAddress("sequencers").String()}) - + // create fork blocks err = m.createForkBlocks(instruction, consensusMsgs) if err != nil { return fmt.Errorf("validate fork blocks: %v", err) } } - + // submit fork batch including two fork blocks if err := m.submitForkBatch(instruction.RevisionStartHeight); err != nil { return fmt.Errorf("submit fork batch: %v", err) } @@ -134,13 +134,13 @@ func (m *Manager) doFork(instruction types.Instruction) error { return nil } - - - - - - - +// prepareDRSUpgradeMessages prepares consensus messages for DRS upgrades. +// It performs version validation and generates the necessary upgrade messages for the sequencer. +// +// The function implements the following logic: +// - If no faulty DRS version is provided (faultyDRS is nil), returns no messages +// - Validates the current DRS version against the potentially faulty version +// - Generates an upgrade message with the current valid DRS version func (m *Manager) prepareDRSUpgradeMessages(obsoleteDRS []uint32) ([]proto.Message, error) { drsVersion, err := version.GetDRSVersion() if err != nil { @@ -161,13 +161,13 @@ func (m *Manager) prepareDRSUpgradeMessages(obsoleteDRS []uint32) ([]proto.Messa }, nil } - - - +// create the first two blocks of the new revision +// the first one should have a cons message(s) +// both should not have tx's func (m *Manager) createForkBlocks(instruction types.Instruction, consensusMsgs []proto.Message) error { nextHeight := m.State.NextHeight() - + // Revise already created fork blocks for h := instruction.RevisionStartHeight; h < nextHeight; h++ { b, err := m.Store.LoadBlock(h) if err != nil { @@ -183,7 +183,7 @@ func (m *Manager) createForkBlocks(instruction types.Instruction, consensusMsgs } } - + // create two empty blocks including consensus msgs in the first one for h := nextHeight; h < instruction.RevisionStartHeight+2; h++ { if h == instruction.RevisionStartHeight { m.Executor.AddConsensusMsgs(consensusMsgs...) 
@@ -201,13 +201,13 @@ func (m *Manager) createForkBlocks(instruction types.Instruction, consensusMsgs return nil } - - - - - - - +// submitForkBatch verifies and, if necessary, creates a batch at the specified height. +// This function is critical for maintaining batch consistency in the blockchain while +// preventing duplicate batch submissions. +// +// The function performs the following operations: +// 1. Checks for an existing batch at the specified height via SLClient +// 2. If no batch exists, creates and submits a new one func (m *Manager) submitForkBatch(height uint64) error { resp, err := m.SLClient.GetBatchAtHeight(height) if err != nil && !errors.Is(err, gerrc.ErrNotFound) { @@ -225,62 +225,62 @@ func (m *Manager) submitForkBatch(height uint64) error { return nil } - +// updateStateForNextRevision updates dymint stored state in case next height corresponds to a new revision, to enable syncing (and validation) for rollapps with multiple revisions. func (m *Manager) updateStateForNextRevision() error { - + // in case fork is detected dymint state needs to be updated - + // get next revision according to node height nextRevision, err := m.getRevisionFromSL(m.State.NextHeight()) if err != nil { return err } - + // if next height is revision start height, update local state if nextRevision.StartHeight == m.State.NextHeight() { - + // Set proposer to nil to force updating it from SL m.State.SetProposer(nil) - + // Upgrade revision on state m.State.RevisionStartHeight = nextRevision.StartHeight m.State.SetRevision(nextRevision.Number) - + // update stored state _, err = m.Store.SaveState(m.State, nil) return err } return nil } - +// doForkWhenNewRevision creates and submit to SL fork blocks according to next revision start height. func (m *Manager) doForkWhenNewRevision() error { defer m.forkMu.Unlock() m.forkMu.Lock() - + // get revision next height expectedRevision, err := m.getRevisionFromSL(m.State.NextHeight()) if err != nil { return err } - + // create fork batch in case it has not been submitted yet if m.LastSettlementHeight.Load() < expectedRevision.StartHeight { instruction, err := m.createInstruction(expectedRevision) if err != nil { return err } - + // update revision with revision after fork m.State.SetRevision(instruction.Revision) - + // create and submit fork batch err = m.doFork(instruction) if err != nil { return err } } - + // this cannot happen. it means the revision number obtained is not the same or the next revision. unable to fork. if expectedRevision.Number != m.State.GetRevision() { panic("Inconsistent expected revision number from Hub. Unable to fork") } - + // remove instruction file after fork return types.DeleteInstructionFromDisk(m.RootDir) } diff --git a/block/fraud.go b/block/fraud.go index f543420eb..11a95c493 100644 --- a/block/fraud.go +++ b/block/fraud.go @@ -4,16 +4,16 @@ import ( "context" ) - - +// FraudHandler is an interface that defines a method to handle faults. +// Contract: should not be blocking. type FraudHandler interface { - - + // HandleFault handles a fault that occurred in the system. + // The fault is passed as an error type. HandleFault(ctx context.Context, fault error) } - - +// FreezeHandler is used to handle faults coming from executing and validating blocks. +// once a fault is detected, it publishes a DataHealthStatus event to the pubsub which sets the node in a frozen state. 
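// Illustrative sketch, not part of dymint: FraudHandler above is a single non-blocking
// hook, and FreezeHandler is the implementation the manager installs. The logging handler
// below is an assumption, shown only to make the interface contract concrete.
package main

import (
	"context"
	"errors"
	"fmt"
)

// FraudHandler mirrors the interface above: handle a fault, and do not block the caller.
type FraudHandler interface {
	HandleFault(ctx context.Context, fault error)
}

// loggingFraudHandler records the fault asynchronously instead of freezing the node.
type loggingFraudHandler struct{}

func (loggingFraudHandler) HandleFault(ctx context.Context, fault error) {
	go fmt.Println("fault detected:", fault) // hand off so the block application path never blocks here
}

func main() {
	var h FraudHandler = loggingFraudHandler{}
	h.HandleFault(context.Background(), errors.New("example fault"))
}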
type FreezeHandler struct { m *Manager } diff --git a/block/initchain.go b/block/initchain.go index 48fea86a7..604bbe3cb 100644 --- a/block/initchain.go +++ b/block/initchain.go @@ -11,8 +11,8 @@ import ( ) func (m *Manager) RunInitChain() error { - - proposer, err := m.SLClient.GetProposerAtHeight(int64(m.State.Height()) + 1) + // Get the proposer at the initial height. If we're at genesis the height will be 0. + proposer, err := m.SLClient.GetProposerAtHeight(int64(m.State.Height()) + 1) //nolint:gosec // height is non-negative and falls in int64 if err != nil { return fmt.Errorf("get proposer at height: %w", err) } @@ -25,13 +25,13 @@ func (m *Manager) RunInitChain() error { return err } - + // validate the resulting genesis bridge data against the hub err = m.ValidateGenesisBridgeData(res.GenesisBridgeDataBytes) if err != nil { return fmt.Errorf("Cannot validate genesis bridge data: %w. Please call `$EXECUTABLE dymint unsafe-reset-all` before the next launch to reset this node to genesis state.", err) } - + // update the state with only the consensus pubkey m.Executor.UpdateStateAfterInitChain(m.State, res) m.Executor.UpdateMempoolAfterInitChain(m.State) if _, err := m.Store.SaveState(m.State, nil); err != nil { @@ -41,8 +41,8 @@ func (m *Manager) RunInitChain() error { return nil } - - +// ValidateGenesisBridgeData validates the genesis bridge data from +// InitChainResponse against the rollapp genesis stored in the hub. func (m *Manager) ValidateGenesisBridgeData(dataBytes []byte) error { if len(dataBytes) == 0 { return fmt.Errorf("genesis bridge data is empty in InitChainResponse") diff --git a/block/manager.go b/block/manager.go index 06594e29d..61d74a6ab 100644 --- a/block/manager.go +++ b/block/manager.go @@ -36,95 +36,99 @@ import ( ) const ( - + // RunModeProposer represents a node running as a proposer RunModeProposer uint = iota - + // RunModeFullNode represents a node running as a full node RunModeFullNode ) - +// Manager is responsible for aggregating transactions into blocks. type Manager struct { logger types.Logger - + // Configuration Conf config.BlockManagerConfig Genesis *tmtypes.GenesisDoc GenesisChecksum string LocalKey crypto.PrivKey RootDir string - + // Store and execution Store store.Store State *types.State Executor ExecutorI - Sequencers *types.SequencerSet + Sequencers *types.SequencerSet // Sequencers is the set of sequencers that are currently active on the rollapp - + // Clients and servers Pubsub *pubsub.Server P2PClient *p2p.Client DAClient da.DataAvailabilityLayerClient SLClient settlement.ClientI - + // RunMode represents the mode of the node. Set during initialization and shouldn't change after that. RunMode uint - + // context used when freezing node Cancel context.CancelFunc Ctx context.Context - + // LastBlockTimeInSettlement is the time of last submitted block, used to measure batch skew time LastBlockTimeInSettlement atomic.Int64 - + // LastBlockTime is the time of last produced block, used to measure batch skew time LastBlockTime atomic.Int64 - + // mutex used to avoid stopping node when fork is detected but proposer is creating/sending fork batch forkMu sync.Mutex - - - - - + /* + Sequencer and full-node + */ + // The last height which was submitted to settlement, that we know of. When we produce new batches, we will + // start at this height + 1. + // It is ALSO used by the producer, because the producer needs to check if it can prune blocks and it won't + // prune anything that might be submitted in the future. Therefore, it must be atomic. 
LastSettlementHeight atomic.Uint64 - + // channel used to send the retain height to the pruning background loop pruningC chan int64 - + // indexer IndexerService *txindex.IndexerService - + // used to fetch blocks from DA. Sequencer will only fetch batches in case it requires to re-sync (in case of rollback). Full-node will fetch batches for syncing and validation. Retriever da.BatchRetriever - - - + /* + Full-node only + */ + // Protect against processing two blocks at once when there are two routines handling incoming gossiped blocks, + // and incoming DA blocks, respectively. retrieverMu sync.Mutex - - + // Cached blocks and commits, coming from P2P, for applying at future heights. The blocks may not be valid, because + // we can only do full validation in sequential order. blockCache *Cache - + // TargetHeight holds the value of the current highest block seen from either p2p (probably higher) or the DA TargetHeight atomic.Uint64 - + // Fraud handler FraudHandler FraudHandler - + // channel used to signal the syncing loop when there is a new state update available settlementSyncingC chan struct{} - + // channel used to signal the validation loop when there is a new state update available settlementValidationC chan struct{} - + // notifies when the node has completed syncing syncedFromSettlement *uchannel.Nudger - + // validates all non-finalized state updates from settlement, checking there is consistency between DA and P2P blocks, and the information in the state update. SettlementValidator *SettlementValidator } - +// NewManager creates new block Manager. func NewManager( localKey crypto.PrivKey, conf config.NodeConfig, @@ -151,7 +155,7 @@ func NewManager( mempool, proxyApp, eventBus, - NewConsensusMsgQueue(), + NewConsensusMsgQueue(), // TODO properly specify ConsensusMsgStream: https://github.com/dymensionxyz/dymint/issues/1125 logger, ) if err != nil { @@ -175,10 +179,10 @@ func NewManager( blockCache: &Cache{ cache: make(map[uint64]types.CachedBlock), }, - pruningC: make(chan int64, 10), - settlementSyncingC: make(chan struct{}, 1), - settlementValidationC: make(chan struct{}, 1), - syncedFromSettlement: uchannel.NewNudger(), + pruningC: make(chan int64, 10), // use of buffered channel to avoid blocking applyBlock thread. In case channel is full, pruning will be skipped, but the retain height can be pruned in the next iteration. + settlementSyncingC: make(chan struct{}, 1), // use of buffered channel to avoid blocking. In case channel is full, its skipped because there is an ongoing syncing process, but syncing height is updated, which means the ongoing syncing will sync to the new height. + settlementValidationC: make(chan struct{}, 1), // use of buffered channel to avoid blocking. In case channel is full, its skipped because there is an ongoing validation process, but validation height is updated, which means the ongoing validation will validate to the new height. + syncedFromSettlement: uchannel.NewNudger(), // used by the sequencer to wait till the node completes the syncing from settlement. 
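// Illustrative sketch, not part of dymint: settlementSyncingC and settlementValidationC
// above are capacity-1 channels used as coalescing triggers — a non-blocking send marks
// that newer settlement state exists, and if a signal is already pending the send is
// dropped, because the running loop will sync or validate up to the latest height anyway.
// The trigger function and loop below are assumptions for illustration.
package main

import (
	"fmt"
	"time"
)

func trigger(c chan struct{}) {
	select {
	case c <- struct{}{}: // wake the loop (or leave one pending signal)
	default: // a signal is already pending; coalesce with it
	}
}

func main() {
	settlementSyncingC := make(chan struct{}, 1)

	go func() {
		for range settlementSyncingC {
			fmt.Println("syncing to the latest known settlement height")
		}
	}()

	trigger(settlementSyncingC)
	trigger(settlementSyncingC) // coalesced: at most one pending wake-up
	time.Sleep(50 * time.Millisecond)
}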
} m.setFraudHandler(NewFreezeHandler(m)) err = m.LoadStateOnInit(store, genesis, logger) @@ -191,13 +195,13 @@ func NewManager( return nil, err } - + // update dymint state with next revision info err = m.updateStateForNextRevision() if err != nil { return nil, err } - + // validate configuration params and rollapp consensus params are in line err = m.ValidateConfigWithRollappParams() if err != nil { return nil, err @@ -208,10 +212,10 @@ func NewManager( return m, nil } - +// Start starts the block manager. func (m *Manager) Start(ctx context.Context) error { m.Ctx, m.Cancel = context.WithCancel(ctx) - + // Check if InitChain flow is needed if m.State.IsGenesis() { m.logger.Info("Running InitChain") @@ -221,9 +225,9 @@ func (m *Manager) Start(ctx context.Context) error { } } - - - + // Check if a proposer on the rollapp is set. In case no proposer is set on the Rollapp, fallback to the hub proposer (If such exists). + // No proposer on the rollapp means that at some point there was no available proposer. + // In case there is also no proposer on the hub to our current height, it means that the chain is halted. if m.State.GetProposer() == nil { m.logger.Info("No proposer on the rollapp, fallback to the hub proposer, if available") err := m.UpdateProposerFromSL() @@ -236,10 +240,10 @@ func (m *Manager) Start(ctx context.Context) error { } } - - - - + // checks if the the current node is the proposer either on rollapp or on the hub. + // In case of sequencer rotation, there's a phase where proposer rotated on Rollapp but hasn't yet rotated on hub. + // for this case, 2 nodes will get `true` for `AmIProposer` so the l2 proposer can produce blocks and the hub proposer can submit his last batch. + // The hub proposer, after sending the last state update, will panic and restart as full node. 
amIProposerOnSL, err := m.AmIProposerOnSL() if err != nil { return fmt.Errorf("am i proposer on SL: %w", err) @@ -249,30 +253,30 @@ func (m *Manager) Start(ctx context.Context) error { m.logger.Info("starting block manager", "mode", map[bool]string{true: "proposer", false: "full node"}[amIProposer]) - + // update local state from latest state in settlement err = m.updateFromLastSettlementState() if err != nil { return fmt.Errorf("sync block manager from settlement: %w", err) } - + // send signal to syncing loop with last settlement state update m.triggerSettlementSyncing() - + // send signal to validation loop with last settlement state update m.triggerSettlementValidation() eg, ctx := errgroup.WithContext(m.Ctx) - + // Start the pruning loop in the background uerrors.ErrGroupGoLog(eg, m.logger, func() error { return m.PruningLoop(ctx) }) - + // Start the settlement sync loop in the background uerrors.ErrGroupGoLog(eg, m.logger, func() error { return m.SettlementSyncLoop(ctx) }) - + // Monitor sequencer set updates uerrors.ErrGroupGoLog(eg, m.logger, func() error { return m.MonitorSequencerSetUpdates(ctx) }) @@ -285,7 +289,7 @@ func (m *Manager) Start(ctx context.Context) error { return m.MonitorBalances(ctx) }) - + // run based on the node role if !amIProposer { return m.runAsFullNode(ctx, eg) } @@ -297,26 +301,26 @@ func (m *Manager) NextHeightToSubmit() uint64 { return m.LastSettlementHeight.Load() + 1 } - +// updateFromLastSettlementState retrieves last sequencers and state update from the Hub and updates local state with it func (m *Manager) updateFromLastSettlementState() error { - + // Update sequencers list from SL err := m.UpdateSequencerSetFromSL() if err != nil { - + // this error is not critical m.logger.Error("Cannot fetch sequencer set from the Hub", "error", err) } - + // update latest height from SL latestHeight, err := m.SLClient.GetLatestHeight() if errors.Is(err, gerrc.ErrNotFound) { - + // The SL hasn't got any batches for this chain yet. m.logger.Info("No batches for chain found in SL.") - m.LastSettlementHeight.Store(uint64(m.Genesis.InitialHeight - 1)) + m.LastSettlementHeight.Store(uint64(m.Genesis.InitialHeight - 1)) //nolint:gosec // height is non-negative and falls in int64 m.LastBlockTimeInSettlement.Store(m.Genesis.GenesisTime.UTC().UnixNano()) return nil } if err != nil { - + // TODO: separate between fresh rollapp and non-registered rollapp return err } @@ -327,10 +331,10 @@ func (m *Manager) updateFromLastSettlementState() error { m.LastSettlementHeight.Store(latestHeight) - + // init last block in settlement time in dymint state to calculate batch submit skew time m.SetLastBlockTimeInSettlementFromHeight(latestHeight) - + // init last block time in dymint state to calculate batch submit skew time block, err := m.Store.LoadBlock(m.State.Height()) if err == nil { m.LastBlockTime.Store(block.Header.GetTimestamp().UTC().UnixNano()) @@ -339,7 +343,7 @@ func (m *Manager) updateFromLastSettlementState() error { } func (m *Manager) updateLastFinalizedHeightFromSettlement() error { - + // update latest finalized height from SL height, err := m.SLClient.GetLatestFinalizedHeight() if errors.Is(err, gerrc.ErrNotFound) { m.logger.Info("No finalized batches for chain found in SL.") @@ -368,7 +372,7 @@ func (m *Manager) UpdateTargetHeight(h uint64) { } } - +// ValidateConfigWithRollappParams checks the configuration params are consistent with the params in the dymint state (e.g. 
DA and version) func (m *Manager) ValidateConfigWithRollappParams() error { if da.Client(m.State.RollappParams.Da) != m.DAClient.GetClientType() { return fmt.Errorf("da client mismatch. rollapp param: %s da configured: %s", m.State.RollappParams.Da, m.DAClient.GetClientType()) } @@ -381,7 +385,7 @@ func (m *Manager) ValidateConfigWithRollappParams() error { return nil } - +// setDA initializes the DA client in the block manager according to the DA type set in genesis or stored in state func (m *Manager) setDA(daconfig string, dalcKV store.KV, logger log.Logger) error { daLayer := m.State.RollappParams.Da dalc := registry.GetClient(daLayer) @@ -402,12 +406,12 @@ func (m *Manager) setDA(daconfig string, dalcKV store.KV, logger log.Logger) err return nil } - +// setFraudHandler sets the fraud handler for the block manager. func (m *Manager) setFraudHandler(handler *FreezeHandler) { m.FraudHandler = handler } - +// freezeNode sets the node as unhealthy and prevents it from continuing to produce and process blocks func (m *Manager) freezeNode(err error) { m.logger.Info("Freezing node", "err", err) if m.Ctx.Err() != nil { @@ -417,11 +421,11 @@ func (m *Manager) freezeNode(err error) { m.Cancel() } - +// SetLastBlockTimeInSettlementFromHeight is used to initialize LastBlockTimeInSettlement from the rollapp height in settlement func (m *Manager) SetLastBlockTimeInSettlementFromHeight(lastSettlementHeight uint64) { block, err := m.Store.LoadBlock(lastSettlementHeight) if err != nil { - + // if the settlement height block is not found it will be updated later, when syncing return } m.LastBlockTimeInSettlement.Store(block.Header.GetTimestamp().UTC().UnixNano()) diff --git a/block/modes.go b/block/modes.go index e8a48d33f..adfd56432 100644 --- a/block/modes.go +++ b/block/modes.go @@ -20,43 +20,43 @@ const ( p2pBlocksyncLoop = "applyBlockSyncBlocksLoop" ) - +// runAsFullNode starts the block manager in full node mode. func (m *Manager) runAsFullNode(ctx context.Context, eg *errgroup.Group) error { m.logger.Info("starting block manager", "mode", "full node") m.RunMode = RunModeFullNode - + // update latest finalized height err := m.updateLastFinalizedHeightFromSettlement() if err != nil { return fmt.Errorf("sync block manager from settlement: %w", err) } - + // Start the settlement validation loop in the background uerrors.ErrGroupGoLog(eg, m.logger, func() error { return m.SettlementValidateLoop(ctx) }) m.subscribeFullNodeEvents(ctx) - + // remove the instruction file after a fork to avoid entering the fork loop again return types.DeleteInstructionFromDisk(m.RootDir) } func (m *Manager) runAsProposer(ctx context.Context, eg *errgroup.Group) error { m.logger.Info("starting block manager", "mode", "proposer") m.RunMode = RunModeProposer - + // Subscribe to batch events, to update last submitted height in case batch confirmation was lost. This could happen if the sequencer crashed/restarted just after submitting a batch to the settlement and by the time we query the last batch, this batch wasn't accepted yet. go uevent.MustSubscribe(ctx, m.Pubsub, "updateSubmittedHeightLoop", settlement.EventQueryNewSettlementBatchAccepted, m.UpdateLastSubmittedHeight, m.logger) - + // Subscribe to P2P received blocks events (used for P2P syncing). go uevent.MustSubscribe(ctx, m.Pubsub, p2pBlocksyncLoop, p2p.EventQueryNewBlockSyncBlock, m.OnReceivedBlock, m.logger) - - + // Sequencer must wait till the DA light client is synced. Otherwise it will fail when submitting blocks.
+ // Full-nodes does not need to wait, but if it tries to fetch blocks from DA heights previous to the DA light client height it will fail, and it will retry till it reaches the height. m.DAClient.WaitForSyncing() - + // Sequencer must wait till node is synced till last submittedHeight, in case it is not m.waitForSettlementSyncing() - + // it is checked again whether the node is the active proposer, since this could have changed after syncing. amIProposerOnSL, err := m.AmIProposerOnSL() if err != nil { return fmt.Errorf("am i proposer on SL: %w", err) @@ -65,28 +65,28 @@ func (m *Manager) runAsProposer(ctx context.Context, eg *errgroup.Group) error { return fmt.Errorf("the node is no longer the proposer. please restart.") } - + // update l2 proposer from SL in case it changed after syncing err = m.UpdateProposerFromSL() if err != nil { return err } - + // doForkWhenNewRevision executes fork if necessary err = m.doForkWhenNewRevision() if err != nil { return err } - + // check if we should rotate shouldRotate, err := m.ShouldRotate() if err != nil { return fmt.Errorf("checking should rotate: %w", err) } if shouldRotate { - m.rotate(ctx) + m.rotate(ctx) // panics afterwards } - + // populate the bytes produced channel bytesProducedC := make(chan int) uerrors.ErrGroupGoLog(eg, m.logger, func() error { @@ -94,18 +94,18 @@ func (m *Manager) runAsProposer(ctx context.Context, eg *errgroup.Group) error { }) uerrors.ErrGroupGoLog(eg, m.logger, func() error { - bytesProducedC <- m.GetUnsubmittedBytes() + bytesProducedC <- m.GetUnsubmittedBytes() // load unsubmitted bytes from previous run return m.ProduceBlockLoop(ctx, bytesProducedC) }) - + // Monitor and handling of the rotation uerrors.ErrGroupGoLog(eg, m.logger, func() error { return m.MonitorProposerRotation(ctx) }) go func() { err = eg.Wait() - + // Check if loops exited due to sequencer rotation signal if errors.Is(err, errRotationRequested) { m.rotate(ctx) } else if err != nil { @@ -118,11 +118,11 @@ func (m *Manager) runAsProposer(ctx context.Context, eg *errgroup.Group) error { } func (m *Manager) subscribeFullNodeEvents(ctx context.Context) { - + // Subscribe to new (or finalized) state updates events. go uevent.MustSubscribe(ctx, m.Pubsub, syncLoop, settlement.EventQueryNewSettlementBatchAccepted, m.onNewStateUpdate, m.logger) go uevent.MustSubscribe(ctx, m.Pubsub, validateLoop, settlement.EventQueryNewSettlementBatchFinalized, m.onNewStateUpdateFinalized, m.logger) - + // Subscribe to P2P received blocks events (used for P2P syncing). go uevent.MustSubscribe(ctx, m.Pubsub, p2pGossipLoop, p2p.EventQueryNewGossipedBlock, m.OnReceivedBlock, m.logger) go uevent.MustSubscribe(ctx, m.Pubsub, p2pBlocksyncLoop, p2p.EventQueryNewBlockSyncBlock, m.OnReceivedBlock, m.logger) } diff --git a/block/p2p.go b/block/p2p.go index c1c679dd3..6dcae3c5e 100644 --- a/block/p2p.go +++ b/block/p2p.go @@ -9,7 +9,7 @@ import ( "github.com/tendermint/tendermint/libs/pubsub" ) - +// onReceivedBlock receives a block received event from P2P, saves the block to a cache and tries to apply the blocks from the cache. 
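// The caching behaviour described in the comment above can be illustrated with a small,
// self-contained sketch. The cache type and apply callback here are assumed stand-ins,
// not dymint's actual blockCache / attemptApplyCachedBlocks: blocks may arrive out of
// order from gossip or block-sync, so they are kept in a height-keyed cache and applied
// only while the next expected height is present.
package sketch

type cachedBlock struct{ height uint64 }

func applyConsecutive(next uint64, cache map[uint64]cachedBlock, apply func(cachedBlock) error) (uint64, error) {
	for {
		b, ok := cache[next]
		if !ok {
			return next, nil // gap: wait for the missing height to arrive
		}
		if err := apply(b); err != nil {
			return next, err
		}
		delete(cache, next)
		next++
	}
}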
func (m *Manager) OnReceivedBlock(event pubsub.Message) { eventData, ok := event.Data().(p2p.BlockData) if !ok { @@ -40,9 +40,9 @@ func (m *Manager) OnReceivedBlock(event pubsub.Message) { if block.Header.Height < m.State.NextHeight() { return } - m.retrieverMu.Lock() + m.retrieverMu.Lock() // needed to protect blockCache access - + // It is not strictly necessary to return early, for correctness, but doing so helps us avoid mutex pressure and unnecessary repeated attempts to apply cached blocks if m.blockCache.Has(height) { m.retrieverMu.Unlock() return @@ -54,7 +54,7 @@ func (m *Manager) OnReceivedBlock(event pubsub.Message) { m.logger.Debug("Received new block from p2p.", "block height", height, "source", source.String(), "store height", m.State.Height(), "n cachedBlocks", m.blockCache.Size()) m.blockCache.Add(height, &block, &commit, source) - m.retrieverMu.Unlock() + m.retrieverMu.Unlock() // have to give this up as it's locked again in attempt apply, and we're not re-entrant err := m.attemptApplyCachedBlocks() if err != nil { @@ -63,7 +63,7 @@ func (m *Manager) OnReceivedBlock(event pubsub.Message) { } } - +// gossipBlock sends created blocks by the sequencer to full-nodes using P2P gossipSub func (m *Manager) gossipBlock(ctx context.Context, block types.Block, commit types.Commit) error { m.logger.Info("Gossipping block", "height", block.Header.Height) gossipedBlock := p2p.BlockData{Block: block, Commit: commit} @@ -72,15 +72,15 @@ func (m *Manager) gossipBlock(ctx context.Context, block types.Block, commit typ return fmt.Errorf("marshal binary: %w: %w", err, ErrNonRecoverable) } if err := m.P2PClient.GossipBlock(ctx, gossipedBlockBytes); err != nil { - - + // Although this boils down to publishing on a topic, we don't want to speculate too much on what + // could cause that to fail, so we assume recoverable. return fmt.Errorf("p2p gossip block: %w: %w", err, ErrRecoverable) } return nil } - +// This function adds the block to blocksync store to enable P2P retrievability func (m *Manager) saveP2PBlockToBlockSync(block *types.Block, commit *types.Commit) error { gossipedBlock := p2p.BlockData{Block: *block, Commit: *commit} gossipedBlockBytes, err := gossipedBlock.MarshalBinary() diff --git a/block/produce.go b/block/produce.go index a2d4ffa64..9a67fe77b 100644 --- a/block/produce.go +++ b/block/produce.go @@ -20,9 +20,9 @@ import ( "github.com/dymensionxyz/dymint/types" ) - - - +// ProduceBlockLoop is calling publishBlock in a loop as long as we're synced. +// A signal will be sent to the bytesProduced channel for each block produced +// In this way it's possible to pause block production by not consuming the channel func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int) error { m.logger.Info("Started block producer loop.") @@ -40,12 +40,12 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int) case <-ctx.Done(): return nil case <-ticker.C: - + // Only produce if I'm the current rollapp proposer. if !m.AmIProposerOnRollapp() { continue } - + // if empty blocks are configured to be enabled, and one is scheduled... 
produceEmptyBlock := firstBlock || m.Conf.MaxIdleTime == 0 || nextEmptyBlock.Before(time.Now()) firstBlock = false @@ -54,7 +54,7 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int) m.logger.Error("Produce and gossip: context canceled.", "error", err) return nil } - if errors.Is(err, types.ErrEmptyBlock) { + if errors.Is(err, types.ErrEmptyBlock) { // occurs if the block was empty but we don't want to produce one continue } if errors.Is(err, ErrNonRecoverable) { @@ -68,8 +68,8 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int) } nextEmptyBlock = time.Now().Add(m.Conf.MaxIdleTime) if 0 < len(block.Data.Txs) { - - + // the block wasn't empty so we want to make sure we don't wait too long before producing another one, in order to facilitate proofs for ibc + // TODO: optimize to only do this if IBC transactions are present (https://github.com/dymensionxyz/dymint/issues/709) nextEmptyBlock = time.Now().Add(m.Conf.MaxProofTime) } else { m.logger.Info("Produced empty block.") @@ -102,10 +102,10 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int) type ProduceBlockOptions struct { AllowEmpty bool MaxData *uint64 - NextProposerHash *[32]byte + NextProposerHash *[32]byte // optional, used for last block } - +// ProduceApplyGossipLastBlock produces and applies a block with the given NextProposerHash. func (m *Manager) ProduceApplyGossipLastBlock(ctx context.Context, nextProposerHash [32]byte) (err error) { _, _, err = m.produceApplyGossip(ctx, ProduceBlockOptions{ AllowEmpty: true, @@ -119,22 +119,22 @@ func (m *Manager) ProduceApplyGossipBlock(ctx context.Context, opts ProduceBlock } func (m *Manager) produceApplyGossip(ctx context.Context, opts ProduceBlockOptions) (block *types.Block, commit *types.Commit, err error) { - - - - - - - + // Snapshot sequencer set to check if there are sequencer set updates. + // It fills the consensus messages queue for all the new sequencers. + // + // Note that there cannot be any recoverable errors between when the queue is filled and dequeued; + // otherwise, the queue may grow uncontrollably if there is a recoverable error loop in the middle. + // + // All errors in this method are non-recoverable. newSequencerSet, err := m.SnapshotSequencerSet() if err != nil { return nil, nil, fmt.Errorf("snapshot sequencer set: %w", err) } - - + // We do not want to wait for a new block created to propagate a new sequencer set. + // Therefore, we force an empty block if there are any sequencer set updates. opts.AllowEmpty = opts.AllowEmpty || len(newSequencerSet) > 0 - + // If I'm not the current rollapp proposer, I should not produce a blocks. block, commit, err = m.produceBlock(opts) if err != nil { return nil, nil, fmt.Errorf("produce block: %w", err) @@ -151,50 +151,50 @@ func (m *Manager) produceApplyGossip(ctx context.Context, opts ProduceBlockOptio return block, commit, nil } - - - - - - - - - - - - - - +// SnapshotSequencerSet loads two versions of the sequencer set: +// - the one that was used for the last block (from the store) +// - and the most recent one (from the manager memory) +// +// It then calculates the diff between the two and creates consensus messages for the new sequencers, +// i.e., only for the diff between two sets. If there is any diff (i.e., the sequencer set is updated), +// the method returns the entire new set. The new set will be used for next block and will be stored +// in the state instead of the old set after the block production. 
+// +// The set from the state is dumped to memory on reboots. It helps to avoid sending unnecessary +// UpsertSequencer consensus messages on reboots. This is not a 100% solution, because the sequencer set +// is not persisted in the store in full node mode. It's only used in the proposer mode. Therefore, +// on rotation from the full node to the proposer, the sequencer set is duplicated as consensus msgs. +// Though a single-time duplication is not a big deal. func (m *Manager) SnapshotSequencerSet() (sequencersAfterUpdate types.Sequencers, err error) { - + // the most recent sequencer set sequencersAfterUpdate = m.Sequencers.GetAll() - + // the sequencer set that was used for the last block lastSequencers, err := m.Store.LoadLastBlockSequencerSet() - - + // it's okay if the last sequencer set is not found, it can happen on genesis or after + // rotation from the full node to the proposer if err != nil && !errors.Is(err, gerrc.ErrNotFound) { - + // unexpected error from the store is non-recoverable return nil, fmt.Errorf("load last block sequencer set: %w: %w", err, ErrNonRecoverable) } - + // diff between the two sequencer sets newSequencers := types.SequencerListRightOuterJoin(lastSequencers, sequencersAfterUpdate) if len(newSequencers) == 0 { - + // nothing to upsert, nothing to persist return nil, nil } - - + // Create consensus msgs for new sequencers. + // It can fail only on decoding or internal errors; this is non-recoverable. msgs, err := ConsensusMsgsOnSequencerSetUpdate(newSequencers) if err != nil { return nil, fmt.Errorf("consensus msgs on sequencers set update: %w: %w", err, ErrNonRecoverable) } m.Executor.AddConsensusMsgs(msgs...) - + // return the entire new set if there is any update return sequencersAfterUpdate, nil } @@ -202,18 +202,18 @@ func (m *Manager) produceBlock(opts ProduceBlockOptions) (*types.Block, *types.C newHeight := m.State.NextHeight() lastHeaderHash, lastCommit, err := m.GetPreviousBlockHashes(newHeight) if err != nil { - + // the error here is always non-recoverable, see GetPreviousBlockHashes() for details return nil, nil, fmt.Errorf("load prev block: %w", err) } var block *types.Block var commit *types.Commit - - + // Check if there's an already stored block and commit at a newer height + // If there is, use that instead of creating a new block pendingBlock, err := m.Store.LoadBlock(newHeight) if err == nil { - + // Using an existing block block = pendingBlock commit, err = m.Store.LoadCommit(newHeight) if err != nil { @@ -230,16 +230,16 @@ func (m *Manager) produceBlock(opts ProduceBlockOptions) (*types.Block, *types.C maxBlockDataSize = *opts.MaxData } proposerHashForBlock := [32]byte(m.State.GetProposerHash()) - + // if NextProposerHash is set, we create a last block if opts.NextProposerHash != nil { maxBlockDataSize = 0 proposerHashForBlock = *opts.NextProposerHash } - + // dequeue consensus messages for the new sequencers while creating a new block block = m.Executor.CreateBlock(newHeight, lastCommit, lastHeaderHash, proposerHashForBlock, m.State, maxBlockDataSize) - - + // this cannot happen if there are any sequencer set updates + // AllowEmpty should always be true in this case if !opts.AllowEmpty && len(block.Data.Txs) == 0 { return nil, nil, fmt.Errorf("%w: %w", types.ErrEmptyBlock, ErrRecoverable) } @@ -255,7 +255,7 @@ func (m *Manager) produceBlock(opts ProduceBlockOptions) (*types.Block, *types.C return block, commit, nil } - +// createCommit creates a commit for the given block func (m *Manager) createCommit(block *types.Block) (*types.Commit, error) {
abciHeaderPb := types.ToABCIHeaderPB(&block.Header) abciHeaderBytes, err := abciHeaderPb.Marshal() @@ -290,7 +290,7 @@ func (m *Manager) createTMSignature(block *types.Block, proposerAddress []byte, headerHash := block.Header.Hash() vote := tmtypes.Vote{ Type: cmtproto.PrecommitType, - Height: int64(block.Header.Height), + Height: int64(block.Header.Height), //nolint:gosec // height is non-negative and falls in int64 Round: 0, Timestamp: voteTimestamp, BlockID: tmtypes.BlockID{Hash: headerHash[:], PartSetHeader: tmtypes.PartSetHeader{ @@ -301,18 +301,18 @@ func (m *Manager) createTMSignature(block *types.Block, proposerAddress []byte, ValidatorIndex: 0, } v := vote.ToProto() - - + // convert libp2p key to tm key + // TODO: move to types rawKey, _ := m.LocalKey.Raw() tmprivkey := tmed25519.PrivKey(rawKey) tmprivkey.PubKey().Bytes() - + // Create a mock validator to sign the vote tmvalidator := tmtypes.NewMockPVWithParams(tmprivkey, false, false) err := tmvalidator.SignVote(m.State.ChainID, v) if err != nil { return nil, err } - + // Update the vote with the signature vote.Signature = v.Signature pubKey := tmprivkey.PubKey() voteSignBytes := tmtypes.VoteSignBytes(m.State.ChainID, v) @@ -322,12 +322,12 @@ func (m *Manager) createTMSignature(block *types.Block, proposerAddress []byte, return vote.Signature, nil } - - +// GetPreviousBlockHashes returns the hash of the last block and the commit for the last block +// to be used as the previous block hash and commit for the next block func (m *Manager) GetPreviousBlockHashes(forHeight uint64) (lastHeaderHash [32]byte, lastCommit *types.Commit, err error) { - lastHeaderHash, lastCommit, err = getHeaderHashAndCommit(m.Store, forHeight-1) + lastHeaderHash, lastCommit, err = getHeaderHashAndCommit(m.Store, forHeight-1) // prev height = forHeight - 1 if err != nil { - if !m.State.IsGenesis() { + if !m.State.IsGenesis() { // allow prevBlock not to be found only on genesis return [32]byte{}, nil, fmt.Errorf("load prev block: %w: %w", err, ErrNonRecoverable) } lastHeaderHash = [32]byte{} @@ -336,7 +336,7 @@ func (m *Manager) GetPreviousBlockHashes(forHeight uint64) (lastHeaderHash [32]b return lastHeaderHash, lastCommit, nil } - +// getHeaderHashAndCommit returns the Header Hash and Commit for a given height func getHeaderHashAndCommit(store store.Store, height uint64) ([32]byte, *types.Commit, error) { lastCommit, err := store.LoadCommit(height) if err != nil { diff --git a/block/pruning.go b/block/pruning.go index 9576938d1..9a92451e9 100644 --- a/block/pruning.go +++ b/block/pruning.go @@ -4,9 +4,9 @@ import ( "context" ) - +// Prune function prune all block related data from dymint store and blocksync store up to (but not including) retainHeight. 
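// The retain height is exclusive: everything below it may be deleted and the block at
// retainHeight itself is kept. The pruning loop below additionally clamps the requested
// height so a proposer never drops unsubmitted blocks and a full node never drops blocks
// that are not yet validated. A hedged sketch of that clamping (the bound arguments are
// stand-ins for the manager methods used in the loop below):
package sketch

func pruningTarget(requested, nextToSubmit, nextToValidate uint64, isProposer bool) uint64 {
	if isProposer {
		return min(requested, nextToSubmit) // keep anything we may still submit
	}
	return min(requested, nextToValidate) // keep anything not yet validated
}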
func (m *Manager) Prune(retainHeight uint64) { - + // logging pruning result logResult := func(err error, source string, retainHeight uint64, pruned uint64) { if err != nil { m.logger.Error("pruning", "from", source, "retain height", retainHeight, "err", err) @@ -15,20 +15,20 @@ func (m *Manager) Prune(retainHeight uint64) { } } - + // prune blocks from blocksync store pruned, err := m.P2PClient.RemoveBlocks(context.Background(), retainHeight) logResult(err, "blocksync", retainHeight, pruned) - + // prune indexed block and txs and associated events pruned, err = m.IndexerService.Prune(retainHeight, m.Store) logResult(err, "indexer", retainHeight, pruned) - + // prune blocks from dymint store pruned, err = m.Store.PruneStore(retainHeight, m.logger) logResult(err, "dymint store", retainHeight, pruned) } - +//nolint:gosec // height is non-negative and falls in int64 func (m *Manager) PruningLoop(ctx context.Context) error { for { select { @@ -36,9 +36,9 @@ func (m *Manager) PruningLoop(ctx context.Context) error { return nil case retainHeight := <-m.pruningC: var pruningHeight uint64 - if m.RunMode == RunModeProposer { + if m.RunMode == RunModeProposer { // do not delete anything that we might submit in future pruningHeight = min(m.NextHeightToSubmit(), uint64(retainHeight)) - } else { + } else { // do not delete anything that is not validated yet pruningHeight = min(m.SettlementValidator.NextValidationHeight(), uint64(retainHeight)) } m.Prune(pruningHeight) diff --git a/block/retriever.go b/block/retriever.go index 850a9ed9e..3475bd398 100644 --- a/block/retriever.go +++ b/block/retriever.go @@ -22,7 +22,7 @@ func (m *Manager) ApplyBatchFromSL(slBatch *settlement.Batch) error { m.retrieverMu.Lock() defer m.retrieverMu.Unlock() - + // if batch blocks have already been applied skip, otherwise it will fail in endheight validation (it can happen when syncing from blocksync in parallel). if m.State.Height() > slBatch.EndHeight { return nil } @@ -30,7 +30,7 @@ func (m *Manager) ApplyBatchFromSL(slBatch *settlement.Batch) error { blockIndex := 0 for _, batch := range batchResp.Batches { for i, block := range batch.Blocks { - + // We dont apply a block if not included in the block descriptor (adds support for rollback) if blockIndex >= len(slBatch.BlockDescriptors) { break } @@ -45,7 +45,7 @@ func (m *Manager) ApplyBatchFromSL(slBatch *settlement.Batch) error { return err } - + // We dont validate because validateBlockBeforeApply already checks if the block is already applied, and we don't need to fail there. 
err := m.applyBlockWithFraudHandling(block, batch.Commits[i], types.BlockMetaData{Source: types.DA, DAHeight: slBatch.MetaData.DA.Height}) if err != nil { return fmt.Errorf("apply block: height: %d: %w", block.Header.Height, err) @@ -55,7 +55,7 @@ func (m *Manager) ApplyBatchFromSL(slBatch *settlement.Batch) error { } } - + // validate the batch applied successfully and we are at the end height if m.State.Height() != slBatch.EndHeight { return fmt.Errorf("state height mismatch: state height: %d: batch end height: %d", m.State.Height(), slBatch.EndHeight) } @@ -63,14 +63,14 @@ func (m *Manager) ApplyBatchFromSL(slBatch *settlement.Batch) error { return nil } - - - - - - - - +// Used it when doing local rollback, and applying same blocks (instead of producing new ones) +// it was used for an edge case, eg: +// seq produced block H and gossiped +// bug in code produces app mismatch across nodes +// bug fixed, state rolled back to H-1 +// if seq produces new block H, it can lead to double signing, as the old block can still be in the p2p network +// ---- +// when this scenario encountered previously, we wanted to apply same block instead of producing new one func (m *Manager) applyLocalBlock() error { defer m.retrieverMu.Unlock() m.retrieverMu.Lock() @@ -101,7 +101,7 @@ func (m *Manager) applyLocalBlock() error { } func (m *Manager) fetchBatch(daMetaData *da.DASubmitMetaData) da.ResultRetrieveBatch { - + // Check DA client if daMetaData.Client != m.DAClient.GetClientType() { return da.ResultRetrieveBatch{ BaseResult: da.BaseResult{ @@ -112,9 +112,9 @@ func (m *Manager) fetchBatch(daMetaData *da.DASubmitMetaData) da.ResultRetrieveB } } - + // batchRes.MetaData includes proofs necessary to open disputes with the Hub batchRes := m.Retriever.RetrieveBatches(daMetaData) - - + // TODO(srene) : for invalid transactions there is no specific error code since it will need to be validated somewhere else for fraud proving. + // NMT proofs (availRes.MetaData.Proofs) are included in the result batchRes, necessary to be included in the dispute return batchRes } diff --git a/block/sequencers.go b/block/sequencers.go index ab0597222..ca6155397 100644 --- a/block/sequencers.go +++ b/block/sequencers.go @@ -14,7 +14,7 @@ const ( var errRotationRequested = fmt.Errorf("sequencer rotation started. signal to stop production") func (m *Manager) MonitorProposerRotation(ctx context.Context) error { - ticker := time.NewTicker(ProposerMonitorInterval) + ticker := time.NewTicker(ProposerMonitorInterval) // TODO: make this configurable defer ticker.Stop() for { @@ -27,12 +27,12 @@ func (m *Manager) MonitorProposerRotation(ctx context.Context) error { m.logger.Error("Check rotation in progress", "err", err) continue } - + // no rotation in progress if nextProposer == nil { continue } - + // we get here once a sequencer rotation signal is received m.logger.Info("Sequencer rotation started.", "nextSeqAddr", nextProposer.SettlementAddress) return errRotationRequested } @@ -50,18 +50,18 @@ func (m *Manager) MonitorSequencerSetUpdates(ctx context.Context) error { case <-ticker.C: err := m.UpdateSequencerSetFromSL() if err != nil { - + // this error is not critical m.logger.Error("Cannot fetch sequencer set from the Hub", "error", err) } } } } - - +// AmIProposerOnSL checks if the current node is the proposer on the hub +// Proposer on the Hub is not necessarily the proposer on the Rollapp during rotation phase. 
func (m *Manager) AmIProposerOnSL() (bool, error) { localProposerKeyBytes, _ := m.LocalKey.GetPublic().Raw() - + // get hub proposer key SLProposer, err := m.SLClient.GetProposerAtHeight(-1) if err != nil { return false, fmt.Errorf("get proposer at height: %w", err) } @@ -69,8 +69,8 @@ func (m *Manager) AmIProposerOnSL() (bool, error) { return bytes.Equal(SLProposer.PubKey().Bytes(), localProposerKeyBytes), nil } - - +// AmIProposerOnRollapp checks if the current node is the proposer on the rollapp. +// Proposer on the rollapp is not necessarily the proposer on the hub during rotation phase. func (m *Manager) AmIProposerOnRollapp() bool { if m.State.GetProposer() == nil { return false } @@ -81,8 +81,8 @@ func (m *Manager) AmIProposerOnRollapp() bool { return bytes.Equal(rollappProposer, localProposerKeyBytes) } - - +// ShouldRotate checks if we are in the middle of rotation and whether we are the rotating proposer (i.e. the current proposer on the hub). +// We check it by checking if there is a "next" proposer on the hub which is not us. func (m *Manager) ShouldRotate() (bool, error) { nextProposer, err := m.SLClient.GetNextProposer() if err != nil { @@ -91,8 +91,8 @@ func (m *Manager) ShouldRotate() (bool, error) { if nextProposer == nil { return false, nil } - - + // At this point we know that there is a next proposer, + // so we should rotate only if we are the current proposer on the hub amIProposerOnSL, err := m.AmIProposerOnSL() if err != nil { return false, fmt.Errorf("am i proposer on SL: %w", err) } @@ -100,13 +100,13 @@ func (m *Manager) ShouldRotate() (bool, error) { return amIProposerOnSL, nil } - - - - - +// rotate rotates the current proposer by doing the following: +// 1. Creating the last block with the new proposer, which will stop it from producing blocks. +// 2. Submitting the last batch +// 3. Panicking so the node restarts as a full node +// Note: In case it already created its last block, it will only try to submit the last batch. func (m *Manager) rotate(ctx context.Context) { - + // Get Next Proposer from SL. We assume such exists (even if empty proposer) otherwise function wouldn't be called. nextProposer, err := m.SLClient.GetNextProposer() if err != nil || nextProposer == nil { panic(fmt.Sprintf("rotate: fetch next proposer set from Hub: %v", err)) } @@ -127,8 +127,8 @@ func (m *Manager) rotate(ctx context.Context) { panic("rotate: sequencer is no longer the proposer. restarting as a full node") } - - +// CreateAndPostLastBatch creates and posts the last batch to the hub +// this is called after the manager shuts down the block producer and submitter func (m *Manager) CreateAndPostLastBatch(ctx context.Context, nextSeqHash [32]byte) error { h := m.State.Height() block, err := m.Store.LoadBlock(h) if err != nil { return fmt.Errorf("load block: height: %d: %w", h, err) } - - + // check if the last block was already produced with NextProposerHash set. + // After creating the last block, the sequencer will be restarted so it will not be able to produce blocks anymore.
if bytes.Equal(block.Header.NextSequencersHash[:], nextSeqHash[:]) { m.logger.Debug("Last block already produced and applied.") } else { @@ -147,7 +147,7 @@ func (m *Manager) CreateAndPostLastBatch(ctx context.Context, nextSeqHash [32]by } } - + // Submit all data accumulated thus far and the last state update for { b, err := m.CreateAndSubmitBatch(m.Conf.BatchSubmitBytes, true) if err != nil { @@ -162,9 +162,9 @@ func (m *Manager) CreateAndPostLastBatch(ctx context.Context, nextSeqHash [32]by return nil } - - - +// UpdateSequencerSetFromSL updates the sequencer set from the SL. The sequencer set is saved only in memory. +// It will be persisted to the store when the block is produced (only in the proposer mode). +// Proposer is not changed here. func (m *Manager) UpdateSequencerSetFromSL() error { seqs, err := m.SLClient.GetAllSequencers() if err != nil { @@ -175,9 +175,9 @@ func (m *Manager) UpdateSequencerSetFromSL() error { return nil } - +// UpdateProposerFromSL queries the hub and updates the local dymint state proposer at the current height func (m *Manager) UpdateProposerFromSL() error { - SLProposer, err := m.SLClient.GetProposerAtHeight(int64(m.State.NextHeight())) + SLProposer, err := m.SLClient.GetProposerAtHeight(int64(m.State.NextHeight())) //nolint:gosec // height is non-negative and falls in int64 if err != nil { return fmt.Errorf("get proposer at height: %w", err) } diff --git a/block/slvalidator.go b/block/slvalidator.go index 700911dc5..bf9b8ac0a 100644 --- a/block/slvalidator.go +++ b/block/slvalidator.go @@ -13,14 +13,14 @@ import ( "github.com/dymensionxyz/dymint/types" ) - +// SettlementValidator validates batches from settlement layer with the corresponding blocks from DA and P2P. type SettlementValidator struct { logger types.Logger blockManager *Manager lastValidatedHeight atomic.Uint64 } - +// NewSettlementValidator returns a new StateUpdateValidator instance. func NewSettlementValidator(logger types.Logger, blockManager *Manager) *SettlementValidator { lastValidatedHeight, err := blockManager.Store.LoadValidationHeight() if err != nil { @@ -36,13 +36,13 @@ func NewSettlementValidator(logger types.Logger, blockManager *Manager) *Settlem return validator } - - - +// ValidateStateUpdate validates that the blocks from the state info are available in DA, +// that the information included in the Hub state info matches the blocks retrieved from DA +// and those blocks are the same that are obtained via P2P. func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrieveBatch) error { v.logger.Debug("validating state update", "start height", batch.StartHeight, "end height", batch.EndHeight) - + // loads blocks applied from P2P, if any. 
p2pBlocks := make(map[uint64]*types.Block) for height := batch.StartHeight; height <= batch.EndHeight; height++ { source, err := v.blockManager.Store.LoadBlockSource(height) @@ -51,7 +51,7 @@ func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrie continue } - + // if block is not P2P block, skip if source != types.Gossiped && source != types.BlockSync { continue } @@ -64,7 +64,7 @@ func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrie p2pBlocks[block.Header.Height] = block } - + // load all DA blocks from the batch to be validated var daBatch da.ResultRetrieveBatch for { daBatch = v.blockManager.Retriever.RetrieveBatches(batch.MetaData.DA) @@ -72,18 +72,18 @@ func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrie break } - + // fraud detected in case blob is retrieved but unable to get blocks from it. if errors.Is(daBatch.BaseResult.Error, da.ErrBlobNotParsed) { return types.NewErrStateUpdateBlobCorruptedFraud(batch.StateIndex, string(batch.MetaData.DA.Client), batch.MetaData.DA.Height, hex.EncodeToString(batch.MetaData.DA.Commitment)) } - + // fraud detected in case availability checks fail and therefore there is certainty that the blob, according to the state update DA path, is not available. checkBatchResult := v.blockManager.Retriever.CheckBatchAvailability(batch.MetaData.DA) if errors.Is(checkBatchResult.Error, da.ErrBlobNotIncluded) { return types.NewErrStateUpdateBlobNotAvailableFraud(batch.StateIndex, string(batch.MetaData.DA.Client), batch.MetaData.DA.Height, hex.EncodeToString(batch.MetaData.DA.Commitment)) } - + // FIXME: how to handle non-happy case? not returning error? continue } @@ -93,18 +93,18 @@ func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrie types.LastReceivedDAHeightGauge.Set(float64(batch.EndHeight())) } - + // validate DA blocks against the state update err := v.ValidateDaBlocks(batch, daBlocks) if err != nil { return err } - + // nothing to validate at P2P level, finish here. if len(p2pBlocks) == 0 { return nil } - + // validate P2P blocks against DA blocks err = v.ValidateP2PBlocks(daBlocks, p2pBlocks) if err != nil { return err } @@ -113,10 +113,10 @@ func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrie return nil } - - +// ValidateP2PBlocks checks that the blocks applied from P2P are the same blocks included in the batch and retrieved from DA. +// Since DA blocks have already been validated against the Hub state info block descriptors, if P2P blocks match the DA blocks, it means they are also validated against the state info block descriptors. func (v *SettlementValidator) ValidateP2PBlocks(daBlocks []*types.Block, p2pBlocks map[uint64]*types.Block) error { - + // iterate over daBlocks and compare hashes with the corresponding block from P2P (if it exists) to see whether they are actually the same block for _, daBlock := range daBlocks { p2pBlock, ok := p2pBlocks[daBlock.Header.Height] @@ -140,9 +140,9 @@ func (v *SettlementValidator) ValidateP2PBlocks(daBlocks []*types.Block, p2pBloc return nil } - +// ValidateDaBlocks checks that the information included in the Hub state info (heights, state roots and timestamps) corresponds to the blocks obtained from DA.
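// The P2P-vs-DA comparison described above boils down to hashing both encodings of a
// block and checking they are identical. A minimal, self-contained sketch of that idea,
// using plain SHA-256 over some serialized form; dymint's actual blockHash helper and
// block encoding may differ:
package sketch

import (
	"bytes"
	"crypto/sha256"
)

func sameBlock(daBlockBytes, p2pBlockBytes []byte) bool {
	daSum := sha256.Sum256(daBlockBytes)
	p2pSum := sha256.Sum256(p2pBlockBytes)
	return bytes.Equal(daSum[:], p2pSum[:])
}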
func (v *SettlementValidator) ValidateDaBlocks(slBatch *settlement.ResultRetrieveBatch, daBlocks []*types.Block) error { - + // we first verify that the number of blocks included in the state info matches the block descriptors and the blocks obtained from DA numSlBDs := uint64(len(slBatch.BlockDescriptors)) numSLBlocks := slBatch.NumBlocks numDABlocks := uint64(len(daBlocks)) @@ -150,36 +150,36 @@ func (v *SettlementValidator) ValidateDaBlocks(slBatch *settlement.ResultRetriev return types.NewErrStateUpdateNumBlocksNotMatchingFraud(slBatch.EndHeight, numSLBlocks, numSLBlocks, numDABlocks) } - + // we compare all DA blocks against the information included in the state info block descriptors for i, bd := range slBatch.BlockDescriptors { - + // height check if bd.Height != daBlocks[i].Header.Height { return types.NewErrStateUpdateHeightNotMatchingFraud(slBatch.StateIndex, slBatch.BlockDescriptors[0].Height, daBlocks[0].Header.Height, slBatch.BlockDescriptors[len(slBatch.BlockDescriptors)-1].Height, daBlocks[len(daBlocks)-1].Header.Height) } - + // we compare the state root between SL state info and DA block if !bytes.Equal(bd.StateRoot, daBlocks[i].Header.AppHash[:]) { return types.NewErrStateUpdateStateRootNotMatchingFraud(slBatch.StateIndex, bd.Height, bd.StateRoot, daBlocks[i].Header.AppHash[:]) } - + // we compare the timestamp between SL state info and DA block if !bd.Timestamp.Equal(daBlocks[i].Header.GetTimestamp()) { return types.NewErrStateUpdateTimestampNotMatchingFraud(slBatch.StateIndex, bd.Height, bd.Timestamp, daBlocks[i].Header.GetTimestamp()) } - + // we validate the block descriptor DRS version per height err := v.validateDRS(slBatch.StateIndex, bd.Height, bd.DrsVersion) if err != nil { return err } } - - - - + // we compare the sequencer address between SL state info and DA block + // if next sequencer is not set, we check if the sequencer hash is equal to the next sequencer hash + // because it did not change. If the next sequencer is set, we check if the next sequencer hash is equal on the + // last block of the batch lastDABlock := daBlocks[numSlBDs-1] - + // if lastDABlock is the block previous to a fork, don't validate the next sequencer hash of the last block because it will not match if v.blockManager.State.RevisionStartHeight-1 == lastDABlock.Header.Height { v.logger.Debug("DA blocks, previous to fork, validated successfully", "start height", daBlocks[0].Header.Height, "end height", daBlocks[len(daBlocks)-1].Header.Height) return nil } @@ -202,8 +202,8 @@ func (v *SettlementValidator) ValidateDaBlocks(slBatch *settlement.ResultRetriev return nil } - - +// UpdateLastValidatedHeight sets the height saved in the Store if it is higher than the existing height +// it returns once the value was updated successfully or did not need to be updated func (v *SettlementValidator) UpdateLastValidatedHeight(height uint64) { for { curr := v.lastValidatedHeight.Load() @@ -217,17 +217,17 @@ func (v *SettlementValidator) UpdateLastValidatedHeight(height uint64) { } } - +// GetLastValidatedHeight returns the last block height that was validated with settlement state updates. func (v *SettlementValidator) GetLastValidatedHeight() uint64 { return v.lastValidatedHeight.Load() } - +// NextValidationHeight returns the next height that needs to be validated with settlement state updates. func (v *SettlementValidator) NextValidationHeight() uint64 { return v.lastValidatedHeight.Load() + 1 } - +// validateDRS compares the DRS version stored for the specific height, obtained from rollapp params.
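// UpdateLastValidatedHeight above appears to rely on a load / compare-and-swap style
// loop so the stored value only ever moves forward, even with concurrent callers. A
// minimal standalone sketch of that monotonic-update pattern (the helper name is
// illustrative, not dymint's):
package sketch

import "sync/atomic"

func storeIfHigher(v *atomic.Uint64, h uint64) {
	for {
		curr := v.Load()
		if h <= curr || v.CompareAndSwap(curr, h) {
			return // either nothing to do, or we won the swap
		}
		// another goroutine moved the value; re-read and retry
	}
}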
func (v *SettlementValidator) validateDRS(stateIndex uint64, height uint64, version uint32) error { drs, err := v.blockManager.Store.LoadDRSVersion(height) if err != nil { @@ -240,7 +240,7 @@ func (v *SettlementValidator) validateDRS(stateIndex uint64, height uint64, vers return nil } - +// blockHash generates a hash from the block bytes to compare them func blockHash(block *types.Block) ([]byte, error) { blockBytes, err := block.MarshalBinary() if err != nil { diff --git a/block/state.go b/block/state.go index 2d052de06..7b1991bc2 100644 --- a/block/state.go +++ b/block/state.go @@ -19,7 +19,7 @@ import ( "github.com/dymensionxyz/dymint/types" ) - +// LoadStateOnInit tries to load lastState from Store, and if it's not available it reads GenesisDoc. func (m *Manager) LoadStateOnInit(store store.Store, genesis *tmtypes.GenesisDoc, logger types.Logger) error { s, err := store.LoadState() if errors.Is(err, types.ErrNoStateFound) { @@ -36,18 +36,18 @@ func (m *Manager) LoadStateOnInit(store store.Store, genesis *tmtypes.GenesisDoc return nil } - - +// NewStateFromGenesis reads blockchain State from genesis. +// The active sequencer list will be set on InitChain func NewStateFromGenesis(genDoc *tmtypes.GenesisDoc) (*types.State, error) { err := genDoc.ValidateAndComplete() if err != nil { return nil, fmt.Errorf("in genesis doc: %w", err) } - - - - + // InitStateVersion sets the Consensus.Block and Software versions, + // but leaves the Consensus.App version blank. + // The Consensus.App version will be set during the Handshake, once + // we hear from the app what protocol version it is running. InitStateVersion := tmstate.Version{ Consensus: tmversion.Consensus{ Block: version.BlockProtocol, @@ -59,7 +59,7 @@ func NewStateFromGenesis(genDoc *tmtypes.GenesisDoc) (*types.State, error) { s := types.State{ Version: InitStateVersion, ChainID: genDoc.ChainID, - InitialHeight: uint64(genDoc.InitialHeight), + InitialHeight: uint64(genDoc.InitialHeight), //nolint:gosec // height is non-negative and falls in int64 ConsensusParams: *genDoc.ConsensusParams, } s.SetHeight(0) @@ -73,29 +73,29 @@ func NewStateFromGenesis(genDoc *tmtypes.GenesisDoc) (*types.State, error) { return &s, nil } - +// UpdateStateFromApp is responsible for aligning the state of the store from the abci app func (m *Manager) UpdateStateFromApp(blockHeaderHash [32]byte) error { proxyAppInfo, err := m.Executor.GetAppInfo() if err != nil { return errorsmod.Wrap(err, "get app info") } - appHeight := uint64(proxyAppInfo.LastBlockHeight) + appHeight := uint64(proxyAppInfo.LastBlockHeight) //nolint:gosec // height is non-negative and falls in int64 resp, err := m.Store.LoadBlockResponses(appHeight) if err != nil { return errorsmod.Wrap(err, "load block responses") } - + // update the state with the app hashes created on the app commit m.Executor.UpdateStateAfterCommit(m.State, resp, proxyAppInfo.LastBlockAppHash, appHeight, blockHeaderHash) return nil } func (e *Executor) UpdateStateAfterInitChain(s *types.State, res *abci.ResponseInitChain) { - - - + // If the app did not return an app hash, we keep the one set from the genesis doc in + // the state. We don't set appHash since we don't want the genesis doc app hash + // recorded in the genesis block. We should probably just remove GenesisDoc.AppHash. 
if len(res.AppHash) > 0 { copy(s.AppHash[:], res.AppHash) } @@ -106,7 +106,7 @@ func (e *Executor) UpdateStateAfterInitChain(s *types.State, res *abci.ResponseI s.ConsensusParams.Block.MaxGas = params.Block.MaxGas } } - + // We update the last results hash with the empty hash, to conform with RFC-6962. copy(s.LastResultsHash[:], merkle.HashFromByteSlices(nil)) } @@ -115,7 +115,7 @@ func (e *Executor) UpdateMempoolAfterInitChain(s *types.State) { e.mempool.SetPostCheckFn(mempool.PostCheckMaxGas(s.ConsensusParams.Block.MaxGas)) } - +// UpdateStateAfterCommit updates the state with the app hash and last results hash func (e *Executor) UpdateStateAfterCommit(s *types.State, resp *tmstate.ABCIResponses, appHash []byte, height uint64, lastHeaderHash [32]byte) { copy(s.AppHash[:], appHash[:]) copy(s.LastResultsHash[:], tmtypes.NewResults(resp.DeliverTxs).Hash()) @@ -132,26 +132,26 @@ func (e *Executor) UpdateStateAfterCommit(s *types.State, resp *tmstate.ABCIResp } } - - - +// UpdateProposerFromBlock updates the proposer from the block +// The next proposer is defined in the block header (NextSequencersHash) +// TODO: (https://github.com/dymensionxyz/dymint/issues/1008) func (e *Executor) UpdateProposerFromBlock(s *types.State, seqSet *types.SequencerSet, block *types.Block) bool { - + // no sequencer change if bytes.Equal(block.Header.SequencerHash[:], block.Header.NextSequencersHash[:]) { return false } if block.Header.NextSequencersHash == [32]byte{} { - - + // the chain will be halted until proposer is set + // TODO: recover from halt (https://github.com/dymensionxyz/dymint/issues/1021) e.logger.Info("rollapp left with no proposer. chain is halted") s.SetProposer(nil) return true } - - - + // if hash changed, update the proposer + // We assume here that we're updated with the latest sequencer set + // FIXME: Think how to handle not being updated with the latest sequencer set seq, found := seqSet.GetByHash(block.Header.NextSequencersHash[:]) if !found { e.logger.Error("cannot find proposer by hash") diff --git a/block/submit.go b/block/submit.go index 87150c3c9..3ee4e2dc4 100644 --- a/block/submit.go +++ b/block/submit.go @@ -17,11 +17,11 @@ import ( uchannel "github.com/dymensionxyz/dymint/utils/channel" ) - - - - - +// SubmitLoop is the main loop for submitting blocks to the DA and SL layers. +// It submits a batch when either +// 1) It accumulates enough block data, so it's necessary to submit a batch to avoid exceeding the max size +// 2) Enough time passed since the last submitted batch, so it's necessary to submit a batch to avoid exceeding the max time +// It will back pressure (pause) block production if it falls too far behind. func (m *Manager) SubmitLoop(ctx context.Context, bytesProduced chan int, ) (err error) { @@ -39,41 +39,41 @@ func (m *Manager) SubmitLoop(ctx context.Context, ) } - +// SubmitLoopInner is a unit testable impl of SubmitLoop func SubmitLoopInner( ctx context.Context, logger types.Logger, - bytesProduced chan int, - maxSkewTime time.Duration, - unsubmittedBlocksNum func() uint64, - unsubmittedBlocksBytes func() int, - batchSkewTime func() time.Duration, - maxBatchSubmitTime time.Duration, - maxBatchSubmitBytes uint64, + bytesProduced chan int, // a channel of block and commit bytes produced + maxSkewTime time.Duration, // max time between last submitted block and last produced block allowed. if this threshold is reached block production is stopped. 
+ unsubmittedBlocksNum func() uint64, // func that returns the amount of non-submitted blocks + unsubmittedBlocksBytes func() int, // func that returns bytes from non-submitted blocks + batchSkewTime func() time.Duration, // func that returns measured time between last submitted block and last produced block + maxBatchSubmitTime time.Duration, // max time to allow between batches + maxBatchSubmitBytes uint64, // max size of serialised batch in bytes createAndSubmitBatch func(maxSizeBytes uint64) (bytes uint64, err error), ) error { eg, ctx := errgroup.WithContext(ctx) pendingBytes := atomic.Uint64{} - trigger := uchannel.NewNudger() - submitter := uchannel.NewNudger() + trigger := uchannel.NewNudger() // used to avoid busy waiting (using cpu) on trigger thread + submitter := uchannel.NewNudger() // used to avoid busy waiting (using cpu) on submitter thread eg.Go(func() error { - - + // 'trigger': this thread is responsible for waking up the submitter when a new block arrives, and back-pressures the block production loop + // if it gets too far ahead. for { select { case <-ctx.Done(): return nil case n := <-bytesProduced: - pendingBytes.Add(uint64(n)) + pendingBytes.Add(uint64(n)) //nolint:gosec // bytes size is always positive logger.Debug("Added bytes produced to bytes pending submission counter.", "bytes added", n, "pending", pendingBytes.Load()) } submitter.Nudge() - + // if the time between the last produced block and last submitted is greater than maxSkewTime we block here until we get a progress nudge from the submitter thread if maxSkewTime < batchSkewTime() { select { case <-ctx.Done(): @@ -86,7 +86,7 @@ func SubmitLoopInner( }) eg.Go(func() error { - + // 'submitter': this thread actually creates and submits batches. this thread is woken up every batch_submit_time (in addition to every block produced) to check if there is anything to submit even if no new blocks have been produced ticker := time.NewTicker(maxBatchSubmitTime) for { select { @@ -98,7 +98,7 @@ func SubmitLoopInner( pending := pendingBytes.Load() - + // while there are accumulated blocks, create and submit batches!! for { done := ctx.Err() != nil nothingToSubmit := pending == 0 @@ -119,22 +119,22 @@ func SubmitLoopInner( logger.Error("Create and submit batch", "err", err, "pending", pending) panic(err) } - - + // this could happen if we timed-out waiting for acceptance in the previous iteration, but the batch was indeed submitted. + // we panic here cause restarting may reset the last batch submitted counter and the sequencer can potentially resume submitting batches. if errors.Is(err, gerrc.ErrAlreadyExists) { logger.Debug("Batch already accepted", "err", err, "pending", pending) panic(err) } return err } - pending = uint64(unsubmittedBlocksBytes()) - + pending = uint64(unsubmittedBlocksBytes()) //nolint:gosec // bytes size is always positive + // after new batch submitted we check the skew time to wake up 'trigger' thread and restart block production if batchSkewTime() < maxSkewTime { trigger.Nudge() } logger.Debug("Submitted a batch to both sub-layers.", "n bytes consumed from pending", nConsumed, "pending after", pending, "skew time", batchSkewTime()) } - + // update pendingBytes with non submitted block bytes after all pending batches have been submitted pendingBytes.Store(pending) } }) @@ -142,25 +142,25 @@ func SubmitLoopInner( return eg.Wait() } - - - +// CreateAndSubmitBatchGetSizeBlocksCommits creates and submits a batch to the DA and SL. 
+// Returns size of block and commit bytes +// max size bytes is the maximum size of the serialized batch type func (m *Manager) CreateAndSubmitBatchGetSizeBlocksCommits(maxSize uint64) (uint64, error) { b, err := m.CreateAndSubmitBatch(maxSize, false) if b == nil { return 0, err } - return uint64(b.SizeBlockAndCommitBytes()), err + return uint64(b.SizeBlockAndCommitBytes()), err //nolint:gosec // size is always positive and falls in uint64 } - - +// CreateAndSubmitBatch creates and submits a batch to the DA and SL. +// max size bytes is the maximum size of the serialized batch type func (m *Manager) CreateAndSubmitBatch(maxSizeBytes uint64, lastBatch bool) (*types.Batch, error) { startHeight := m.NextHeightToSubmit() endHeightInclusive := m.State.Height() if endHeightInclusive < startHeight { - + // TODO: https://github.com/dymensionxyz/dymint/issues/999 return nil, fmt.Errorf( "next height to submit is greater than last block height, create and submit batch should not have been called: start height: %d: end height inclusive: %d: %w", startHeight, @@ -173,7 +173,7 @@ func (m *Manager) CreateAndSubmitBatch(maxSizeBytes uint64, lastBatch bool) (*ty if err != nil { return nil, fmt.Errorf("create batch: %w", err) } - + // This is the last batch, so we need to mark it as such if lastBatch && b.EndHeight() == endHeightInclusive { b.LastBatch = true } @@ -187,8 +187,8 @@ func (m *Manager) CreateAndSubmitBatch(maxSizeBytes uint64, lastBatch bool) (*ty return b, nil } - - +// CreateBatch looks through the store for any unsubmitted blocks and commits and bundles them into a batch +// max size bytes is the maximum size of the serialized batch type func (m *Manager) CreateBatch(maxBatchSize uint64, startHeight uint64, endHeightInclusive uint64) (*types.Batch, error) { batchSize := endHeightInclusive - startHeight + 1 batch := &types.Batch{ @@ -211,7 +211,7 @@ func (m *Manager) CreateBatch(maxBatchSize uint64, startHeight uint64, endHeight return nil, fmt.Errorf("load drs version: h: %d: %w", h, err) } - + // check all blocks have the same revision if len(batch.Blocks) > 0 && batch.Blocks[len(batch.Blocks)-1].GetRevision() != block.GetRevision() { return nil, fmt.Errorf("create batch: batch includes blocks with different revisions: %w", gerrc.ErrInternal) } @@ -221,9 +221,9 @@ func (m *Manager) CreateBatch(maxBatchSize uint64, startHeight uint64, endHeight batch.DRSVersion = append(batch.DRSVersion, drsVersion) totalSize := batch.SizeBytes() - if maxBatchSize < uint64(totalSize) { + if maxBatchSize < uint64(totalSize) { //nolint:gosec // size is always positive and falls in uint64 - + // Remove the last block and commit from the batch batch.Blocks = batch.Blocks[:len(batch.Blocks)-1] batch.Commits = batch.Commits[:len(batch.Commits)-1] batch.DRSVersion = batch.DRSVersion[:len(batch.DRSVersion)-1] @@ -256,17 +256,19 @@ func (m *Manager) SubmitBatch(batch *types.Batch) error { types.RollappHubHeightGauge.Set(float64(batch.EndHeight())) m.LastSettlementHeight.Store(batch.EndHeight()) - + // update last submitted block time with batch last block (used to calculate max skew time) m.LastBlockTimeInSettlement.Store(batch.Blocks[len(batch.Blocks)-1].Header.GetTimestamp().UTC().UnixNano()) return err } - - +// GetUnsubmittedBytes returns the total number of unsubmitted bytes produced an element on a channel +// Intended only to be used at startup, before block production and submission loops start func (m *Manager) GetUnsubmittedBytes() int { total := 0 - + /* + On node start we want to include the count of 
any blocks which were produced and not submitted in a previous instance + */ currH := m.State.Height() for h := m.NextHeightToSubmit(); h <= currH; h++ { @@ -294,8 +296,8 @@ func (m *Manager) GetUnsubmittedBlocks() uint64 { return m.State.Height() - m.LastSettlementHeight.Load() } - - +// UpdateLastSubmittedHeight will update last height submitted height upon events. +// This may be necessary in case we crashed/restarted before getting response for our submission to the settlement layer. func (m *Manager) UpdateLastSubmittedHeight(event pubsub.Message) { eventData, ok := event.Data().(*settlement.EventDataNewBatch) if !ok { @@ -312,7 +314,7 @@ func (m *Manager) UpdateLastSubmittedHeight(event pubsub.Message) { } } - +// GetBatchSkewTime returns the time between the last produced block and the last block submitted to SL func (m *Manager) GetBatchSkewTime() time.Duration { lastProducedTime := time.Unix(0, m.LastBlockTime.Load()) lastSubmittedTime := time.Unix(0, m.LastBlockTimeInSettlement.Load()) diff --git a/block/sync.go b/block/sync.go index bef64587e..9c3605669 100644 --- a/block/sync.go +++ b/block/sync.go @@ -12,7 +12,7 @@ import ( "github.com/dymensionxyz/dymint/settlement" ) - +// onNewStateUpdate will update the last submitted height and will update sequencers list from SL. After, it triggers syncing or validation, depending whether it needs to sync first or only validate. func (m *Manager) onNewStateUpdate(event pubsub.Message) { eventData, ok := event.Data().(*settlement.EventDataNewBatch) if !ok { @@ -20,32 +20,32 @@ func (m *Manager) onNewStateUpdate(event pubsub.Message) { return } - + // Update heights based on state update end height m.LastSettlementHeight.Store(eventData.EndHeight) - + // Update sequencers list from SL err := m.UpdateSequencerSetFromSL() if err != nil { - + // this error is not critical m.logger.Error("Cannot fetch sequencer set from the Hub", "error", err) } if eventData.EndHeight > m.State.Height() { - + // trigger syncing from settlement last state update. m.triggerSettlementSyncing() - + // update target height used for syncing status rpc m.UpdateTargetHeight(eventData.EndHeight) } else { - + // trigger validation of the last state update available in settlement m.triggerSettlementValidation() } } +// SettlementSyncLoop listens for syncing triggers which indicate new settlement height updates, and attempts to sync to the last seen settlement height. +// Syncing triggers can be called when a new settlement state update event arrives or explicitly from the `updateFromLastSettlementState` method which is only being called upon startup. +// Upon new trigger, we know the settlement reached a new height we haven't seen before so a validation signal is sent to validate the settlement batch. - - - - +// Note: even when a sync is triggered, there is no guarantee that the batch will be applied from settlement as there is a race condition with the p2p/blocksync for syncing. func (m *Manager) SettlementSyncLoop(ctx context.Context) error { for { select { @@ -55,12 +55,12 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error { m.logger.Info("syncing to target height", "targetHeight", m.LastSettlementHeight.Load()) for currH := m.State.NextHeight(); currH <= m.LastSettlementHeight.Load(); currH = m.State.NextHeight() { - + // if context has been cancelled, stop syncing if ctx.Err() != nil { return nil } - - + // if we have the block locally, we don't need to fetch it from the DA. + // it will only happen in case of rollback. 
err := m.applyLocalBlock() if err == nil { m.logger.Info("Synced from local", "store height", m.State.Height(), "target height", m.LastSettlementHeight.Load()) @@ -76,12 +76,12 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error { } m.logger.Info("Retrieved state update from SL.", "state_index", settlementBatch.StateIndex) - + // we update LastBlockTimeInSettlement to be able to measure batch skew time with last block time in settlement m.LastBlockTimeInSettlement.Store(settlementBatch.BlockDescriptors[len(settlementBatch.BlockDescriptors)-1].GetTimestamp().UTC().UnixNano()) err = m.ApplyBatchFromSL(settlementBatch.Batch) - + // this will keep sync loop alive when DA is down or retrievals are failing because DA issues. if errors.Is(err, da.ErrRetrieval) { continue } @@ -91,7 +91,7 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error { m.logger.Info("Synced from DA", "store height", m.State.Height(), "target height", m.LastSettlementHeight.Load()) - + // trigger state update validation, after each state update is applied m.triggerSettlementValidation() err = m.attemptApplyCachedBlocks() @@ -101,10 +101,10 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error { } - + // avoid notifying as synced in case it fails before if m.State.Height() >= m.LastSettlementHeight.Load() { m.logger.Info("Synced.", "current height", m.State.Height(), "last submitted height", m.LastSettlementHeight.Load()) - + // nudge to signal to any listens that we're currently synced with the last settlement height we've seen so far m.syncedFromSettlement.Nudge() } @@ -112,14 +112,14 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error { } } - +// waitForSyncing waits for synced nudge (in case it needs to because it was syncing) func (m *Manager) waitForSettlementSyncing() { if m.State.Height() < m.LastSettlementHeight.Load() { <-m.syncedFromSettlement.C } } - +// triggerStateUpdateSyncing sends signal to channel used by syncing loop func (m *Manager) triggerSettlementSyncing() { select { case m.settlementSyncingC <- struct{}{}: @@ -128,7 +128,7 @@ func (m *Manager) triggerSettlementSyncing() { } } - +// triggerStateUpdateValidation sends signal to channel used by validation loop func (m *Manager) triggerSettlementValidation() { select { case m.settlementValidationC <- struct{}{}: diff --git a/block/validate.go b/block/validate.go index e4078fe8a..d2a86d07f 100644 --- a/block/validate.go +++ b/block/validate.go @@ -11,8 +11,8 @@ import ( "github.com/tendermint/tendermint/libs/pubsub" ) - - +// onNewStateUpdateFinalized will update the last validated height with the last finalized height. +// Unlike pending heights, once heights are finalized, we treat them as validated as there is no point validating finalized heights. func (m *Manager) onNewStateUpdateFinalized(event pubsub.Message) { eventData, ok := event.Data().(*settlement.EventDataNewBatch) if !ok { @@ -22,7 +22,7 @@ func (m *Manager) onNewStateUpdateFinalized(event pubsub.Message) { m.SettlementValidator.UpdateLastValidatedHeight(eventData.EndHeight) } - +// SettlementValidateLoop listens for syncing events (from new state update or from initial syncing) and validates state updates to the last submitted height. 
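// The trigger helpers above send on a channel inside a select so the caller never
// blocks and repeated triggers coalesce while the loop is busy. A minimal sketch of
// that signalling pattern; the 1-slot buffer and the names here are assumptions for
// the example, not dymint's exact channel setup:
package sketch

type trigger struct{ c chan struct{} }

func newTrigger() trigger { return trigger{c: make(chan struct{}, 1)} }

func (t trigger) fire() {
	select {
	case t.c <- struct{}{}:
	default: // a signal is already pending; the consuming loop will run anyway
	}
}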
func (m *Manager) SettlementValidateLoop(ctx context.Context) error { for { select { @@ -33,14 +33,14 @@ func (m *Manager) SettlementValidateLoop(ctx context.Context) error { m.logger.Info("validating state updates to target height", "targetHeight", targetValidationHeight) for currH := m.SettlementValidator.NextValidationHeight(); currH <= targetValidationHeight; currH = m.SettlementValidator.NextValidationHeight() { - + // get next batch that needs to be validated from SL batch, err := m.SLClient.GetBatchAtHeight(currH) if err != nil { uevent.MustPublish(ctx, m.Pubsub, &events.DataHealthStatus{Error: err}, events.HealthStatusList) return err } - + // validate batch err = m.SettlementValidator.ValidateStateUpdate(batch) if err != nil { if errors.Is(err, gerrc.ErrFault) { @@ -51,7 +51,7 @@ func (m *Manager) SettlementValidateLoop(ctx context.Context) error { return err } - + // update the last validated height to the batch last block height m.SettlementValidator.UpdateLastValidatedHeight(batch.EndHeight) m.logger.Debug("state info validated", "lastValidatedHeight", m.SettlementValidator.GetLastValidatedHeight()) diff --git a/cmd/dymint/commands/init.go b/cmd/dymint/commands/init.go index ce3ee91e3..9587731fd 100644 --- a/cmd/dymint/commands/init.go +++ b/cmd/dymint/commands/init.go @@ -14,7 +14,7 @@ import ( tmtime "github.com/tendermint/tendermint/types/time" ) - +// InitFilesCmd initialises a fresh Dymint Core instance. var InitFilesCmd = &cobra.Command{ Use: "init", Short: "Initialize Dymint", @@ -25,9 +25,9 @@ func initFiles(cmd *cobra.Command, args []string) error { return InitFilesWithConfig(tmconfig) } - +// InitFilesWithConfig initialises a fresh Dymint instance. func InitFilesWithConfig(config *cfg.Config) error { - + // private validator privValKeyFile := config.PrivValidatorKeyFile() privValStateFile := config.PrivValidatorStateFile() var pv *privval.FilePV @@ -52,7 +52,7 @@ func InitFilesWithConfig(config *cfg.Config) error { logger.Info("Generated node key", "path", nodeKeyFile) } - + // genesis file genFile := config.GenesisFile() if tmos.FileExists(genFile) { logger.Info("Found genesis file", "path", genFile) diff --git a/cmd/dymint/commands/root.go b/cmd/dymint/commands/root.go index 8db70aedc..af981f80e 100644 --- a/cmd/dymint/commands/root.go +++ b/cmd/dymint/commands/root.go @@ -28,8 +28,8 @@ func registerFlagsRootCmd(cmd *cobra.Command) { cmd.PersistentFlags().String("log_level", tmconfig.LogLevel, "log level") } - - +// ParseConfig retrieves the default environment configuration, +// sets up the Dymint root and ensures that the root exists func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) { conf := cfg.DefaultConfig() err := viper.Unmarshal(conf) @@ -60,14 +60,14 @@ func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) { return conf, nil } - +// RootCmd is the root command for Dymint core. 
var RootCmd = &cobra.Command{ Use: "dymint", Short: "ABCI-client implementation for dymension's autonomous rollapps", PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) { v := viper.GetViper() - + // cmd.Flags() includes flags from this command and all persistent flags from the parent if err := v.BindPFlags(cmd.Flags()); err != nil { return err } diff --git a/cmd/dymint/commands/show_node_id.go b/cmd/dymint/commands/show_node_id.go index 30d3c9e87..1ca1b3322 100644 --- a/cmd/dymint/commands/show_node_id.go +++ b/cmd/dymint/commands/show_node_id.go @@ -10,7 +10,7 @@ import ( "github.com/tendermint/tendermint/p2p" ) - +// ShowNodeIDCmd dumps node's ID to the standard output. var ShowNodeIDCmd = &cobra.Command{ Use: "show-node-id", Aliases: []string{"show_node_id"}, @@ -27,7 +27,7 @@ func showNodeID(cmd *cobra.Command, args []string) error { if err != nil { return err } - + // convert nodeKey to libp2p key host, err := libp2p.New(libp2p.Identity(signingKey)) if err != nil { return err diff --git a/cmd/dymint/commands/show_sequencer.go b/cmd/dymint/commands/show_sequencer.go index 2faff6840..cb6e72955 100644 --- a/cmd/dymint/commands/show_sequencer.go +++ b/cmd/dymint/commands/show_sequencer.go @@ -9,13 +9,13 @@ import ( "github.com/tendermint/tendermint/privval" ) - +// ShowSequencer adds capabilities for showing the validator info. var ShowSequencer = &cobra.Command{ Use: "show-sequencer", Aliases: []string{"show_sequencer"}, Short: "Show this node's sequencer info", RunE: showSequencer, - + // PreRun: deprecateSnakeCase, } func showSequencer(cmd *cobra.Command, args []string) error { diff --git a/cmd/dymint/commands/start.go b/cmd/dymint/commands/start.go index 1615ff2cd..3bfa6e503 100644 --- a/cmd/dymint/commands/start.go +++ b/cmd/dymint/commands/start.go @@ -32,8 +32,8 @@ import ( var genesisHash []byte - - +// NewRunNodeCmd returns the command that allows the CLI to start a node. +// It can be used with a custom PrivValidator and in-process ABCI application. func NewRunNodeCmd() *cobra.Command { cmd := &cobra.Command{ Use: "start", @@ -125,7 +125,7 @@ func startInProcess(config *cfg.NodeConfig, tmConfig *tmcfg.Config, logger log.L logger.Info("Started dymint node") - + // Stop upon receiving SIGTERM or CTRL-C. tmos.TrapSignal(logger, func() { logger.Info("Caught SIGTERM. Exiting...") if dymintNode.IsRunning() { @@ -135,7 +135,7 @@ func startInProcess(config *cfg.NodeConfig, tmConfig *tmcfg.Config, logger log.L } }) - + // Run forever. select {} } @@ -148,7 +148,7 @@ func checkGenesisHash(config *tmcfg.Config) error { return nil } - + // Calculate SHA-256 hash of the genesis file. f, err := os.Open(config.GenesisFile()) if err != nil { return fmt.Errorf("can't open genesis file: %w", err) @@ -164,7 +164,7 @@ func checkGenesisHash(config *tmcfg.Config) error { } actualHash := h.Sum(nil) - + // Compare with the flag. 
if !bytes.Equal(genesisHash, actualHash) { return fmt.Errorf( "--genesis_hash=%X does not match %s hash: %X", diff --git a/cmd/dymint/main.go b/cmd/dymint/main.go index 200c33f82..631383649 100644 --- a/cmd/dymint/main.go +++ b/cmd/dymint/main.go @@ -20,7 +20,7 @@ func main() { cli.NewCompletionCmd(rootCmd, true), ) - + // Create & start node rootCmd.AddCommand(commands.NewRunNodeCmd()) cmd := cli.PrepareBaseCmd(rootCmd, "DM", os.ExpandEnv(filepath.Join("$HOME", config.DefaultDymintDir))) diff --git a/config/config.go b/config/config.go index 65b9e09e3..c19c58277 100644 --- a/config/config.go +++ b/config/config.go @@ -14,7 +14,7 @@ import ( ) const ( - + // DefaultDymintDir is the default directory for dymint DefaultDymintDir = ".dymint" DefaultConfigDirName = "config" DefaultConfigFileName = "dymint.toml" @@ -23,63 +23,63 @@ const ( MaxBatchSubmitTime = 1 * time.Hour ) - +// NodeConfig stores Dymint node configuration. type NodeConfig struct { - + // parameters below are translated from existing config RootDir string DBPath string RPC RPCConfig MempoolConfig tmcfg.MempoolConfig - + // parameters below are dymint specific and read from config BlockManagerConfig `mapstructure:",squash"` DAConfig string `mapstructure:"da_config"` SettlementLayer string `mapstructure:"settlement_layer"` SettlementConfig settlement.Config `mapstructure:",squash"` Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"` - + // Config params for mock grpc da DAGrpc grpc.Config `mapstructure:",squash"` - + // P2P Options P2PConfig `mapstructure:",squash"` - + // DB Options DBConfig `mapstructure:"db"` } - +// BlockManagerConfig consists of all parameters required by the block manager type BlockManagerConfig struct { - + // BlockTime defines how often new blocks are produced BlockTime time.Duration `mapstructure:"block_time"` - + // MaxIdleTime defines how long the block manager should wait for new transactions before producing an empty block MaxIdleTime time.Duration `mapstructure:"max_idle_time"` - + // MaxProofTime defines the max time to be idle, if txs that require proof were included in the last block MaxProofTime time.Duration `mapstructure:"max_proof_time"` - + // BatchSubmitTime is the maximum time the block manager waits before submitting a batch BatchSubmitTime time.Duration `mapstructure:"batch_submit_time"` - + // MaxSkewTime is the maximum time allowed between the last submitted batch and the last produced block. Block production will be paused if this limit is reached. MaxSkewTime time.Duration `mapstructure:"max_skew_time"` - + // The size of the batch of blocks and commits in Bytes. We'll write every batch to the DA and the settlement layer. BatchSubmitBytes uint64 `mapstructure:"batch_submit_bytes"` - + // SequencerSetUpdateInterval defines the interval at which to fetch sequencer updates from the settlement layer SequencerSetUpdateInterval time.Duration `mapstructure:"sequencer_update_interval"` } - +// GetViperConfig reads configuration parameters from Viper instance.
func (nc *NodeConfig) GetViperConfig(cmd *cobra.Command, homeDir string) error { v := viper.GetViper() - + // Loads dymint toml config file EnsureRoot(homeDir, nil) v.SetConfigName("dymint") - v.AddConfigPath(homeDir) - v.AddConfigPath(filepath.Join(homeDir, DefaultConfigDirName)) + v.AddConfigPath(homeDir) // search root directory + v.AddConfigPath(filepath.Join(homeDir, DefaultConfigDirName)) // search root directory /config - + // bind flags so we could override config file with flags err := BindDymintFlags(cmd, v) if err != nil { return err } - + // Read viper config err = v.ReadInConfig() if err != nil { return err @@ -126,7 +126,7 @@ func (nc NodeConfig) Validate() error { return nil } - +// Validate BlockManagerConfig func (c BlockManagerConfig) Validate() error { if c.BlockTime < MinBlockTime { return fmt.Errorf("block_time cannot be less than %s", MinBlockTime) @@ -139,7 +139,7 @@ func (c BlockManagerConfig) Validate() error { if c.MaxIdleTime < 0 { return fmt.Errorf("max_idle_time must be positive or zero to disable") } - + // MaxIdleTime zero disables adaptive block production. if c.MaxIdleTime != 0 { if c.MaxIdleTime <= c.BlockTime || c.MaxIdleTime > MaxBatchSubmitTime { return fmt.Errorf("max_idle_time must be greater than block_time and not greater than %s", MaxBatchSubmitTime) @@ -203,14 +203,14 @@ func (nc NodeConfig) validateInstrumentation() error { return nc.Instrumentation.Validate() } - +// InstrumentationConfig defines the configuration for metrics reporting. type InstrumentationConfig struct { - - - + // When true, Prometheus metrics are served under /metrics on + // PrometheusListenAddr. + // Check out the documentation for the list of available metrics. Prometheus bool `mapstructure:"prometheus"` - + // Address to listen for Prometheus collector(s) connections. PrometheusListenAddr string `mapstructure:"prometheus_listen_addr"` } @@ -222,11 +222,11 @@ func (ic InstrumentationConfig) Validate() error { return nil } - +// DBConfig holds configuration for the database. type DBConfig struct { - + // SyncWrites makes sure that data is written to disk before returning from a write operation. SyncWrites bool `mapstructure:"sync_writes"` - + // InMemory sets the database to run in-memory, without touching the disk. InMemory bool `mapstructure:"in_memory"` } diff --git a/config/defaults.go b/config/defaults.go index 0a75b14a6..b72ef3aac 100644 --- a/config/defaults.go +++ b/config/defaults.go @@ -9,7 +9,7 @@ import ( ) const ( - + // DefaultListenAddress is a default listen address for P2P client. DefaultListenAddress = "/ip4/0.0.0.0/tcp/26656" DefaultHomeDir = "sequencer_keys" @@ -17,10 +17,10 @@ const ( DefaultSequencerSetUpdateInterval = 3 * time.Minute ) - +// DefaultNodeConfig keeps default values of NodeConfig var DefaultNodeConfig = *DefaultConfig("") - +// DefaultConfig returns a default configuration for dymint node. 
func DefaultConfig(home string) *NodeConfig { cfg := &NodeConfig{ BlockManagerConfig: BlockManagerConfig{ @@ -57,7 +57,7 @@ func DefaultConfig(home string) *NodeConfig { } keyringDir := filepath.Join(home, DefaultHomeDir) - + // Setting default params for sl grpc mock defaultSlGrpcConfig := settlement.GrpcConfig{ Host: "127.0.0.1", Port: 7981, @@ -79,7 +79,7 @@ func DefaultConfig(home string) *NodeConfig { } cfg.SettlementConfig = defaultSLconfig - + // Setting default params for da grpc mock defaultDAGrpc := grpc.Config{ Host: "127.0.0.1", Port: 7980, diff --git a/config/flags.go b/config/flags.go index d476c39f2..1f1eaf83e 100644 --- a/config/flags.go +++ b/config/flags.go @@ -32,11 +32,11 @@ const ( FlagP2PBootstrapRetryTime = "dymint.p2p_config.bootstrap_retry_time" ) - - - +// AddNodeFlags adds Dymint specific configuration options to cobra Command. +// +// This function is called in cosmos-sdk. func AddNodeFlags(cmd *cobra.Command) { - + // Add tendermint default flags tmcmd.AddNodeFlags(cmd) def := DefaultNodeConfig @@ -58,7 +58,7 @@ func AddNodeFlags(cmd *cobra.Command) { cmd.Flags().String(FlagP2PListenAddress, def.P2PConfig.ListenAddress, "P2P listen address") cmd.Flags().String(FlagP2PBootstrapNodes, def.P2PConfig.BootstrapNodes, "P2P bootstrap nodes") cmd.Flags().Duration(FlagP2PBootstrapRetryTime, def.P2PConfig.BootstrapRetryTime, "P2P bootstrap time") - cmd.Flags().Uint64(FlagP2PGossipCacheSize, uint64(def.P2PConfig.GossipSubCacheSize), "P2P Gossiped blocks cache size") + cmd.Flags().Uint64(FlagP2PGossipCacheSize, uint64(def.P2PConfig.GossipSubCacheSize), "P2P Gossiped blocks cache size") //nolint:gosec // GossipSubCacheSize should be always positive } func BindDymintFlags(cmd *cobra.Command, v *viper.Viper) error { diff --git a/config/p2p.go b/config/p2p.go index 71b18b180..a2449ed43 100644 --- a/config/p2p.go +++ b/config/p2p.go @@ -5,27 +5,27 @@ import ( "time" ) - +// P2PConfig stores configuration related to peer-to-peer networking. type P2PConfig struct { - + // Listening address for P2P connections ListenAddress string `mapstructure:"p2p_listen_address"` - + // List of nodes used for P2P bootstrapping BootstrapNodes string `mapstructure:"p2p_bootstrap_nodes"` - + // List of nodes persistent P2P nodes PersistentNodes string `mapstructure:"p2p_persistent_nodes"` - + // Size of the Gossipsub router cache GossipSubCacheSize int `mapstructure:"p2p_gossip_cache_size"` - + // Time interval a node tries to bootstrap again, in case no nodes connected BootstrapRetryTime time.Duration `mapstructure:"p2p_bootstrap_retry_time"` - + // Param used to enable block sync from p2p BlockSyncEnabled bool `mapstructure:"p2p_blocksync_enabled"` - + // Time interval used by a node to request missing blocks (gap between cached blocks and local height) on demand from other peers using blocksync BlockSyncRequestIntervalTime time.Duration `mapstructure:"p2p_blocksync_block_request_interval"` - + // Param used to enable the advertisement of the node to be part of the P2P network in the DHT AdvertisingEnabled bool `mapstructure:"p2p_advertising_enabled"` } - +// Validate P2PConfig func (c P2PConfig) Validate() error { if c.GossipSubCacheSize < 0 { return fmt.Errorf("gossipsub cache size cannot be negative") diff --git a/config/rpc.go b/config/rpc.go index baa5e8e7b..d6b14303a 100644 --- a/config/rpc.go +++ b/config/rpc.go @@ -1,38 +1,38 @@ package config - +// RPCConfig holds RPC configuration params. 
type RPCConfig struct { ListenAddress string - + // Cross Origin Resource Sharing settings CORSAllowedOrigins []string CORSAllowedMethods []string CORSAllowedHeaders []string - - - - - - - + // Maximum number of simultaneous connections (including WebSocket). + // Does not include gRPC connections. See grpc-max-open-connections + // If you want to accept a larger number than the default, make sure + // you increase your OS limits. + // 0 - unlimited. + // Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} + // 1024 - 40 - 10 - 50 = 924 = ~900 MaxOpenConnections int - - - - - - - - - + // The path to a file containing certificate that is used to create the HTTPS server. + // Might be either absolute path or path related to Tendermint's config directory. + // + // If the certificate is signed by a certificate authority, + // the certFile should be the concatenation of the server's certificate, any intermediates, + // and the CA's certificate. + // + // NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. + // Otherwise, HTTP server is run. TLSCertFile string `mapstructure:"tls-cert-file"` - - - - - + // The path to a file containing matching private key that is used to create the HTTPS server. + // Might be either absolute path or path related to tendermint's config directory. + // + // NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. + // Otherwise, HTTP server is run. TLSKeyFile string `mapstructure:"tls-key-file"` } diff --git a/config/toml.go b/config/toml.go index 4bf51e276..9ee3d544d 100644 --- a/config/toml.go +++ b/config/toml.go @@ -9,7 +9,7 @@ import ( tmos "github.com/tendermint/tendermint/libs/os" ) - +// DefaultDirPerm is the default permissions used when creating directories. const DefaultDirPerm = 0o700 var configTemplate *template.Template @@ -24,10 +24,10 @@ func init() { } } +/****** these are for production settings ***********/ - - - +// EnsureRoot creates the root, config, and data directories if they don't exist, +// and panics if it fails. func EnsureRoot(rootDir string, defaultConfig *NodeConfig) { if err := tmos.EnsureDir(rootDir, DefaultDirPerm); err != nil { panic(err.Error()) @@ -42,13 +42,13 @@ func EnsureRoot(rootDir string, defaultConfig *NodeConfig) { configFilePath := filepath.Join(rootDir, DefaultConfigDirName, DefaultConfigFileName) - + // Write default config file if missing. if !tmos.FileExists(configFilePath) { WriteConfigFile(configFilePath, defaultConfig) } } - +// WriteConfigFile renders config using the template and writes it to configFilePath. func WriteConfigFile(configFilePath string, config *NodeConfig) { var buffer bytes.Buffer @@ -59,8 +59,8 @@ func WriteConfigFile(configFilePath string, config *NodeConfig) { tmos.MustWriteFile(configFilePath, buffer.Bytes(), 0o644) } - - +// Note: any changes to the comments/variables/mapstructure +// must be reflected in the appropriate struct in config/config.go const defaultConfigTemplate = ` ####################################################### ### Dymint Configuration Options ### diff --git a/conv/config.go b/conv/config.go index ec9b9e7f4..65498dc12 100644 --- a/conv/config.go +++ b/conv/config.go @@ -8,10 +8,10 @@ import ( "github.com/dymensionxyz/dymint/config" ) - - - - +// GetNodeConfig translates Tendermint's configuration into Dymint configuration. +// +// This method only translates configuration, and doesn't verify it. 
If some option is missing in Tendermint's +// config, it's skipped during translation. func GetNodeConfig(nodeConf *config.NodeConfig, tmConf *tmcfg.Config) error { if tmConf == nil { return errors.New("tendermint config is nil but required to populate Dymint config") @@ -31,7 +31,12 @@ func GetNodeConfig(nodeConf *config.NodeConfig, tmConf *tmcfg.Config) error { if tmConf.Mempool == nil { return errors.New("tendermint mempool config is nil but required to populate Dymint config") } - + /* + In the above, we are copying the rpc/p2p from Tendermint's configuration to Dymint's configuration. + This was implemented by the original rollkit authors, and they have not provided any explanation for this. + + For the mempool we simply copy the object. If we want to be more selective, we can adjust later. + */ nodeConf.MempoolConfig = *tmConf.Mempool return nil diff --git a/conv/crypto.go b/conv/crypto.go index 4f04470fa..b2c49e18a 100644 --- a/conv/crypto.go +++ b/conv/crypto.go @@ -8,7 +8,7 @@ import ( "github.com/tendermint/tendermint/p2p" ) - +// GetNodeKey creates libp2p private key from Tendermints NodeKey. func GetNodeKey(nodeKey *p2p.NodeKey) (crypto.PrivKey, error) { if nodeKey == nil || nodeKey.PrivKey == nil { return nil, ErrNilKey diff --git a/da/avail/avail.go b/da/avail/avail.go index 3d375b000..81c30b48b 100644 --- a/da/avail/avail.go +++ b/da/avail/avail.go @@ -34,7 +34,7 @@ const ( DataCallMethod = "submit_data" DataCallSectionIndex = 29 DataCallMethodIndex = 1 - maxBlobSize = 2097152 + maxBlobSize = 2097152 // 2MB according to Avail docs https://docs.availproject.org/docs/build-with-avail/overview#expandable-blockspace ) type SubstrateApiI interface { @@ -74,35 +74,35 @@ var ( _ da.BatchRetriever = &DataAvailabilityLayerClient{} ) - +// WithClient is an option which sets the client. func WithClient(client SubstrateApiI) da.Option { return func(dalc da.DataAvailabilityLayerClient) { dalc.(*DataAvailabilityLayerClient).client = client } } - +// WithTxInclusionTimeout is an option which sets the timeout for waiting for transaction inclusion. func WithTxInclusionTimeout(timeout time.Duration) da.Option { return func(dalc da.DataAvailabilityLayerClient) { dalc.(*DataAvailabilityLayerClient).txInclusionTimeout = timeout } } - +// WithBatchRetryDelay is an option which sets the delay between batch retries. func WithBatchRetryDelay(delay time.Duration) da.Option { return func(dalc da.DataAvailabilityLayerClient) { dalc.(*DataAvailabilityLayerClient).batchRetryDelay = delay } } - +// WithBatchRetryAttempts is an option which sets the number of batch retries. func WithBatchRetryAttempts(attempts uint) da.Option { return func(dalc da.DataAvailabilityLayerClient) { dalc.(*DataAvailabilityLayerClient).batchRetryAttempts = attempts } } - +// Init initializes DataAvailabilityLayerClient instance. 
func (c *DataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.Server, _ store.KV, logger types.Logger, options ...da.Option) error { c.logger = logger c.synced = make(chan struct{}, 1) @@ -114,18 +114,18 @@ func (c *DataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.S } } - + // Set defaults c.pubsubServer = pubsubServer c.txInclusionTimeout = defaultTxInculsionTimeout c.batchRetryDelay = defaultBatchRetryDelay c.batchRetryAttempts = defaultBatchRetryAttempts - + // Apply options for _, apply := range options { apply(c) } - + // If client wasn't set, create a new one if c.client == nil { substrateApiClient, err := gsrpc.NewSubstrateAPI(c.config.ApiURL) if err != nil { @@ -144,32 +144,32 @@ func (c *DataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.S return nil } - +// Start starts DataAvailabilityLayerClient instance. func (c *DataAvailabilityLayerClient) Start() error { c.synced <- struct{}{} return nil } - +// Stop stops DataAvailabilityLayerClient instance. func (c *DataAvailabilityLayerClient) Stop() error { c.cancel() close(c.synced) return nil } - +// WaitForSyncing is used to check when the DA light client finished syncing func (m *DataAvailabilityLayerClient) WaitForSyncing() { <-m.synced } - +// GetClientType returns client type. func (c *DataAvailabilityLayerClient) GetClientType() da.Client { return da.Avail } - +// RetrieveBatches retrieves batch from DataAvailabilityLayerClient instance. func (c *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMetaData) da.ResultRetrieveBatch { - + //nolint:typecheck blockHash, err := c.client.GetBlockHash(daMetaData.Height) if err != nil { return da.ResultRetrieveBatch{ @@ -190,10 +190,10 @@ func (c *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMet }, } } - + // Convert the data returned to batches var batches []*types.Batch for _, ext := range block.Block.Extrinsics { - + // these values below are specific indexes only for data submission, differs with each extrinsic if ext.Signature.AppID.Int64() == c.config.AppID && ext.Method.CallIndex.SectionIndex == DataCallSectionIndex && ext.Method.CallIndex.MethodIndex == DataCallMethodIndex { @@ -206,16 +206,16 @@ func (c *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMet c.logger.Error("unmarshal batch", "daHeight", daMetaData.Height, "error", err) continue } - + // Convert the proto batch to a batch batch := &types.Batch{} err = batch.FromProto(&pbBatch) if err != nil { c.logger.Error("batch from proto", "daHeight", daMetaData.Height, "error", err) continue } - + // Add the batch to the list batches = append(batches, batch) - + // Remove the bytes we just decoded. data = data[proto.Size(&pbBatch):] } @@ -233,7 +233,7 @@ func (c *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMet } } - +// SubmitBatch submits batch to DataAvailabilityLayerClient instance. func (c *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultSubmitBatch { blob, err := batch.MarshalBinary() if err != nil { @@ -250,8 +250,8 @@ func (c *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS return c.submitBatchLoop(blob) } - - +// submitBatchLoop tries submitting the batch. In case we get a configuration error we would like to stop trying, +// otherwise, for network error we keep trying indefinitely. 
func (c *DataAvailabilityLayerClient) submitBatchLoop(dataBlob []byte) da.ResultSubmitBatch { for { select { @@ -318,8 +318,8 @@ func (c *DataAvailabilityLayerClient) submitBatchLoop(dataBlob []byte) da.Result } } - - +// broadcastTx broadcasts the transaction to the network and in case of success +// returns the block height the batch was included in. func (c *DataAvailabilityLayerClient) broadcastTx(tx []byte) (uint64, error) { meta, err := c.client.GetMetadataLatest() if err != nil { @@ -329,7 +329,7 @@ func (c *DataAvailabilityLayerClient) broadcastTx(tx []byte) (uint64, error) { if err != nil { return 0, fmt.Errorf("%w: %s", da.ErrTxBroadcastConfigError, err) } - + // Create the extrinsic ext := availtypes.NewExtrinsic(newCall) genesisHash, err := c.client.GetBlockHash(0) if err != nil { @@ -343,7 +343,7 @@ func (c *DataAvailabilityLayerClient) broadcastTx(tx []byte) (uint64, error) { if err != nil { return 0, fmt.Errorf("%w: %s", da.ErrTxBroadcastConfigError, err) } - + // Get the account info for the nonce key, err := availtypes.CreateStorageKey(meta, "System", "Account", keyringPair.PublicKey) if err != nil { return 0, fmt.Errorf("%w: %s", da.ErrTxBroadcastConfigError, err) @@ -364,16 +364,16 @@ func (c *DataAvailabilityLayerClient) broadcastTx(tx []byte) (uint64, error) { SpecVersion: rv.SpecVersion, Tip: availtypes.NewUCompactFromUInt(c.config.Tip), TransactionVersion: rv.TransactionVersion, - AppID: availtypes.NewUCompactFromUInt(uint64(c.config.AppID)), + AppID: availtypes.NewUCompactFromUInt(uint64(c.config.AppID)), //nolint:gosec // AppID should be always positive } - + // Sign the transaction using Alice's default account err = ext.Sign(keyringPair, options) if err != nil { return 0, fmt.Errorf("%w: %s", da.ErrTxBroadcastConfigError, err) } - + // Send the extrinsic sub, err := c.client.SubmitAndWatchExtrinsic(ext) if err != nil { return 0, fmt.Errorf("%w: %s", da.ErrTxBroadcastNetworkError, err) @@ -419,7 +419,7 @@ func (c *DataAvailabilityLayerClient) broadcastTx(tx []byte) (uint64, error) { } } - +// CheckBatchAvailability checks batch availability in DataAvailabilityLayerClient instance. func (c *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASubmitMetaData) da.ResultCheckBatch { return da.ResultCheckBatch{ BaseResult: da.BaseResult{ @@ -429,7 +429,7 @@ func (c *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASu } } - +// getHeightFromHash returns the block height from the block hash func (c *DataAvailabilityLayerClient) getHeightFromHash(hash availtypes.Hash) (uint64, error) { c.logger.Debug("Getting block height from hash", "hash", hash) header, err := c.client.GetHeader(hash) @@ -439,12 +439,12 @@ func (c *DataAvailabilityLayerClient) getHeightFromHash(hash availtypes.Hash) (u return uint64(header.Number), nil } - +// GetMaxBlobSizeBytes returns the maximum allowed blob size in the DA, used to check the max batch size configured func (d *DataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { return maxBlobSize } - +// GetBalance returns the balance for a specific address func (c *DataAvailabilityLayerClient) GetSignerBalance() (da.Balance, error) { return da.Balance{}, nil } diff --git a/da/celestia/celestia.go b/da/celestia/celestia.go index 6eda30bf5..80cd32f85 100644 --- a/da/celestia/celestia.go +++ b/da/celestia/celestia.go @@ -26,7 +26,7 @@ import ( uretry "github.com/dymensionxyz/dymint/utils/retry" ) - +// DataAvailabilityLayerClient use celestia-node public API. 
type DataAvailabilityLayerClient struct { rpc celtypes.CelestiaRPCClient @@ -43,35 +43,35 @@ var ( _ da.BatchRetriever = &DataAvailabilityLayerClient{} ) - +// WithRPCClient sets rpc client. func WithRPCClient(rpc celtypes.CelestiaRPCClient) da.Option { return func(daLayerClient da.DataAvailabilityLayerClient) { daLayerClient.(*DataAvailabilityLayerClient).rpc = rpc } } - +// WithRPCRetryDelay sets failed rpc calls retry delay. func WithRPCRetryDelay(delay time.Duration) da.Option { return func(daLayerClient da.DataAvailabilityLayerClient) { daLayerClient.(*DataAvailabilityLayerClient).config.RetryDelay = delay } } - +// WithRPCAttempts sets failed rpc calls retry attempts. func WithRPCAttempts(attempts int) da.Option { return func(daLayerClient da.DataAvailabilityLayerClient) { daLayerClient.(*DataAvailabilityLayerClient).config.RetryAttempts = &attempts } } - +// WithSubmitBackoff sets submit retry delay config. func WithSubmitBackoff(c uretry.BackoffConfig) da.Option { return func(daLayerClient da.DataAvailabilityLayerClient) { daLayerClient.(*DataAvailabilityLayerClient).config.Backoff = c } } - +// Init initializes DataAvailabilityLayerClient instance. func (c *DataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.Server, _ store.KV, logger types.Logger, options ...da.Option) error { c.logger = logger c.synced = make(chan struct{}, 1) @@ -85,7 +85,7 @@ func (c *DataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.S c.pubsubServer = pubsubServer - + // Apply options for _, apply := range options { apply(c) } @@ -113,7 +113,7 @@ func createConfig(bz []byte) (c Config, err error) { return c, errors.New("gas prices must be set") } - + // NOTE: 0 is valid value for RetryAttempts if c.RetryDelay == 0 { c.RetryDelay = defaultRpcRetryDelay @@ -128,11 +128,11 @@ func createConfig(bz []byte) (c Config, err error) { return c, nil } - +// Start prepares DataAvailabilityLayerClient to work. func (c *DataAvailabilityLayerClient) Start() (err error) { c.logger.Info("Starting Celestia Data Availability Layer Client.") - + // other client has already been set if c.rpc != nil { c.logger.Info("Celestia-node client already set.") return nil @@ -150,7 +150,7 @@ func (c *DataAvailabilityLayerClient) Start() (err error) { return } - +// Stop stops DataAvailabilityLayerClient. func (c *DataAvailabilityLayerClient) Stop() error { c.logger.Info("Stopping Celestia Data Availability Layer Client.") err := c.pubsubServer.Stop() @@ -162,17 +162,17 @@ func (c *DataAvailabilityLayerClient) Stop() error { return nil } - +// WaitForSyncing is used to check when the DA light client finished syncing func (m *DataAvailabilityLayerClient) WaitForSyncing() { <-m.synced } - +// GetClientType returns client type. func (c *DataAvailabilityLayerClient) GetClientType() da.Client { return da.Celestia } - +// SubmitBatch submits a batch to the DA layer. 
func (c *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultSubmitBatch { data, err := batch.MarshalBinary() if err != nil { @@ -204,10 +204,10 @@ func (c *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS return da.ResultSubmitBatch{} default: - + // TODO(srene): Split batch into multiple blobs if necessary, if supported height, commitment, err := c.submit(data) if errors.Is(err, gerrc.ErrInternal) { - + // no point retrying if it's because of our code being wrong err = fmt.Errorf("submit: %w", err) return da.ResultSubmitBatch{ BaseResult: da.BaseResult{ @@ -273,7 +273,7 @@ func (c *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMet resultRetrieveBatch = c.retrieveBatches(daMetaData) return resultRetrieveBatch.Error }, - retry.Attempts(uint(*c.config.RetryAttempts)), + retry.Attempts(uint(*c.config.RetryAttempts)), //nolint:gosec // RetryAttempts should be always positive retry.DelayType(retry.FixedDelay), retry.Delay(c.config.RetryDelay), ) @@ -368,7 +368,7 @@ func (c *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASu return nil }, - retry.Attempts(uint(*c.config.RetryAttempts)), + retry.Attempts(uint(*c.config.RetryAttempts)), //nolint:gosec // RetryAttempts should be always positive retry.DelayType(retry.FixedDelay), retry.Delay(c.config.RetryDelay), ) @@ -392,7 +392,7 @@ func (c *DataAvailabilityLayerClient) checkBatchAvailability(daMetaData *da.DASu dah, err := c.getDataAvailabilityHeaders(daMetaData.Height) if err != nil { - + // Returning Data Availability header Data Root for dispute validation return da.ResultCheckBatch{ BaseResult: da.BaseResult{ Code: da.StatusError, @@ -407,10 +407,10 @@ func (c *DataAvailabilityLayerClient) checkBatchAvailability(daMetaData *da.DASu proof, err := c.getProof(daMetaData) if err != nil || proof == nil { - - - - + // TODO (srene): Not getting a proof means there is no existing data for the namespace and the commitment (the commitment is wrong). + // Therefore we need to prove whether the commitment is wrong or the span does not exist. + // In case the span is correct it is necessary to return the data for the span and the proofs to the data root, so we can prove the data + // is the data for the span, and reproducing the commitment will generate a different one. return da.ResultCheckBatch{ BaseResult: da.BaseResult{ Code: da.StatusError, @@ -433,9 +433,9 @@ func (c *DataAvailabilityLayerClient) checkBatchAvailability(daMetaData *da.DASu if daMetaData.Index > 0 && daMetaData.Length > 0 { if index != daMetaData.Index || shares != daMetaData.Length { - - - + // TODO (srene): In this case the commitment is correct but does not match the span. + // If the span is correct we have to repeat the previous step (sending data + proof of data). + // In case the span is not correct we need to send an unavailability proof by sending the proof of any row root to the data root return da.ResultCheckBatch{ CheckMetaData: DACheckMetaData, BaseResult: da.BaseResult{ @@ -449,9 +449,9 @@ func (c *DataAvailabilityLayerClient) checkBatchAvailability(daMetaData *da.DASu } included, err = c.validateProof(daMetaData, proof) - - - + // Both cases below (there is an error validating the proof, or the proof is wrong) should not happen + // if we consider correct functioning of the celestia light node. + // They will only happen if the celestia light node returned wrong proofs in the previous step.
if err != nil { return da.ResultCheckBatch{ BaseResult: da.BaseResult{ @@ -485,7 +485,7 @@ func (c *DataAvailabilityLayerClient) checkBatchAvailability(daMetaData *da.DASu } } - +// Submit submits the Blobs to Data Availability layer. func (c *DataAvailabilityLayerClient) submit(daBlob da.Blob) (uint64, da.Commitment, error) { blobs, commitments, err := c.blobsAndCommitments(daBlob) if err != nil { @@ -554,7 +554,7 @@ func (c *DataAvailabilityLayerClient) getDataAvailabilityHeaders(height uint64) return headers.DAH, nil } - +// Celestia syncing in background func (c *DataAvailabilityLayerClient) sync(rpc *openrpc.Client) { sync := func() error { done := make(chan error, 1) @@ -579,7 +579,7 @@ func (c *DataAvailabilityLayerClient) sync(rpc *openrpc.Client) { } err := retry.Do(sync, - retry.Attempts(0), + retry.Attempts(0), // try forever retry.Delay(10*time.Second), retry.LastErrorOnly(true), retry.DelayType(retry.FixedDelay), @@ -596,12 +596,12 @@ func (c *DataAvailabilityLayerClient) sync(rpc *openrpc.Client) { } } - +// GetMaxBlobSizeBytes returns the maximum allowed blob size in the DA, used to check the max batch size configured func (d *DataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { return maxBlobSizeBytes } - +// GetSignerBalance returns the balance for a specific address func (d *DataAvailabilityLayerClient) GetSignerBalance() (da.Balance, error) { ctx, cancel := context.WithTimeout(d.ctx, d.config.Timeout) defer cancel() diff --git a/da/celestia/config.go b/da/celestia/config.go index a1f764d4d..025a42e33 100644 --- a/da/celestia/config.go +++ b/da/celestia/config.go @@ -24,7 +24,7 @@ var defaultSubmitBackoff = uretry.NewBackoffConfig( uretry.WithMaxDelay(time.Second*6), ) - +// Config stores Celestia DALC configuration parameters. type Config struct { BaseURL string `json:"base_url,omitempty"` AppNodeURL string `json:"app_node_url,omitempty"` @@ -60,13 +60,13 @@ func (c *Config) InitNamespaceID() error { if c.NamespaceIDStr == "" { c.NamespaceIDStr = generateRandNamespaceID() } - + // Decode NamespaceID from string to byte array namespaceBytes, err := hex.DecodeString(c.NamespaceIDStr) if err != nil { return fmt.Errorf("decode string: %w", err) } - + // Check if NamespaceID is of correct length (10 bytes) if len(namespaceBytes) != openrpcns.NamespaceVersionZeroIDSize { return fmt.Errorf("wrong length: got: %v: expect %v", len(namespaceBytes), openrpcns.NamespaceVersionZeroIDSize) } diff --git a/da/celestia/mock/messages.go b/da/celestia/mock/messages.go index d0140a084..cf97dd2c5 100644 --- a/da/celestia/mock/messages.go +++ b/da/celestia/mock/messages.go @@ -5,8 +5,8 @@ import ( "encoding/binary" ) - - +// This code is extracted from celestia-app. It's here to build shares from messages (serialized blocks). +// TODO(tzdybal): if we stop using `/namespaced_shares` we can get rid of this file. const ( shareSize = 256 @@ -14,8 +14,8 @@ const ( msgShareSize = shareSize - namespaceSize ) - - +// splitMessage breaks the data in a message into the minimum number of +// namespaced shares func splitMessage(rawData []byte, nid []byte) []NamespacedShare { shares := make([]NamespacedShare, 0) firstRawShare := append(append( @@ -40,10 +40,10 @@ func splitMessage(rawData []byte, nid []byte) []NamespacedShare { return shares } - +// Share contains the raw share data without the corresponding namespace. type Share []byte - +// NamespacedShare extends a Share with the corresponding namespace. 
type NamespacedShare struct { Share ID []byte @@ -68,8 +68,8 @@ func zeroPadIfNecessary(share []byte, width int) []byte { return share } - - +// marshalDelimited marshals the raw data (excluding the namespace) of this +// message and prefixes it with the length of that encoding. func marshalDelimited(data []byte) ([]byte, error) { lenBuf := make([]byte, binary.MaxVarintLen64) length := uint64(len(data)) @@ -77,8 +77,8 @@ func marshalDelimited(data []byte) ([]byte, error) { return append(lenBuf[:n], data...), nil } - - +// appendToShares appends raw data as shares. +// Used to build shares from blocks/messages. func appendToShares(shares []NamespacedShare, nid []byte, rawData []byte) []NamespacedShare { if len(rawData) <= msgShareSize { rawShare := append(append( @@ -89,7 +89,7 @@ func appendToShares(shares []NamespacedShare, nid []byte, rawData []byte) []Name paddedShare := zeroPadIfNecessary(rawShare, shareSize) share := NamespacedShare{paddedShare, nid} shares = append(shares, share) - } else { + } else { // len(rawData) > msgShareSize shares = append(shares, splitMessage(rawData, nid)...) } return shares diff --git a/da/celestia/mock/server.go b/da/celestia/mock/server.go index 98434285a..8b76d44fb 100644 --- a/da/celestia/mock/server.go +++ b/da/celestia/mock/server.go @@ -20,7 +20,7 @@ import ( "github.com/dymensionxyz/dymint/types" ) - +// Server mocks celestia-node HTTP API. type Server struct { da *local.DataAvailabilityLayerClient blockTime time.Duration @@ -28,7 +28,7 @@ type Server struct { logger types.Logger } - +// NewServer creates new instance of Server. func NewServer(blockTime time.Duration, logger types.Logger) *Server { return &Server{ da: new(local.DataAvailabilityLayerClient), @@ -37,7 +37,7 @@ func NewServer(blockTime time.Duration, logger types.Logger) *Server { } } - +// Start starts HTTP server with given listener. func (s *Server) Start(listener net.Listener) error { err := s.da.Init([]byte(s.blockTime.String()), pubsub.NewServer(), store.NewDefaultInMemoryKVStore(), s.logger) if err != nil { @@ -56,7 +56,7 @@ func (s *Server) Start(listener net.Listener) error { return nil } - +// Stop shuts down the Server. func (s *Server) Stop() { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() diff --git a/da/celestia/rpc.go b/da/celestia/rpc.go index be0265f1f..f4dac9d64 100644 --- a/da/celestia/rpc.go +++ b/da/celestia/rpc.go @@ -14,49 +14,49 @@ import ( var _ types.CelestiaRPCClient = &OpenRPC{} - +// OpenRPC is a wrapper around the openrpc client. type OpenRPC struct { rpc *openrpc.Client } - +// NewOpenRPC creates a new openrpc client. func NewOpenRPC(rpc *openrpc.Client) *OpenRPC { return &OpenRPC{ rpc: rpc, } } - +// GetAll gets all blobs. func (c *OpenRPC) GetAll(ctx context.Context, height uint64, namespaces []share.Namespace) ([]*blob.Blob, error) { return c.rpc.Blob.GetAll(ctx, height, namespaces) } - +// Submit blobs. func (c *OpenRPC) Submit(ctx context.Context, blobs []*blob.Blob, options *blob.SubmitOptions) (uint64, error) { return c.rpc.Blob.Submit(ctx, blobs, options) } - +// GetProof gets the proof for a specific share commitment. 
func (c *OpenRPC) GetProof(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Proof, error) { return c.rpc.Blob.GetProof(ctx, height, namespace, commitment) } - +// Get blob for a specific share commitment func (c *OpenRPC) Get(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Blob, error) { return c.rpc.Blob.Get(ctx, height, namespace, commitment) } - +// GetByHeight gets the header by height func (c *OpenRPC) GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { return c.rpc.Header.GetByHeight(ctx, height) } - +// Included checks if a blob is included in the chain func (c *OpenRPC) Included(ctx context.Context, height uint64, namespace share.Namespace, proof *blob.Proof, commitment blob.Commitment) (bool, error) { return c.rpc.Blob.Included(ctx, height, namespace, proof, commitment) } - +// GetSignerBalance balance for a specific address func (c *OpenRPC) GetSignerBalance(ctx context.Context) (*state.Balance, error) { return c.rpc.State.Balance(ctx) } diff --git a/da/celestia/types/rpc.go b/da/celestia/types/rpc.go index 8fded2362..2949f65ec 100644 --- a/da/celestia/types/rpc.go +++ b/da/celestia/types/rpc.go @@ -10,16 +10,16 @@ import ( ) type CelestiaRPCClient interface { - + /* ---------------------------------- blob ---------------------------------- */ Get(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Blob, error) GetAll(context.Context, uint64, []share.Namespace) ([]*blob.Blob, error) GetProof(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Proof, error) Included(ctx context.Context, height uint64, namespace share.Namespace, proof *blob.Proof, commitment blob.Commitment) (bool, error) Submit(ctx context.Context, blobs []*blob.Blob, options *blob.SubmitOptions) (uint64, error) - + /* --------------------------------- header --------------------------------- */ GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) - + /* ---------------------------------- state --------------------------------- */ GetSignerBalance(ctx context.Context) (*state.Balance, error) } diff --git a/da/celestia/types/types.go b/da/celestia/types/types.go index 52be192a6..9a10f3a0b 100644 --- a/da/celestia/types/types.go +++ b/da/celestia/types/types.go @@ -4,74 +4,74 @@ import ( "math" ) - - - - +// These constants were originally sourced from: +// https://github.com/celestiaorg/celestia-specs/blob/master/src/specs/consensus.md#constants +// +// They can not change throughout the lifetime of a network. const ( - + // NamespaceVersionSize is the size of a namespace version in bytes. NamespaceVersionSize = 1 - - + // NamespaceVersionMaxValue is the maximum value a namespace version can be. + // This const must be updated if NamespaceVersionSize is changed. NamespaceVersionMaxValue = math.MaxUint8 - + // NamespaceIDSize is the size of a namespace ID in bytes. NamespaceIDSize = 28 - + // NamespaceSize is the size of a namespace (version + ID) in bytes. NamespaceSize = NamespaceVersionSize + NamespaceIDSize - + // ShareSize is the size of a share in bytes. ShareSize = 512 - - + // ShareInfoBytes is the number of bytes reserved for information. The info + // byte contains the share version and a sequence start idicator. 
ShareInfoBytes = 1 - - + // SequenceLenBytes is the number of bytes reserved for the sequence length + // that is present in the first share of a sequence. SequenceLenBytes = 4 - + // ShareVersionZero is the first share version format. ShareVersionZero = uint8(0) - - + // DefaultShareVersion is the defacto share version. Use this if you are + // unsure of which version to use. DefaultShareVersion = ShareVersionZero - - + // CompactShareReservedBytes is the number of bytes reserved for the location of + // the first unit (transaction, ISR) in a compact share. CompactShareReservedBytes = 4 - - + // FirstCompactShareContentSize is the number of bytes usable for data in + // the first compact share of a sequence. FirstCompactShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - SequenceLenBytes - CompactShareReservedBytes - - + // ContinuationCompactShareContentSize is the number of bytes usable for + // data in a continuation compact share of a sequence. ContinuationCompactShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - CompactShareReservedBytes - - + // FirstSparseShareContentSize is the number of bytes usable for data in the + // first sparse share of a sequence. FirstSparseShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - SequenceLenBytes - - + // ContinuationSparseShareContentSize is the number of bytes usable for data + // in a continuation sparse share of a sequence. ContinuationSparseShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - + // MinSquareSize is the smallest original square width. MinSquareSize = 1 - - + // MinshareCount is the minimum number of shares allowed in the original + // data square. MinShareCount = MinSquareSize * MinSquareSize - + // MaxShareVersion is the maximum value a share version can be. MaxShareVersion = 127 - + // Celestia matrix size DefaultGovMaxSquareSize = 64 - + // Default maximum bytes per blob allowed DefaultMaxBytes = DefaultGovMaxSquareSize * DefaultGovMaxSquareSize * ContinuationSparseShareContentSize ) diff --git a/da/da.go b/da/da.go index cd85c0e7d..3bde8023f 100644 --- a/da/da.go +++ b/da/da.go @@ -15,30 +15,30 @@ import ( "github.com/dymensionxyz/dymint/types" ) - - - - - +// StatusCode is a type for DA layer return status. +// TODO: define an enum of different non-happy-path cases +// that might need to be handled by Dymint independent of +// the underlying DA chain. Use int32 to match the protobuf +// enum representation. type StatusCode int32 - +// Commitment should contain serialized cryptographic commitment to Blob value. type Commitment = []byte - +// Blob is the data submitted/received from DA interface. type Blob = []byte - +// Data Availability return codes. const ( StatusUnknown StatusCode = iota StatusSuccess StatusError ) - +// Client defines all the possible da clients type Client string - +// Data availability clients const ( Mock Client = "mock" Celestia Client = "celestia" @@ -46,34 +46,34 @@ const ( Grpc Client = "grpc" ) - +// Option is a function that sets a parameter on the da layer. type Option func(DataAvailabilityLayerClient) - +// BaseResult contains basic information returned by DA layer. type BaseResult struct { - + // Code is to determine if the action succeeded. Code StatusCode - + // Message may contain DA layer specific information (like DA block height/hash, detailed error message, etc) Message string - + // Error is the error returned by the DA layer Error error } - +// DAMetaData contains meta data about a batch on the Data Availability Layer. 
type DASubmitMetaData struct { - + // Height is the height of the block in the da layer Height uint64 - + // Namespace ID Namespace []byte - + // Client is the client to use to fetch data from the da layer Client Client - + // Share commitment, for each blob, used to obtain blobs and proofs Commitment Commitment - + // Initial position for each blob in the NMT Index int - + // Number of shares of each blob Length int - + // any NMT root for the specific height, necessary for non-inclusion proof Root []byte } @@ -84,9 +84,9 @@ type Balance struct { const PathSeparator = "|" - +// ToPath converts a DAMetaData to a path. func (d *DASubmitMetaData) ToPath() string { - + // convert uint64 to string if d.Commitment != nil { commitment := hex.EncodeToString(d.Commitment) dataroot := hex.EncodeToString(d.Root) @@ -109,7 +109,7 @@ func (d *DASubmitMetaData) ToPath() string { } } - +// FromPath parses a path to a DAMetaData. func (d *DASubmitMetaData) FromPath(path string) (*DASubmitMetaData, error) { pathParts := strings.FieldsFunc(path, func(r rune) bool { return r == rune(PathSeparator[0]) }) if len(pathParts) < 2 { @@ -125,7 +125,7 @@ func (d *DASubmitMetaData) FromPath(path string) (*DASubmitMetaData, error) { Height: height, Client: Client(pathParts[0]), } - + // TODO: check per DA and panic if not enough parts if len(pathParts) == 7 { submitData.Index, err = strconv.Atoi(pathParts[2]) if err != nil { @@ -152,93 +152,93 @@ func (d *DASubmitMetaData) FromPath(path string) (*DASubmitMetaData, error) { return submitData, nil } - +// DAMetaData contains meta data about a batch on the Data Availability Layer. type DACheckMetaData struct { - + // Height is the height of the block in the da layer Height uint64 - + // Client is the client to use to fetch data from the da layer Client Client - + // Submission index in the Hub SLIndex uint64 - + // Namespace ID Namespace []byte - + // Share commitment, for each blob, used to obtain blobs and proofs Commitment Commitment - + // Initial position for each blob in the NMT Index int - + // Number of shares of each blob Length int - + // Proofs necessary to validate blob inclusion in the specific height Proofs []*blob.Proof - + // NMT roots for each NMT Proof NMTRoots []byte - + // Proofs necessary to validate blob inclusion in the specific height RowProofs []*merkle.Proof - + // any NMT root for the specific height, necessary for non-inclusion proof Root []byte } - +// ResultSubmitBatch contains information returned from DA layer after block submission. type ResultSubmitBatch struct { BaseResult - + // DAHeight informs about a height on Data Availability Layer for given result. SubmitMetaData *DASubmitMetaData } - +// ResultCheckBatch contains information about block availability, returned from DA layer client. type ResultCheckBatch struct { BaseResult - + // DAHeight informs about a height on Data Availability Layer for given result. CheckMetaData *DACheckMetaData } - +// ResultRetrieveBatch contains batch of blocks returned from DA layer client. type ResultRetrieveBatch struct { BaseResult - - + // Block is the full block retrieved from Data Availability Layer. + // If Code is not equal to StatusSuccess, it has to be nil. Batches []*types.Batch - + // DAHeight informs about a height on Data Availability Layer for given result. CheckMetaData *DACheckMetaData } - - +// DataAvailabilityLayerClient defines generic interface for DA layer block submission. +// It also contains life-cycle methods. 
type DataAvailabilityLayerClient interface { - + // Init is called once to allow DA client to read configuration and initialize resources. Init(config []byte, pubsubServer *pubsub.Server, kvStore store.KV, logger types.Logger, options ...Option) error - + // Start is called once, after Init. Its implementation should start operation of DataAvailabilityLayerClient. Start() error - + // Stop is called once, when DataAvailabilityLayerClient is no longer needed. Stop() error - - - + // SubmitBatch submits the passed in batch to the DA layer. + // This should create a transaction which (potentially) + // triggers a state transition in the DA layer. SubmitBatch(batch *types.Batch) ResultSubmitBatch GetClientType() Client - + // CheckBatchAvailability checks the availability of the blob submitted, getting proofs and validating them CheckBatchAvailability(daMetaData *DASubmitMetaData) ResultCheckBatch - + // Used to check when the DA light client finished syncing WaitForSyncing() - + // Returns the maximum allowed blob size in the DA, used to check the max batch size configured GetMaxBlobSizeBytes() uint32 - + // GetSignerBalance returns the balance for a specific address GetSignerBalance() (Balance, error) } - - +// BatchRetriever is an additional interface that can be implemented by a Data Availability Layer Client that is able to retrieve +// block data from the DA layer. This gives the ability to use it for block synchronization. type BatchRetriever interface { - + // RetrieveBatches returns blocks at given data layer height from data availability layer. RetrieveBatches(daMetaData *DASubmitMetaData) ResultRetrieveBatch - + // CheckBatchAvailability checks the availability of the blob received, getting proofs and validating them CheckBatchAvailability(daMetaData *DASubmitMetaData) ResultCheckBatch } diff --git a/da/errors.go b/da/errors.go index dca7871b7..ba02343a8 100644 --- a/da/errors.go +++ b/da/errors.go @@ -7,26 +7,26 @@ import ( ) var ( - + // ErrTxBroadcastConfigError is returned when the tx cannot be built due to a configuration error. ErrTxBroadcastConfigError = errors.New("failed building tx") - + // ErrTxBroadcastNetworkError is returned when the tx broadcast fails due to a network error. ErrTxBroadcastNetworkError = errors.New("failed broadcasting tx") - + // ErrTxBroadcastTimeout is returned when transaction broadcast times out. ErrTxBroadcastTimeout = errors.New("broadcast timeout error") - + // ErrUnableToGetProof is returned when proof is not available. ErrUnableToGetProof = errors.New("unable to get proof") - + // ErrRetrieval is returned when the retrieval rpc fails ErrRetrieval = errors.New("retrieval failed") - + // ErrBlobNotFound is returned when blob is not found. ErrBlobNotFound = errors.New("blob not found") - + // ErrBlobNotIncluded is returned when blob is not included. ErrBlobNotIncluded = errors.New("blob not included") - + // ErrBlobNotParsed is returned when blob cannot be parsed ErrBlobNotParsed = errors.New("unable to parse blob to batch") - + // ErrProofNotMatching is returned when proof does not match.
ErrProofNotMatching = errors.New("proof not matching") - + // ErrNameSpace is returned when wrong namespace used ErrNameSpace = errors.New("namespace not matching") - + // ErrDAMismatch is returned when the DA client used does not match the da client specified in the da path of the state update ErrDAMismatch = gerrc.ErrInvalidArgument.Wrap("DA in config not matching DA path") ) diff --git a/da/grpc/grpc.go b/da/grpc/grpc.go index 7daa0c667..8636cf583 100644 --- a/da/grpc/grpc.go +++ b/da/grpc/grpc.go @@ -16,9 +16,9 @@ import ( "github.com/tendermint/tendermint/libs/pubsub" ) -const maxBlobSize = 2097152 - +const maxBlobSize = 2097152 // 2MB (equivalent to avail or celestia) +// DataAvailabilityLayerClient is a generic client that proxies all DA requests via gRPC. type DataAvailabilityLayerClient struct { config Config @@ -28,14 +28,14 @@ type DataAvailabilityLayerClient struct { logger types.Logger } - +// Config contains configuration options for DataAvailabilityLayerClient. type Config struct { - + // TODO(tzdybal): add more options! Host string `json:"host"` Port int `json:"port"` } - +// DefaultConfig defines default values for DataAvailabilityLayerClient configuration. var DefaultConfig = Config{ Host: "127.0.0.1", Port: 7980, @@ -46,7 +46,7 @@ var ( _ da.BatchRetriever = &DataAvailabilityLayerClient{} ) - +// Init sets the configuration options. func (d *DataAvailabilityLayerClient) Init(config []byte, _ *pubsub.Server, _ store.KV, logger types.Logger, options ...da.Option) error { d.logger = logger d.synced = make(chan struct{}, 1) @@ -57,14 +57,14 @@ func (d *DataAvailabilityLayerClient) Init(config []byte, _ *pubsub.Server, _ st return json.Unmarshal(config, &d.config) } - +// Start creates connection to gRPC server and instantiates gRPC client. func (d *DataAvailabilityLayerClient) Start() error { d.logger.Info("starting GRPC DALC", "host", d.config.Host, "port", d.config.Port) d.synced <- struct{}{} var err error var opts []grpc.DialOption - + // TODO(tzdybal): add more options opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) d.conn, err = grpc.Dial(d.config.Host+":"+strconv.Itoa(d.config.Port), opts...) if err != nil { @@ -75,23 +75,23 @@ func (d *DataAvailabilityLayerClient) Start() error { return nil } - +// Stop closes connection to gRPC server. func (d *DataAvailabilityLayerClient) Stop() error { d.logger.Info("stopping GRPC DALC") return d.conn.Close() } - +// WaitForSyncing is used to check when the DA light client finished syncing func (m *DataAvailabilityLayerClient) WaitForSyncing() { <-m.synced } - +// GetClientType returns client type. func (d *DataAvailabilityLayerClient) GetClientType() da.Client { return da.Grpc } - +// SubmitBatch proxies SubmitBatch request to gRPC server. func (d *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultSubmitBatch { resp, err := d.client.SubmitBatch(context.TODO(), &dalc.SubmitBatchRequest{Batch: batch.ToProto()}) if err != nil { @@ -111,7 +111,7 @@ func (d *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS } } - +// CheckBatchAvailability proxies CheckBatchAvailability request to gRPC server. 
func (d *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASubmitMetaData) da.ResultCheckBatch { resp, err := d.client.CheckBatchAvailability(context.TODO(), &dalc.CheckBatchAvailabilityRequest{DataLayerHeight: daMetaData.Height}) if err != nil { @@ -122,12 +122,12 @@ func (d *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASu } } - +// GetMaxBlobSizeBytes returns the maximum allowed blob size in the DA, used to check the max batch size configured func (d *DataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { return maxBlobSize } - +// RetrieveBatches proxies RetrieveBlocks request to gRPC server. func (d *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMetaData) da.ResultRetrieveBatch { resp, err := d.client.RetrieveBatches(context.TODO(), &dalc.RetrieveBatchesRequest{DataLayerHeight: daMetaData.Height}) if err != nil { diff --git a/da/grpc/mockserv/mockserv.go b/da/grpc/mockserv/mockserv.go index e303e2901..a8f21e508 100644 --- a/da/grpc/mockserv/mockserv.go +++ b/da/grpc/mockserv/mockserv.go @@ -17,7 +17,7 @@ import ( "github.com/tendermint/tendermint/libs/pubsub" ) - +// GetServer creates and returns gRPC server instance. func GetServer(kv store.KV, conf grpcda.Config, mockConfig []byte) *grpc.Server { logger := tmlog.NewTMLogger(os.Stdout) diff --git a/da/local/local.go b/da/local/local.go index 009beaab8..3852b2797 100644 --- a/da/local/local.go +++ b/da/local/local.go @@ -1,7 +1,7 @@ package local import ( - "crypto/sha1" + "crypto/sha1" //#nosec "encoding/binary" "math/rand" "sync/atomic" @@ -14,8 +14,8 @@ import ( "github.com/tendermint/tendermint/libs/pubsub" ) - - +// DataAvailabilityLayerClient is intended only for usage in tests. +// It does actually ensures DA - it stores data in-memory. type DataAvailabilityLayerClient struct { logger types.Logger dalcKV store.KV @@ -26,7 +26,7 @@ type DataAvailabilityLayerClient struct { const ( defaultBlockTime = 3 * time.Second - maxBlobSize = 2097152 + maxBlobSize = 2097152 // 2MB (equivalent to avail or celestia) ) type config struct { @@ -38,7 +38,7 @@ var ( _ da.BatchRetriever = &DataAvailabilityLayerClient{} ) - +// Init is called once to allow DA client to read configuration and initialize resources. func (m *DataAvailabilityLayerClient) Init(config []byte, _ *pubsub.Server, dalcKV store.KV, logger types.Logger, options ...da.Option) error { m.logger = logger m.dalcKV = dalcKV @@ -56,7 +56,7 @@ func (m *DataAvailabilityLayerClient) Init(config []byte, _ *pubsub.Server, dalc return nil } - +// Start implements DataAvailabilityLayerClient interface. func (m *DataAvailabilityLayerClient) Start() error { m.logger.Debug("Mock Data Availability Layer Client starting") m.synced <- struct{}{} @@ -70,26 +70,26 @@ func (m *DataAvailabilityLayerClient) Start() error { return nil } - +// Stop implements DataAvailabilityLayerClient interface. func (m *DataAvailabilityLayerClient) Stop() error { m.logger.Debug("Mock Data Availability Layer Client stopped") close(m.synced) return nil } - +// WaitForSyncing is used to check when the DA light client finished syncing func (m *DataAvailabilityLayerClient) WaitForSyncing() { <-m.synced } - +// GetClientType returns client type. func (m *DataAvailabilityLayerClient) GetClientType() da.Client { return da.Mock } - - - +// SubmitBatch submits the passed in batch to the DA layer. +// This should create a transaction which (potentially) +// triggers a state transition in the DA layer. 
func (m *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultSubmitBatch { daHeight := m.daHeight.Load() @@ -99,7 +99,7 @@ func (m *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS if err != nil { return da.ResultSubmitBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: err.Error(), Error: err}} } - hash := sha1.Sum(uint64ToBinary(batch.EndHeight())) + hash := sha1.Sum(uint64ToBinary(batch.EndHeight())) //#nosec err = m.dalcKV.Set(getKey(daHeight, batch.StartHeight()), hash[:]) if err != nil { return da.ResultSubmitBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: err.Error(), Error: err}} @@ -109,7 +109,7 @@ func (m *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS return da.ResultSubmitBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: err.Error(), Error: err}} } - m.daHeight.Store(daHeight + 1) + m.daHeight.Store(daHeight + 1) // guaranteed no ABA problem as submit batch is only called when the object is locked return da.ResultSubmitBatch{ BaseResult: da.BaseResult{ @@ -123,13 +123,13 @@ func (m *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS } } - +// CheckBatchAvailability queries DA layer to check data availability of block corresponding to given header. func (m *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASubmitMetaData) da.ResultCheckBatch { batchesRes := m.RetrieveBatches(daMetaData) return da.ResultCheckBatch{BaseResult: da.BaseResult{Code: batchesRes.Code, Message: batchesRes.Message, Error: batchesRes.Error}, CheckMetaData: batchesRes.CheckMetaData} } - +// RetrieveBatches returns block at given height from data availability layer. func (m *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMetaData) da.ResultRetrieveBatch { if daMetaData.Height >= m.daHeight.Load() { return da.ResultRetrieveBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: "batch not found", Error: da.ErrBlobNotFound}} @@ -174,11 +174,11 @@ func getKey(daHeight uint64, height uint64) []byte { } func (m *DataAvailabilityLayerClient) updateDAHeight() { - blockStep := rand.Uint64()%10 + 1 + blockStep := rand.Uint64()%10 + 1 //#nosec m.daHeight.Add(blockStep) } - +// GetMaxBlobSizeBytes returns the maximum allowed blob size in the DA, used to check the max batch size configured func (d *DataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { return maxBlobSize } diff --git a/da/registry/registry.go b/da/registry/registry.go index 4779e2ba0..b520c41c9 100644 --- a/da/registry/registry.go +++ b/da/registry/registry.go @@ -8,7 +8,7 @@ import ( "github.com/dymensionxyz/dymint/da/local" ) - +// this is a central registry for all Data Availability Layer Clients var clients = map[string]func() da.DataAvailabilityLayerClient{ "mock": func() da.DataAvailabilityLayerClient { return &local.DataAvailabilityLayerClient{} }, "grpc": func() da.DataAvailabilityLayerClient { return &grpc.DataAvailabilityLayerClient{} }, @@ -16,7 +16,7 @@ var clients = map[string]func() da.DataAvailabilityLayerClient{ "avail": func() da.DataAvailabilityLayerClient { return &avail.DataAvailabilityLayerClient{} }, } - +// GetClient returns client identified by name. func GetClient(name string) da.DataAvailabilityLayerClient { f, ok := clients[name] if !ok { @@ -25,7 +25,7 @@ func GetClient(name string) da.DataAvailabilityLayerClient { return f() } - +// RegisteredClients returns names of all DA clients in registry. 
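// Illustrative sketch, not part of this patch: resolving a DA client by name
// through the registry. GetClient is assumed to return nil for an unknown name
// (its error branch is elided in the hunk above).
package main

import (
	"fmt"

	"github.com/dymensionxyz/dymint/da/registry"
)

func main() {
	fmt.Println("registered DA clients:", registry.RegisteredClients())

	client := registry.GetClient("mock")
	if client == nil {
		panic("mock DA client not registered")
	}
	fmt.Println("resolved client type:", client.GetClientType())
}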
func RegisteredClients() []string { registered := make([]string, 0, len(clients)) for name := range clients { diff --git a/indexers/blockindexer/block.go b/indexers/blockindexer/block.go index 0ac87ba8f..08d2f6d16 100644 --- a/indexers/blockindexer/block.go +++ b/indexers/blockindexer/block.go @@ -8,19 +8,19 @@ import ( "github.com/tendermint/tendermint/types" ) - +// BlockIndexer defines an interface contract for indexing block events. type BlockIndexer interface { - - + // Has returns true if the given height has been indexed. An error is returned + // upon database query failure. Has(height int64) (bool, error) - + // Index indexes BeginBlock and EndBlock events for a given block by its height. Index(types.EventDataNewBlockHeader) error - - + // Search performs a query for block heights that match a given BeginBlock + // and Endblock event search criteria. Search(ctx context.Context, q *query.Query) ([]int64, error) - + // Delete indexed block entries up to (but not including) a height. It returns number of entries pruned. Prune(from, to uint64, logger log.Logger) (uint64, error) } diff --git a/indexers/blockindexer/kv/kv.go b/indexers/blockindexer/kv/kv.go index d2b1b813a..bb8ee295c 100644 --- a/indexers/blockindexer/kv/kv.go +++ b/indexers/blockindexer/kv/kv.go @@ -27,9 +27,9 @@ import ( var _ indexer.BlockIndexer = (*BlockerIndexer)(nil) - - - +// BlockerIndexer implements a block indexer, indexing BeginBlock and EndBlock +// events with an underlying KV store. Block events are indexed by their height, +// such that matching search criteria returns the respective block height(s). type BlockerIndexer struct { store store.KV } @@ -40,8 +40,8 @@ func New(store store.KV) *BlockerIndexer { } } - - +// Has returns true if the given height has been indexed. An error is returned +// upon database query failure. func (idx *BlockerIndexer) Has(height int64) (bool, error) { key, err := heightKey(height) if err != nil { @@ -55,18 +55,18 @@ func (idx *BlockerIndexer) Has(height int64) (bool, error) { return err == nil, err } - - - - - - +// Index indexes BeginBlock and EndBlock events for a given block by its height. +// The following is indexed: +// +// primary key: encode(block.height | height) => encode(height) +// BeginBlock events: encode(eventType.eventAttr|eventValue|height|begin_block) => encode(height) +// EndBlock events: encode(eventType.eventAttr|eventValue|height|end_block) => encode(height) func (idx *BlockerIndexer) Index(bh tmtypes.EventDataNewBlockHeader) error { batch := idx.store.NewBatch() defer batch.Discard() height := bh.Header.Height - + // 1. index by height key, err := heightKey(height) if err != nil { return fmt.Errorf("create block height index key: %w", err) @@ -75,18 +75,18 @@ func (idx *BlockerIndexer) Index(bh tmtypes.EventDataNewBlockHeader) error { return err } - + // 2. index BeginBlock events beginKeys, err := idx.indexEvents(batch, bh.ResultBeginBlock.Events, "begin_block", height) if err != nil { return fmt.Errorf("index BeginBlock events: %w", err) } - + // 3. index EndBlock events endKeys, err := idx.indexEvents(batch, bh.ResultEndBlock.Events, "end_block", height) if err != nil { return fmt.Errorf("index EndBlock events: %w", err) } - + // 4. 
index all eventkeys by height key for easy pruning err = idx.addEventKeys(height, &beginKeys, &endKeys, batch) if err != nil { return err @@ -94,11 +94,11 @@ func (idx *BlockerIndexer) Index(bh tmtypes.EventDataNewBlockHeader) error { return batch.Commit() } - - - - - +// Search performs a query for block heights that match a given BeginBlock +// and Endblock event search criteria. The given query can match against zero, +// one or more block heights. In the case of height queries, i.e. block.height=H, +// if the height is indexed, that height alone will be returned. An error and +// nil slice is returned. Otherwise, a non-nil slice and nil error is returned. func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, error) { results := make([]int64, 0) select { @@ -113,8 +113,8 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, return nil, fmt.Errorf("parse query conditions: %w", err) } - - + // If there is an exact height query, return the result immediately + // (if it exists). height, ok := lookForHeight(conditions) if ok { ok, err := idx.Has(height) @@ -132,11 +132,11 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, var heightsInitialized bool filteredHeights := make(map[string][]byte) - + // conditions to skip because they're handled before "everything else" skipIndexes := make([]int, 0) - - + // Extract ranges. If both upper and lower bounds exist, it's better to get + // them in order as to not iterate over kvs that are not within range. ranges, rangeIndexes := indexer.LookForRanges(conditions) if len(ranges) > 0 { skipIndexes = append(skipIndexes, rangeIndexes...) @@ -155,8 +155,8 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, heightsInitialized = true - - + // Ignore any remaining conditions if the first condition resulted in no + // matches (assuming implicit AND operand). if len(filteredHeights) == 0 { break } @@ -169,7 +169,7 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, } } - + // for all other conditions for i, c := range conditions { if intInSlice(i, skipIndexes) { continue @@ -188,8 +188,8 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, heightsInitialized = true - - + // Ignore any remaining conditions if the first condition resulted in no + // matches (assuming implicit AND operand). if len(filteredHeights) == 0 { break } @@ -201,7 +201,7 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, } } - + // fetch matching heights results = make([]int64, 0, len(filteredHeights)) for _, hBz := range filteredHeights { cont := true @@ -232,12 +232,12 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, return results, nil } - - - - - - +// matchRange returns all matching block heights that match a given QueryRange +// and start key. An already filtered result (filteredHeights) is provided such +// that any non-intersecting matches are removed. +// +// NOTE: The provided filteredHeights may be empty if no previous condition has +// matched. func (idx *BlockerIndexer) matchRange( ctx context.Context, qr indexer.QueryRange, @@ -245,8 +245,8 @@ func (idx *BlockerIndexer) matchRange( filteredHeights map[string][]byte, firstRun bool, ) (map[string][]byte, error) { - - + // A previous match was attempted but resulted in no matches, so we return + // no matches (assuming AND operand). 
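// Illustrative sketch, not part of this patch: issuing a height-range query
// against the KV block indexer using the tendermint query syntax Search parses
// above. store.NewDefaultInMemoryKVStore is an assumed helper from the store
// package; any store.KV works.
package main

import (
	"context"
	"fmt"

	kvindexer "github.com/dymensionxyz/dymint/indexers/blockindexer/kv"
	"github.com/dymensionxyz/dymint/store"
	"github.com/tendermint/tendermint/libs/pubsub/query"
)

func main() {
	idx := kvindexer.New(store.NewDefaultInMemoryKVStore())

	// Both bounds are given so matchRange can walk only the keys inside the
	// range instead of scanning the whole index.
	q := query.MustParse("block.height >= 10 AND block.height <= 20")
	heights, err := idx.Search(context.Background(), q)
	if err != nil {
		panic(err)
	}
	fmt.Println("matching heights:", heights)
}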
if !firstRun && len(filteredHeights) == 0 { return filteredHeights, nil } @@ -314,18 +314,18 @@ LOOP: } if len(tmpHeights) == 0 || firstRun { - - - - - - - + // Either: + // + // 1. Regardless if a previous match was attempted, which may have had + // results, but no match was found for the current condition, then we + // return no matches (assuming AND operand). + // + // 2. A previous match was not attempted, so we return all results. return tmpHeights, nil } - - + // Remove/reduce matches in filteredHashes that were not found in this + // match (tmpHashes). for k := range filteredHeights { cont := true @@ -348,12 +348,12 @@ LOOP: return filteredHeights, nil } - - - - - - +// match returns all matching heights that meet a given query condition and start +// key. An already filtered result (filteredHeights) is provided such that any +// non-intersecting matches are removed. +// +// NOTE: The provided filteredHeights may be empty if no previous condition has +// matched. func (idx *BlockerIndexer) match( ctx context.Context, c query.Condition, @@ -361,8 +361,8 @@ func (idx *BlockerIndexer) match( filteredHeights map[string][]byte, firstRun bool, ) (map[string][]byte, error) { - - + // A previous match was attempted but resulted in no matches, so we return + // no matches (assuming AND operand). if !firstRun && len(filteredHeights) == 0 { return filteredHeights, nil } @@ -457,18 +457,18 @@ func (idx *BlockerIndexer) match( } if len(tmpHeights) == 0 || firstRun { - - - - - - - + // Either: + // + // 1. Regardless if a previous match was attempted, which may have had + // results, but no match was found for the current condition, then we + // return no matches (assuming AND operand). + // + // 2. A previous match was not attempted, so we return all results. return tmpHeights, nil } - - + // Remove/reduce matches in filteredHeights that were not found in this + // match (tmpHeights). 
for k := range filteredHeights { cont := true @@ -495,7 +495,7 @@ func (idx *BlockerIndexer) indexEvents(batch store.KVBatch, events []abci.Event, heightBz := int64ToBytes(height) keys := dmtypes.EventKeys{} for _, event := range events { - + // only index events with a non-empty type if len(event.Type) == 0 { continue } @@ -505,7 +505,7 @@ func (idx *BlockerIndexer) indexEvents(batch store.KVBatch, events []abci.Event, continue } - + // index iff the event specified index:true and it's not a reserved event compositeKey := fmt.Sprintf("%s.%s", event.Type, string(attr.Key)) if compositeKey == tmtypes.BlockHeightKey { return dmtypes.EventKeys{}, fmt.Errorf("event type and attribute key \"%s\" is reserved; please use a different key", compositeKey) @@ -546,9 +546,9 @@ func (idx *BlockerIndexer) pruneBlocks(from, to uint64, logger log.Logger) (uint return nil } - for h := int64(from); h < int64(to); h++ { + for h := int64(from); h < int64(to); h++ { //nolint:gosec // heights (from and to) are always positive and fall in int64 - + // flush every 1000 blocks to avoid batches becoming too large if toFlush > 1000 { err := flush(batch, h) if err != nil { @@ -592,7 +592,7 @@ func (idx *BlockerIndexer) pruneBlocks(from, to uint64, logger log.Logger) (uint } - err := flush(batch, int64(to)) + err := flush(batch, int64(to)) //nolint:gosec // height is non-negative and falls in int64 if err != nil { return 0, err } diff --git a/indexers/blockindexer/null/null.go b/indexers/blockindexer/null/null.go index ab80fa5a9..e6ee3335f 100644 --- a/indexers/blockindexer/null/null.go +++ b/indexers/blockindexer/null/null.go @@ -13,7 +13,7 @@ import ( var _ indexer.BlockIndexer = (*BlockerIndexer)(nil) - +// TxIndex implements a no-op block indexer. type BlockerIndexer struct{} func (idx *BlockerIndexer) Has(height int64) (bool, error) { diff --git a/indexers/blockindexer/query_range.go b/indexers/blockindexer/query_range.go index 9b2798524..b4edf53c5 100644 --- a/indexers/blockindexer/query_range.go +++ b/indexers/blockindexer/query_range.go @@ -6,21 +6,21 @@ import ( "github.com/tendermint/tendermint/libs/pubsub/query" ) - - - +// QueryRanges defines a mapping between a composite event key and a QueryRange. +// +// e.g.account.number => queryRange{lowerBound: 1, upperBound: 5} type QueryRanges map[string]QueryRange - +// QueryRange defines a range within a query condition. type QueryRange struct { - LowerBound interface{} - UpperBound interface{} + LowerBound interface{} // int || time.Time + UpperBound interface{} // int || time.Time Key string IncludeLowerBound bool IncludeUpperBound bool } - +// AnyBound returns either the lower bound if non-nil, otherwise the upper bound. func (qr QueryRange) AnyBound() interface{} { if qr.LowerBound != nil { return qr.LowerBound @@ -29,8 +29,8 @@ func (qr QueryRange) AnyBound() interface{} { return qr.UpperBound } - - +// LowerBoundValue returns the value for the lower bound. If the lower bound is +// nil, nil will be returned. func (qr QueryRange) LowerBoundValue() interface{} { if qr.LowerBound == nil { return nil @@ -52,8 +52,8 @@ func (qr QueryRange) LowerBoundValue() interface{} { } } - - +// UpperBoundValue returns the value for the upper bound. If the upper bound is +// nil, nil will be returned. 
func (qr QueryRange) UpperBoundValue() interface{} { if qr.UpperBound == nil { return nil @@ -75,8 +75,8 @@ func (qr QueryRange) UpperBoundValue() interface{} { } } - - +// LookForRanges returns a mapping of QueryRanges and the matching indexes in +// the provided query conditions. func LookForRanges(conditions []query.Condition) (ranges QueryRanges, indexes []int) { ranges = make(QueryRanges) for i, c := range conditions { @@ -110,8 +110,8 @@ func LookForRanges(conditions []query.Condition) (ranges QueryRanges, indexes [] return ranges, indexes } - - +// IsRangeOperation returns a boolean signifying if a query Operator is a range +// operation or not. func IsRangeOperation(op query.Operator) bool { switch op { case query.OpGreater, query.OpGreaterEqual, query.OpLess, query.OpLessEqual: diff --git a/indexers/txindex/indexer.go b/indexers/txindex/indexer.go index 6e275a021..281c1dccc 100644 --- a/indexers/txindex/indexer.go +++ b/indexers/txindex/indexer.go @@ -10,33 +10,33 @@ import ( "github.com/tendermint/tendermint/libs/pubsub/query" ) - +// TxIndexer interface defines methods to index and search transactions. type TxIndexer interface { - + // AddBatch analyzes, indexes and stores a batch of transactions. AddBatch(b *Batch) error - + // Index analyzes, indexes and stores a single transaction. Index(result *abci.TxResult) error - - + // Get returns the transaction specified by hash or nil if the transaction is not indexed + // or stored. Get(hash []byte) (*abci.TxResult, error) - + // Search allows you to query for transactions. Search(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) - + // Delete index entries for the heights between from (included) and to (not included). It returns heights pruned Prune(from, to uint64, logger log.Logger) (uint64, error) } - - +// Batch groups together multiple Index operations to be performed at the same time. +// NOTE: Batch is NOT thread-safe and must not be modified after starting its execution. type Batch struct { Height int64 Ops []*abci.TxResult } - +// NewBatch creates a new Batch. func NewBatch(n int64, height int64) *Batch { return &Batch{ Height: height, @@ -44,16 +44,16 @@ func NewBatch(n int64, height int64) *Batch { } } - +// Add or update an entry for the given result.Index. func (b *Batch) Add(result *abci.TxResult) error { b.Ops[result.Index] = result return nil } - +// Size returns the total number of operations inside the batch. func (b *Batch) Size() int { return len(b.Ops) } - +// ErrorEmptyHash indicates empty hash var ErrorEmptyHash = errors.New("transaction hash cannot be empty") diff --git a/indexers/txindex/indexer_service.go b/indexers/txindex/indexer_service.go index 16e022f92..e5ec76696 100644 --- a/indexers/txindex/indexer_service.go +++ b/indexers/txindex/indexer_service.go @@ -11,14 +11,14 @@ import ( "github.com/tendermint/tendermint/types" ) - +// XXX/TODO: These types should be moved to the indexer package. const ( subscriber = "IndexerService" ) - - +// IndexerService connects event bus, transaction and block indexers together in +// order to index transactions and blocks coming from the event bus. type IndexerService struct { service.BaseService @@ -27,7 +27,7 @@ type IndexerService struct { eventBus *types.EventBus } - +// NewIndexerService returns a new service instance. 
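// Illustrative sketch, not part of this patch: indexing all transactions of a
// block as one Batch via the KV tx indexer. store.NewDefaultInMemoryKVStore is
// an assumed helper; the tx payloads are placeholders.
package main

import (
	abci "github.com/tendermint/tendermint/abci/types"

	"github.com/dymensionxyz/dymint/indexers/txindex"
	txkv "github.com/dymensionxyz/dymint/indexers/txindex/kv"
	"github.com/dymensionxyz/dymint/store"
)

func main() {
	txIndexer := txkv.NewTxIndex(store.NewDefaultInMemoryKVStore())

	// Two txs at height 7; Add places each result at its Index slot.
	b := txindex.NewBatch(2, 7)
	_ = b.Add(&abci.TxResult{Height: 7, Index: 0, Tx: []byte("tx-0")})
	_ = b.Add(&abci.TxResult{Height: 7, Index: 1, Tx: []byte("tx-1")})

	if err := txIndexer.AddBatch(b); err != nil {
		panic(err)
	}
}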
func NewIndexerService( txIdxr TxIndexer, blockIdxr indexer.BlockIndexer, @@ -38,12 +38,12 @@ func NewIndexerService( return is } - - +// OnStart implements service.Service by subscribing for all transactions +// and indexing them by events. func (is *IndexerService) OnStart() error { - - - + // Use SubscribeUnbuffered here to ensure both subscriptions does not get + // cancelled due to not pulling messages fast enough. Cause this might + // sometimes happen when there are no other subscribers. blockHeadersSub, err := is.eventBus.Subscribe( context.Background(), subscriber, @@ -94,16 +94,16 @@ func (is *IndexerService) OnStart() error { return nil } - +// OnStop implements service.Service by unsubscribing from all transactions. func (is *IndexerService) OnStop() { if is.eventBus.IsRunning() { _ = is.eventBus.UnsubscribeAll(context.Background(), subscriber) } } - +// Prune removes tx and blocks indexed up to (but not including) a height. func (is *IndexerService) Prune(to uint64, s store.Store) (uint64, error) { - + // load indexer base height indexerBaseHeight, err := s.LoadIndexerBaseHeight() if errors.Is(err, gerrc.ErrNotFound) { @@ -112,19 +112,19 @@ func (is *IndexerService) Prune(to uint64, s store.Store) (uint64, error) { return 0, err } - + // prune indexed blocks blockPruned, err := is.blockIdxr.Prune(indexerBaseHeight, to, is.Logger) if err != nil { return blockPruned, err } - + // prune indexes txs txPruned, err := is.txIdxr.Prune(indexerBaseHeight, to, is.Logger) if err != nil { return txPruned, err } - + // store indexer base height err = s.SaveIndexerBaseHeight(to) if err != nil { is.Logger.Error("saving indexer base height", "err", err) diff --git a/indexers/txindex/kv/kv.go b/indexers/txindex/kv/kv.go index 485ba01ea..e1ea88910 100644 --- a/indexers/txindex/kv/kv.go +++ b/indexers/txindex/kv/kv.go @@ -29,20 +29,20 @@ const ( var _ txindex.TxIndexer = (*TxIndex)(nil) - +// TxIndex is the simplest possible indexer, backed by key-value storage (levelDB). type TxIndex struct { store store.KV } - +// NewTxIndex creates new KV indexer. func NewTxIndex(store store.KV) *TxIndex { return &TxIndex{ store: store, } } - - +// Get gets transaction from the TxIndex storage and returns it or nil if the +// transaction is not found. func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) { if len(hash) == 0 { return nil, txindex.ErrorEmptyHash @@ -65,10 +65,10 @@ func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) { return txResult, nil } - - - - +// AddBatch indexes a batch of transactions using the given list of events. Each +// key that indexed from the tx's events is a composite of the event type and +// the respective attribute's key delimited by a "." (eg. "account.number"). +// Any event with an empty type is not indexed. func (txi *TxIndex) AddBatch(b *txindex.Batch) error { storeBatch := txi.store.NewBatch() defer storeBatch.Discard() @@ -77,13 +77,13 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { for _, result := range b.Ops { hash := types.Tx(result.Tx).Hash() - + // index tx by events eventKeys, err := txi.indexEvents(result, hash, storeBatch) if err != nil { return err } eventKeysBatch.Keys = append(eventKeysBatch.Keys, eventKeys.Keys...) 
- + // index by height (always) err = storeBatch.Set(keyForHeight(result), hash) if err != nil { return err @@ -93,7 +93,7 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { if err != nil { return err } - + // index by hash (always) err = storeBatch.Set(hash, rawBytes) if err != nil { return err @@ -108,29 +108,29 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { return storeBatch.Commit() } - - - - +// Index indexes a single transaction using the given list of events. Each key +// that indexed from the tx's events is a composite of the event type and the +// respective attribute's key delimited by a "." (eg. "account.number"). +// Any event with an empty type is not indexed. func (txi *TxIndex) Index(result *abci.TxResult) error { b := txi.store.NewBatch() defer b.Discard() hash := types.Tx(result.Tx).Hash() - + // index tx by events eventKeys, err := txi.indexEvents(result, hash, b) if err != nil { return err } - + // add event keys height index err = txi.addEventKeys(result.Height, &eventKeys, b) if err != nil { return nil } - + // index by height (always) err = b.Set(keyForHeight(result), hash) if err != nil { return err @@ -140,7 +140,7 @@ func (txi *TxIndex) Index(result *abci.TxResult) error { if err != nil { return err } - + // index by hash (always) err = b.Set(hash, rawBytes) if err != nil { return err @@ -152,7 +152,7 @@ func (txi *TxIndex) Index(result *abci.TxResult) error { func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store store.KVBatch) (dmtypes.EventKeys, error) { eventKeys := dmtypes.EventKeys{} for _, event := range result.Result.Events { - + // only index events with a non-empty type if len(event.Type) == 0 { continue } @@ -162,7 +162,7 @@ func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store store. continue } - + // index if `index: true` is set compositeTag := fmt.Sprintf("%s.%s", event.Type, string(attr.Key)) if attr.GetIndex() { err := store.Set(keyForEvent(compositeTag, attr.Value, result), hash) @@ -177,17 +177,17 @@ func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store store. return eventKeys, nil } - - - - - - - - - - - +// Search performs a search using the given query. +// +// It breaks the query into conditions (like "tx.height > 5"). For each +// condition, it queries the DB index. One special use cases here: (1) if +// "tx.hash" is found, it returns tx result for it (2) for range queries it is +// better for the client to provide both lower and upper bounds, so we are not +// performing a full scan. Results from querying indexes are then intersected +// and returned to the caller, in no particular order. +// +// Search will exit early and return any result fetched so far, +// when a message is received on the context chan. 
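// Illustrative sketch, not part of this patch: the two Search fast paths
// described above -- an exact tx.hash lookup and a range query with both
// bounds supplied. The hash literal is a placeholder and txIndexer is assumed
// to be an already-populated TxIndexer.
package example

import (
	"context"
	"fmt"

	"github.com/dymensionxyz/dymint/indexers/txindex"
	"github.com/tendermint/tendermint/libs/pubsub/query"
)

func searchExamples(ctx context.Context, txIndexer txindex.TxIndexer) error {
	// (1) A tx.hash condition is resolved directly from the hash index.
	byHash, err := txIndexer.Search(ctx, query.MustParse("tx.hash='AB12CD'"))
	if err != nil {
		return err
	}

	// (2) Supplying both bounds keeps the range scan narrow, as the comment
	// above recommends.
	byHeight, err := txIndexer.Search(ctx, query.MustParse("tx.height>=5 AND tx.height<=10"))
	if err != nil {
		return err
	}

	fmt.Println(len(byHash), "hash matches,", len(byHeight), "height matches")
	return nil
}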
func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) { select { case <-ctx.Done(): @@ -199,13 +199,13 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul var hashesInitialized bool filteredHashes := make(map[string][]byte) - + // get a list of conditions (like "tx.height > 5") conditions, err := q.Conditions() if err != nil { return nil, fmt.Errorf("during parsing conditions from query: %w", err) } - + // if there is a hash condition, return the result immediately hash, ok, err := lookForHash(conditions) if err != nil { return nil, fmt.Errorf("during searching for a hash in the query: %w", err) @@ -221,12 +221,12 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul } } - + // conditions to skip because they're handled before "everything else" skipIndexes := make([]int, 0) - - - + // extract ranges + // if both upper and lower bounds exist, it's better to get them in order not + // no iterate over kvs that are not within range. ranges, rangeIndexes := indexer.LookForRanges(conditions) if len(ranges) > 0 { skipIndexes = append(skipIndexes, rangeIndexes...) @@ -236,8 +236,8 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul filteredHashes = txi.matchRange(ctx, qr, startKey(qr.Key), filteredHashes, true) hashesInitialized = true - - + // Ignore any remaining conditions if the first condition resulted + // in no matches (assuming implicit AND operand). if len(filteredHashes) == 0 { break } @@ -247,10 +247,10 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul } } - + // if there is a height condition ("tx.height=3"), extract it height := lookForHeight(conditions) - + // for all other conditions for i, c := range conditions { if intInSlice(i, skipIndexes) { continue @@ -260,8 +260,8 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul filteredHashes = txi.match(ctx, c, startKeyForCondition(c, height), filteredHashes, true) hashesInitialized = true - - + // Ignore any remaining conditions if the first condition resulted + // in no matches (assuming implicit AND operand). if len(filteredHashes) == 0 { break } @@ -283,7 +283,7 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul } results = append(results, res) - + // Potentially exit early. select { case <-ctx.Done(): cont = false @@ -308,7 +308,7 @@ func lookForHash(conditions []query.Condition) (hash []byte, ok bool, err error) return } - +// lookForHeight returns a height if there is an "height=X" condition. func lookForHeight(conditions []query.Condition) (height int64) { for _, c := range conditions { if c.CompositeKey == tmtypes.TxHeightKey && c.Op == query.OpEqual { @@ -318,11 +318,11 @@ func lookForHeight(conditions []query.Condition) (height int64) { return 0 } - - - - - +// match returns all matching txs by hash that meet a given condition and start +// key. An already filtered result (filteredHashes) is provided such that any +// non-intersecting matches are removed. +// +// NOTE: filteredHashes may be empty if no previous condition has matched. func (txi *TxIndex) match( ctx context.Context, c query.Condition, @@ -330,8 +330,8 @@ func (txi *TxIndex) match( filteredHashes map[string][]byte, firstRun bool, ) map[string][]byte { - - + // A previous match was attempted but resulted in no matches, so we return + // no matches (assuming AND operand). 
if !firstRun && len(filteredHashes) == 0 { return filteredHashes } @@ -348,7 +348,7 @@ func (txi *TxIndex) match( tmpHashes[string(it.Value())] = it.Value() - + // Potentially exit early. select { case <-ctx.Done(): cont = false @@ -364,8 +364,8 @@ func (txi *TxIndex) match( } case c.Op == query.OpExists: - - + // XXX: can't use startKeyBz here because c.Operand is nil + // (e.g. "account.owner//" won't match w/ a single row) it := txi.store.PrefixIterator(startKey(c.CompositeKey)) defer it.Discard() @@ -374,7 +374,7 @@ func (txi *TxIndex) match( tmpHashes[string(it.Value())] = it.Value() - + // Potentially exit early. select { case <-ctx.Done(): cont = false @@ -390,9 +390,9 @@ func (txi *TxIndex) match( } case c.Op == query.OpContains: - - - + // XXX: startKey does not apply here. + // For example, if startKey = "account.owner/an/" and search query = "account.owner CONTAINS an" + // we can't iterate with prefix "account.owner/an/" because we might miss keys like "account.owner/Ulan/" it := txi.store.PrefixIterator(startKey(c.CompositeKey)) defer it.Discard() @@ -407,7 +407,7 @@ func (txi *TxIndex) match( tmpHashes[string(it.Value())] = it.Value() } - + // Potentially exit early. select { case <-ctx.Done(): cont = false @@ -426,25 +426,25 @@ func (txi *TxIndex) match( } if len(tmpHashes) == 0 || firstRun { - - - - - - - + // Either: + // + // 1. Regardless if a previous match was attempted, which may have had + // results, but no match was found for the current condition, then we + // return no matches (assuming AND operand). + // + // 2. A previous match was not attempted, so we return all results. return tmpHashes } - - + // Remove/reduce matches in filteredHashes that were not found in this + // match (tmpHashes). for k := range filteredHashes { cont := true if tmpHashes[k] == nil { delete(filteredHashes, k) - + // Potentially exit early. select { case <-ctx.Done(): cont = false @@ -460,11 +460,11 @@ func (txi *TxIndex) match( return filteredHashes } - - - - - +// matchRange returns all matching txs by hash that meet a given queryRange and +// start key. An already filtered result (filteredHashes) is provided such that +// any non-intersecting matches are removed. +// +// NOTE: filteredHashes may be empty if no previous condition has matched. func (txi *TxIndex) matchRange( ctx context.Context, qr indexer.QueryRange, @@ -472,8 +472,8 @@ func (txi *TxIndex) matchRange( filteredHashes map[string][]byte, firstRun bool, ) map[string][]byte { - - + // A previous match was attempted but resulted in no matches, so we return + // no matches (assuming AND operand). if !firstRun && len(filteredHashes) == 0 { return filteredHashes } @@ -512,15 +512,15 @@ LOOP: tmpHashes[string(it.Value())] = it.Value() } - - - - - - + // XXX: passing time in a ABCI Events is not yet implemented + // case time.Time: + // v := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64) + // if v == r.upperBound { + // break + // } } - + // Potentially exit early. select { case <-ctx.Done(): cont = false @@ -536,25 +536,25 @@ LOOP: } if len(tmpHashes) == 0 || firstRun { - - - - - - - + // Either: + // + // 1. Regardless if a previous match was attempted, which may have had + // results, but no match was found for the current condition, then we + // return no matches (assuming AND operand). + // + // 2. A previous match was not attempted, so we return all results. return tmpHashes } - - + // Remove/reduce matches in filteredHashes that were not found in this + // match (tmpHashes). 
for k := range filteredHashes { cont := true if tmpHashes[k] == nil { delete(filteredHashes, k) - + // Potentially exit early. select { case <-ctx.Done(): cont = false @@ -592,9 +592,9 @@ func (txi *TxIndex) pruneTxsAndEvents(from, to uint64, logger log.Logger) (uint6 return nil } - for h := int64(from); h < int64(to); h++ { + for h := int64(from); h < int64(to); h++ { //nolint:gosec // heights (from and to) are always positive and fall in int64 - + // flush every 1000 txs to avoid batches becoming too large if toFlush > 1000 { err := flush(batch, h) if err != nil { @@ -605,7 +605,7 @@ func (txi *TxIndex) pruneTxsAndEvents(from, to uint64, logger log.Logger) (uint6 toFlush = 0 } - + // first all events are pruned associated to the same height prunedEvents, err := txi.pruneEvents(h, batch) pruned += prunedEvents toFlush += prunedEvents @@ -614,10 +614,10 @@ func (txi *TxIndex) pruneTxsAndEvents(from, to uint64, logger log.Logger) (uint6 continue } - + // then all txs indexed are iterated by height it := txi.store.PrefixIterator(prefixForHeight(h)) - + // and deleted all indexed (by hash and by keyheight) for ; it.Valid(); it.Next() { toFlush++ if err := batch.Delete(it.Key()); err != nil { @@ -635,7 +635,7 @@ func (txi *TxIndex) pruneTxsAndEvents(from, to uint64, logger log.Logger) (uint6 } - err := flush(batch, int64(to)) + err := flush(batch, int64(to)) //nolint:gosec // height is non-negative and falls in int64 if err != nil { return 0, err } @@ -669,7 +669,7 @@ func (txi *TxIndex) pruneEvents(height int64, batch store.KVBatch) (uint64, erro } func (txi *TxIndex) addEventKeys(height int64, eventKeys *dymint.EventKeys, batch store.KVBatch) error { - + // index event keys by height eventKeyHeight, err := eventHeightKey(height) if err != nil { return err @@ -684,7 +684,7 @@ func (txi *TxIndex) addEventKeys(height int64, eventKeys *dymint.EventKeys, batc return nil } - +// Keys func isTagKey(key []byte) bool { return strings.Count(string(key), tagKeySeparator) == 3 diff --git a/indexers/txindex/kv/utils.go b/indexers/txindex/kv/utils.go index 05cb12c90..73cb223f2 100644 --- a/indexers/txindex/kv/utils.go +++ b/indexers/txindex/kv/utils.go @@ -4,7 +4,7 @@ import "github.com/google/orderedcode" const TxEventHeightKey = "txevent.height" - +// IntInSlice returns true if a is found in the list. func intInSlice(a int, list []int) bool { for _, b := range list { if b == a { diff --git a/indexers/txindex/null/null.go b/indexers/txindex/null/null.go index 7d2167389..426b08099 100644 --- a/indexers/txindex/null/null.go +++ b/indexers/txindex/null/null.go @@ -13,20 +13,20 @@ import ( var _ txindex.TxIndexer = (*TxIndex)(nil) - +// TxIndex acts as a /dev/null. type TxIndex struct{} - +// Get on a TxIndex is disabled and panics when invoked. func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) { return nil, errors.New(`indexing is disabled (set 'tx_index = "kv"' in config)`) } - +// AddBatch is a noop and always returns nil. func (txi *TxIndex) AddBatch(batch *txindex.Batch) error { return nil } - +// Index is a noop and always returns nil. func (txi *TxIndex) Index(result *abci.TxResult) error { return nil } diff --git a/mempool/cache.go b/mempool/cache.go index fdb11ea5b..78aefa3c4 100644 --- a/mempool/cache.go +++ b/mempool/cache.go @@ -7,31 +7,31 @@ import ( "github.com/tendermint/tendermint/types" ) - - - - - +// TxCache defines an interface for raw transaction caching in a mempool. +// Currently, a TxCache does not allow direct reading or getting of transaction +// values. 
A TxCache is used primarily to push transactions and removing +// transactions. Pushing via Push returns a boolean telling the caller if the +// transaction already exists in the cache or not. type TxCache interface { - + // Reset resets the cache to an empty state. Reset() - - + // Push adds the given raw transaction to the cache and returns true if it was + // newly added. Otherwise, it returns false. Push(tx types.Tx) bool - + // Remove removes the given raw transaction from the cache. Remove(tx types.Tx) - - + // Has reports whether tx is present in the cache. Checking for presence is + // not treated as an access of the value. Has(tx types.Tx) bool } var _ TxCache = (*LRUTxCache)(nil) - - +// LRUTxCache maintains a thread-safe LRU cache of raw transactions. The cache +// only stores the hash of the raw transaction. type LRUTxCache struct { mtx sync.Mutex size int @@ -47,8 +47,8 @@ func NewLRUTxCache(cacheSize int) *LRUTxCache { } } - - +// GetList returns the underlying linked-list that backs the LRU cache. Note, +// this should be used for testing purposes only! func (c *LRUTxCache) GetList() *list.List { return c.list } @@ -109,7 +109,7 @@ func (c *LRUTxCache) Has(tx types.Tx) bool { return ok } - +// NopTxCache defines a no-op raw transaction cache. type NopTxCache struct{} var _ TxCache = (*NopTxCache)(nil) diff --git a/mempool/clist/clist.go b/mempool/clist/clist.go index ff94a4b49..2e4171b1c 100644 --- a/mempool/clist/clist.go +++ b/mempool/clist/clist.go @@ -1,6 +1,15 @@ package clist +/* +The purpose of CList is to provide a goroutine-safe linked-list. +This list can be traversed concurrently by any number of goroutines. +However, removed CElements cannot be added back. +NOTE: Not all methods of container/list are (yet) implemented. +NOTE: Removed elements need to DetachPrev or DetachNext consistently +to ensure garbage collection of removed elements. + +*/ import ( "fmt" @@ -9,12 +18,29 @@ import ( tmsync "github.com/tendermint/tendermint/libs/sync" ) - - - +// MaxLength is the max allowed number of elements a linked list is +// allowed to contain. +// If more elements are pushed to the list it will panic. const MaxLength = int(^uint(0) >> 1) - +/* +CElement is an element of a linked-list +Traversal from a CElement is goroutine-safe. + +We can't avoid using WaitGroups or for-loops given the documentation +spec without re-implementing the primitives that already exist in +golang/sync. Notice that WaitGroup allows many go-routines to be +simultaneously released, which is what we want. Mutex doesn't do +this. RWMutex does this, but it's clumsy to use in the way that a +WaitGroup would be used -- and we'd end up having two RWMutex's for +prev/next each, which is doubly confusing. + +sync.Cond would be sort-of useful, but we don't need a write-lock in +the for-loop. Use sync.Cond when you need serial access to the +"condition". In our case our condition is if `next != nil || removed`, +and there's no reason to serialize that condition for goroutines +waiting on NextWait() (since it's just a read operation). +*/ type CElement struct { mtx tmsync.RWMutex prev *CElement @@ -25,11 +51,11 @@ type CElement struct { nextWaitCh chan struct{} removed bool - Value interface{} + Value interface{} // immutable } - - +// Blocking implementation of Next(). +// May return nil iff CElement was tail and got removed. 
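// Illustrative sketch, not part of this patch: the push/has/remove lifecycle
// of the LRU tx cache defined above. Only the tx hash is stored, so two
// byte-identical transactions collide, which is the intended behaviour.
package main

import (
	"fmt"

	"github.com/dymensionxyz/dymint/mempool"
	"github.com/tendermint/tendermint/types"
)

func main() {
	cache := mempool.NewLRUTxCache(100)

	tx := types.Tx("transfer:alice->bob:10")
	fmt.Println(cache.Push(tx)) // true: newly added
	fmt.Println(cache.Push(tx)) // false: already cached
	fmt.Println(cache.Has(tx))  // true

	cache.Remove(tx)
	fmt.Println(cache.Has(tx)) // false
}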
func (e *CElement) NextWait() *CElement { for { e.mtx.RLock() @@ -43,13 +69,13 @@ func (e *CElement) NextWait() *CElement { } nextWg.Wait() - - + // e.next doesn't necessarily exist here. + // That's why we need to continue a for-loop. } } - - +// Blocking implementation of Prev(). +// May return nil iff CElement was head and got removed. func (e *CElement) PrevWait() *CElement { for { e.mtx.RLock() @@ -66,8 +92,8 @@ func (e *CElement) PrevWait() *CElement { } } - - +// PrevWaitChan can be used to wait until Prev becomes not nil. Once it does, +// channel will be closed. func (e *CElement) PrevWaitChan() <-chan struct{} { e.mtx.RLock() defer e.mtx.RUnlock() @@ -75,8 +101,8 @@ func (e *CElement) PrevWaitChan() <-chan struct{} { return e.prevWaitCh } - - +// NextWaitChan can be used to wait until Next becomes not nil. Once it does, +// channel will be closed. func (e *CElement) NextWaitChan() <-chan struct{} { e.mtx.RLock() defer e.mtx.RUnlock() @@ -84,7 +110,7 @@ func (e *CElement) NextWaitChan() <-chan struct{} { return e.nextWaitCh } - +// Nonblocking, may return nil if at the end. func (e *CElement) Next() *CElement { e.mtx.RLock() val := e.next @@ -92,7 +118,7 @@ func (e *CElement) Next() *CElement { return val } - +// Nonblocking, may return nil if at the end. func (e *CElement) Prev() *CElement { e.mtx.RLock() prev := e.prev @@ -127,20 +153,20 @@ func (e *CElement) DetachPrev() { e.mtx.Unlock() } - - +// NOTE: This function needs to be safe for +// concurrent goroutines waiting on nextWg. func (e *CElement) SetNext(newNext *CElement) { e.mtx.Lock() oldNext := e.next e.next = newNext if oldNext != nil && newNext == nil { - - - - - - e.nextWg = waitGroup1() + // See https://golang.org/pkg/sync/: + // + // If a WaitGroup is reused to wait for several independent sets of + // events, new Add calls must happen after all previous Wait calls have + // returned. + e.nextWg = waitGroup1() // WaitGroups are difficult to re-use. e.nextWaitCh = make(chan struct{}) } if oldNext == nil && newNext != nil { @@ -150,15 +176,15 @@ func (e *CElement) SetNext(newNext *CElement) { e.mtx.Unlock() } - - +// NOTE: This function needs to be safe for +// concurrent goroutines waiting on prevWg func (e *CElement) SetPrev(newPrev *CElement) { e.mtx.Lock() oldPrev := e.prev e.prev = newPrev if oldPrev != nil && newPrev == nil { - e.prevWg = waitGroup1() + e.prevWg = waitGroup1() // WaitGroups are difficult to re-use. e.prevWaitCh = make(chan struct{}) } if oldPrev == nil && newPrev != nil { @@ -173,7 +199,7 @@ func (e *CElement) SetRemoved() { e.removed = true - + // This wakes up anyone waiting in either direction. if e.prev == nil { e.prevWg.Done() close(e.prevWaitCh) @@ -185,20 +211,20 @@ func (e *CElement) SetRemoved() { e.mtx.Unlock() } +//-------------------------------------------------------------------------------- - - - - - +// CList represents a linked list. +// The zero value for CList is an empty list ready to use. +// Operations are goroutine-safe. +// Panics if length grows beyond the max. type CList struct { mtx tmsync.RWMutex wg *sync.WaitGroup waitCh chan struct{} - head *CElement - tail *CElement - len int - maxLen int + head *CElement // first element + tail *CElement // last element + len int // list length + maxLen int // max list length } func (l *CList) Init() *CList { @@ -213,11 +239,11 @@ func (l *CList) Init() *CList { return l } - +// Return CList with MaxLength. CList will panic if it goes beyond MaxLength. 
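// Illustrative sketch, not part of this patch: minimal CList usage that
// follows the contract spelled out above -- detach removed elements so they
// can be garbage-collected, and use the *Wait variants only when the consumer
// should block for new elements.
package main

import (
	"fmt"

	"github.com/dymensionxyz/dymint/mempool/clist"
)

func main() {
	l := clist.New()
	l.PushBack("a")
	b := l.PushBack("b")
	l.PushBack("c")

	// Remove the middle element, then detach both links per the contract.
	l.Remove(b)
	b.DetachPrev()
	b.DetachNext()

	// Non-blocking traversal; FrontWait/NextWait would block instead of
	// returning nil when the list or the next slot is empty.
	for e := l.Front(); e != nil; e = e.Next() {
		fmt.Println(e.Value)
	}
}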
func New() *CList { return newWithMax(MaxLength) } - - +// Return CList with given maxLength. +// Will panic if list exceeds given maxLength. func newWithMax(maxLength int) *CList { l := new(CList) l.maxLen = maxLength @@ -239,7 +265,7 @@ func (l *CList) Front() *CElement { } func (l *CList) FrontWait() *CElement { - + // Loop until the head is non-nil else wait and try again for { l.mtx.RLock() head := l.head @@ -250,7 +276,7 @@ func (l *CList) FrontWait() *CElement { return head } wg.Wait() - + // NOTE: If you think l.head exists here, think harder. } } @@ -272,13 +298,13 @@ func (l *CList) BackWait() *CElement { return tail } wg.Wait() - - + // l.tail doesn't necessarily exist here. + // That's why we need to continue a for-loop. } } - - +// WaitChan can be used to wait until Front or Back becomes not nil. Once it +// does, channel will be closed. func (l *CList) WaitChan() <-chan struct{} { l.mtx.Lock() defer l.mtx.Unlock() @@ -286,11 +312,11 @@ func (l *CList) WaitChan() <-chan struct{} { return l.waitCh } - +// Panics if list grows beyond its max length. func (l *CList) PushBack(v interface{}) *CElement { l.mtx.Lock() - + // Construct a new element e := &CElement{ prev: nil, prevWg: waitGroup1(), @@ -302,7 +328,7 @@ func (l *CList) PushBack(v interface{}) *CElement { Value: v, } - + // Release waiters on FrontWait/BackWait maybe if l.len == 0 { l.wg.Done() close(l.waitCh) @@ -312,21 +338,21 @@ func (l *CList) PushBack(v interface{}) *CElement { } l.len++ - + // Modify the tail if l.tail == nil { l.head = e l.tail = e } else { - e.SetPrev(l.tail) - l.tail.SetNext(e) - l.tail = e + e.SetPrev(l.tail) // We must init e first. + l.tail.SetNext(e) // This will make e accessible. + l.tail = e // Update the list. } l.mtx.Unlock() return e } - - +// CONTRACT: Caller must call e.DetachPrev() and/or e.DetachNext() to avoid memory leaks. +// NOTE: As per the contract of CList, removed elements cannot be added back. func (l *CList) Remove(e *CElement) interface{} { l.mtx.Lock() @@ -346,16 +372,16 @@ func (l *CList) Remove(e *CElement) interface{} { panic("Remove(e) with false tail") } - + // If we're removing the only item, make CList FrontWait/BackWait wait. if l.len == 1 { - l.wg = waitGroup1() + l.wg = waitGroup1() // WaitGroups are difficult to re-use. l.waitCh = make(chan struct{}) } - + // Update l.len l.len-- - + // Connect next/prev and set head/tail if prev == nil { l.head = next } else { @@ -367,7 +393,7 @@ func (l *CList) Remove(e *CElement) interface{} { next.SetPrev(prev) } - + // Set .Done() on e, otherwise waiters will wait forever. e.SetRemoved() l.mtx.Unlock() diff --git a/mempool/ids.go b/mempool/ids.go index 5afb3bc92..d64a07bda 100644 --- a/mempool/ids.go +++ b/mempool/ids.go @@ -1,3 +1,3 @@ package mempool - +// These functions were moved into v0/reactor.go and v1/reactor.go diff --git a/mempool/mempool.go b/mempool/mempool.go index dbbec0e02..48aa380f4 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -13,107 +13,107 @@ import ( const ( MempoolChannel = byte(0x30) - + // PeerCatchupSleepIntervalMS defines how much time to sleep if a peer is behind PeerCatchupSleepIntervalMS = 100 - - + // UnknownPeerID is the peer ID to use when running CheckTx when there is + // no peer (e.g. RPC) UnknownPeerID uint16 = 0 MaxActiveIDs = math.MaxUint16 ) - - - - +// Mempool defines the mempool interface. +// +// Updates to the mempool need to be synchronized with committing a block so +// applications can reset their transient state on Commit. 
type Mempool interface { - - + // CheckTx executes a new transaction against the application to determine + // its validity and whether it should be added to the mempool. CheckTx(tx types.Tx, callback func(*abci.Response), txInfo TxInfo) error - - + // RemoveTxByKey removes a transaction, identified by its key, + // from the mempool. RemoveTxByKey(txKey types.TxKey) error - - - - - - + // ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes + // bytes total with the condition that the total gasWanted must be less than + // maxGas. + // + // If both maxes are negative, there is no cap on the size of all returned + // transactions (~ all available transactions). ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs - - - + // ReapMaxTxs reaps up to max transactions from the mempool. If max is + // negative, there is no cap on the size of all returned transactions + // (~ all available transactions). ReapMaxTxs(max int) types.Txs - - + // Lock locks the mempool. The consensus must be able to hold lock to safely + // update. Lock() - + // Unlock unlocks the mempool. Unlock() - - - - - - + // Update informs the mempool that the given txs were committed and can be + // discarded. + // + // NOTE: + // 1. This should be called *after* block is committed by consensus. + // 2. Lock/Unlock must be managed by the caller. Update( blockHeight int64, blockTxs types.Txs, deliverTxResponses []*abci.ResponseDeliverTx, ) error - + // SetPreCheckFn sets the pre-check function. SetPreCheckFn(fn PreCheckFunc) - + // SetPostCheckFn sets the post-check function. SetPostCheckFn(fn PostCheckFunc) - - - - - + // FlushAppConn flushes the mempool connection to ensure async callback calls + // are done, e.g. from CheckTx. + // + // NOTE: + // 1. Lock/Unlock must be managed by caller. FlushAppConn() error - + // Flush removes all transactions from the mempool and caches. Flush() - - - - - + // TxsAvailable returns a channel which fires once for every height, and only + // when transactions are available in the mempool. + // + // NOTE: + // 1. The returned channel may be nil if EnableTxsAvailable was not called. TxsAvailable() <-chan struct{} - - + // EnableTxsAvailable initializes the TxsAvailable channel, ensuring it will + // trigger once every height when transactions are available. EnableTxsAvailable() - + // Size returns the number of transactions in the mempool. Size() int - + // SizeBytes returns the total size of all txs in the mempool. SizeBytes() int64 } - - - +// PreCheckFunc is an optional filter executed before CheckTx and rejects +// transaction if false is returned. An example would be to ensure that a +// transaction doesn't exceeded the block size. type PreCheckFunc func(types.Tx) error - - - +// PostCheckFunc is an optional filter executed after CheckTx and rejects +// transaction if false is returned. An example would be to ensure a +// transaction doesn't require more gas than available for the block. type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error - - +// PreCheckMaxBytes checks that the size of the transaction is smaller or equal +// to the expected maxBytes. func PreCheckMaxBytes(maxBytes int64) PreCheckFunc { return func(tx types.Tx) error { txSize := types.ComputeProtoSizeForTxs([]types.Tx{tx}) @@ -126,8 +126,8 @@ func PreCheckMaxBytes(maxBytes int64) PreCheckFunc { } } - - +// PostCheckMaxGas checks that the wanted gas is smaller or equal to the passed +// maxGas. Returns nil if maxGas is -1. 
func PostCheckMaxGas(maxGas int64) PostCheckFunc { return func(tx types.Tx, res *abci.ResponseCheckTx) error { if maxGas == -1 { @@ -146,14 +146,14 @@ func PostCheckMaxGas(maxGas int64) PostCheckFunc { } } - +// ErrTxInCache is returned to the client if we saw tx earlier var ErrTxInCache = errors.New("tx already exists in cache") - +// TxKey is the fixed length array key used as an index. type TxKey [sha256.Size]byte - - +// ErrTxTooLarge defines an error when a transaction is too big to be sent in a +// message to other peers. type ErrTxTooLarge struct { Max int Actual int @@ -163,8 +163,8 @@ func (e ErrTxTooLarge) Error() string { return fmt.Sprintf("Tx too large. Max size is %d, but got %d", e.Max, e.Actual) } - - +// ErrMempoolIsFull defines an error where Tendermint and the application cannot +// handle that much load. type ErrMempoolIsFull struct { NumTxs int MaxTxs int @@ -182,7 +182,7 @@ func (e ErrMempoolIsFull) Error() string { ) } - +// ErrPreCheck defines an error where a transaction fails a pre-check. type ErrPreCheck struct { Reason error } @@ -191,7 +191,7 @@ func (e ErrPreCheck) Error() string { return e.Reason.Error() } - +// IsPreCheckError returns true if err is due to pre check failure. func IsPreCheckError(err error) bool { return errors.As(err, &ErrPreCheck{}) } diff --git a/mempool/metrics.go b/mempool/metrics.go index 613715038..5d3022e80 100644 --- a/mempool/metrics.go +++ b/mempool/metrics.go @@ -8,42 +8,42 @@ import ( ) const ( - - + // MetricsSubsystem is a subsystem shared by all metrics exposed by this + // package. MetricsSubsystem = "mempool" ) - - +// Metrics contains metrics exposed by this package. +// see MetricsProvider for descriptions. type Metrics struct { - + // Size of the mempool. Size metrics.Gauge - + // Histogram of transaction sizes, in bytes. TxSizeBytes metrics.Histogram - + // Number of failed transactions. FailedTxs metrics.Counter - - - - + // RejectedTxs defines the number of rejected transactions. These are + // transactions that passed CheckTx but failed to make it into the mempool + // due to resource limits, e.g. mempool is full and no lower priority + // transactions exist in the mempool. RejectedTxs metrics.Counter - - - - + // EvictedTxs defines the number of evicted transactions. These are valid + // transactions that passed CheckTx and existed in the mempool but were later + // evicted to make room for higher priority valid transactions that passed + // CheckTx. EvictedTxs metrics.Counter - + // Number of times transactions are rechecked in the mempool. RecheckTimes metrics.Counter } - - - +// PrometheusMetrics returns Metrics build using Prometheus client library. +// Optionally, labels can be provided along with their values ("foo", +// "fooValue"). func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { labels := []string{} for i := 0; i < len(labelsAndValues); i += 2 { @@ -95,7 +95,7 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { } } - +// NopMetrics returns no-op Metrics. func NopMetrics() *Metrics { return &Metrics{ Size: discard.NewGauge(), diff --git a/mempool/mock/mempool.go b/mempool/mock/mempool.go index 014816a9e..3f293381f 100644 --- a/mempool/mock/mempool.go +++ b/mempool/mock/mempool.go @@ -7,7 +7,7 @@ import ( "github.com/tendermint/tendermint/types" ) - +// Mempool is an empty implementation of a Mempool, useful for testing. 
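// Illustrative sketch, not part of this patch: wiring the stock PreCheck and
// PostCheck filters above into any Mempool implementation. The byte and gas
// budgets are placeholder values, not dymint defaults.
package example

import "github.com/dymensionxyz/dymint/mempool"

func configureChecks(mp mempool.Mempool) {
	// Before CheckTx: drop anything that could never fit into a block.
	mp.SetPreCheckFn(mempool.PreCheckMaxBytes(1_000_000))
	// After CheckTx: drop anything whose gas-wanted exceeds the block gas limit.
	mp.SetPostCheckFn(mempool.PostCheckMaxGas(10_000_000))
}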
type Mempool struct{} var _ mempool.Mempool = Mempool{} diff --git a/mempool/tx.go b/mempool/tx.go index 191f1cbc0..d13f3d6b8 100644 --- a/mempool/tx.go +++ b/mempool/tx.go @@ -4,14 +4,14 @@ import ( "github.com/tendermint/tendermint/p2p" ) - - +// TxInfo are parameters that get passed when attempting to add a tx to the +// mempool. type TxInfo struct { - - - + // SenderID is the internal peer ID used in the mempool to identify the + // sender, storing two bytes with each transaction instead of 20 bytes for + // the types.NodeID. SenderID uint16 - + // SenderP2PID is the actual p2p.ID of the sender, used e.g. for logging. SenderP2PID p2p.ID } diff --git a/mempool/v1/mempool.go b/mempool/v1/mempool.go index a543b64ab..1d0eb3a9b 100644 --- a/mempool/v1/mempool.go +++ b/mempool/v1/mempool.go @@ -20,45 +20,45 @@ import ( var _ mempool.Mempool = (*TxMempool)(nil) - +// TxMempoolOption sets an optional parameter on the TxMempool. type TxMempoolOption func(*TxMempool) - - - - - - - - - +// TxMempool implemements the Mempool interface and allows the application to +// set priority values on transactions in the CheckTx response. When selecting +// transactions to include in a block, higher-priority transactions are chosen +// first. When evicting transactions from the mempool for size constraints, +// lower-priority transactions are evicted sooner. +// +// Within the mempool, transactions are ordered by time of arrival, and are +// gossiped to the rest of the network based on that order (gossip order does +// not take priority into account). type TxMempool struct { - + // Immutable fields logger log.Logger config *config.MempoolConfig proxyAppConn proxy.AppConnMempool metrics *mempool.Metrics - cache mempool.TxCache + cache mempool.TxCache // seen transactions - - txsBytes int64 - txRecheck int64 + // Atomically-updated fields + txsBytes int64 // atomic: the total size of all transactions in the mempool, in bytes + txRecheck int64 // atomic: the number of pending recheck calls - + // Synchronized fields, protected by mtx. mtx *sync.RWMutex notifiedTxsAvailable bool - txsAvailable chan struct{} + txsAvailable chan struct{} // one value sent per height when mempool is not empty preCheck mempool.PreCheckFunc postCheck mempool.PostCheckFunc - height int64 + height int64 // the latest height passed to Update - txs *clist.CList + txs *clist.CList // valid transactions (passed CheckTx) txByKey map[types.TxKey]*clist.CElement - txBySender map[string]*clist.CElement + txBySender map[string]*clist.CElement // for sender != "" } - - +// NewTxMempool constructs a new, empty priority mempool at the specified +// initial height and using the given config and options. func NewTxMempool( logger log.Logger, cfg *config.MempoolConfig, @@ -91,59 +91,59 @@ func NewTxMempool( return txmp } - - - +// WithPreCheck sets a filter for the mempool to reject a transaction if f(tx) +// returns an error. This is executed before CheckTx. It only applies to the +// first created block. After that, Update() overwrites the existing value. func WithPreCheck(f mempool.PreCheckFunc) TxMempoolOption { return func(txmp *TxMempool) { txmp.preCheck = f } } - - - +// WithPostCheck sets a filter for the mempool to reject a transaction if +// f(tx, resp) returns an error. This is executed after CheckTx. It only applies +// to the first created block. After that, Update overwrites the existing value. 
func WithPostCheck(f mempool.PostCheckFunc) TxMempoolOption { return func(txmp *TxMempool) { txmp.postCheck = f } } - +// WithMetrics sets the mempool's metrics collector. func WithMetrics(metrics *mempool.Metrics) TxMempoolOption { return func(txmp *TxMempool) { txmp.metrics = metrics } } - - +// Lock obtains a write-lock on the mempool. A caller must be sure to explicitly +// release the lock when finished. func (txmp *TxMempool) Lock() { txmp.mtx.Lock() } - +// Unlock releases a write-lock on the mempool. func (txmp *TxMempool) Unlock() { txmp.mtx.Unlock() } - - +// Size returns the number of valid transactions in the mempool. It is +// thread-safe. func (txmp *TxMempool) Size() int { return txmp.txs.Len() } - - +// SizeBytes return the total sum in bytes of all the valid transactions in the +// mempool. It is thread-safe. func (txmp *TxMempool) SizeBytes() int64 { return atomic.LoadInt64(&txmp.txsBytes) } - - - - +// FlushAppConn executes FlushSync on the mempool's proxyAppConn. +// +// The caller must hold an exclusive mempool lock (by calling txmp.Lock) before +// calling FlushAppConn. func (txmp *TxMempool) FlushAppConn() error { - - - - - - + // N.B.: We have to issue the call outside the lock so that its callback can + // fire. It's safe to do this, the flush will block until complete. + // + // We could just not require the caller to hold the lock at all, but the + // semantics of the Mempool interface require the caller to hold it, and we + // can't change that without disrupting existing use. txmp.mtx.Unlock() defer txmp.mtx.Lock() return txmp.proxyAppConn.FlushSync() } - - +// EnableTxsAvailable enables the mempool to trigger events when transactions +// are available on a block by block basis. func (txmp *TxMempool) EnableTxsAvailable() { txmp.mtx.Lock() defer txmp.mtx.Unlock() @@ -151,60 +151,60 @@ func (txmp *TxMempool) EnableTxsAvailable() { txmp.txsAvailable = make(chan struct{}, 1) } - - +// TxsAvailable returns a channel which fires once for every height, and only +// when transactions are available in the mempool. It is thread-safe. func (txmp *TxMempool) TxsAvailable() <-chan struct{} { return txmp.txsAvailable } - - - - - - - - - - - - - - - - - - - - +// CheckTx adds the given transaction to the mempool if it fits and passes the +// application's ABCI CheckTx method. +// +// CheckTx reports an error without adding tx if: +// +// - The size of tx exceeds the configured maximum transaction size. +// - The pre-check hook is defined and reports an error for tx. +// - The transaction already exists in the cache. +// - The proxy connection to the application fails. +// +// If tx passes all of the above conditions, it is passed (asynchronously) to +// the application's ABCI CheckTx method and this CheckTx method returns nil. +// If cb != nil, it is called when the ABCI request completes to report the +// application response. +// +// If the application accepts the transaction and the mempool is full, the +// mempool evicts one or more of the lowest-priority transaction whose priority +// is (strictly) lower than the priority of tx and whose size together exceeds +// the size of tx, and adds tx instead. If no such transactions exist, tx is +// discarded. func (txmp *TxMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo mempool.TxInfo) error { - - - + // During the initial phase of CheckTx, we do not need to modify any state. 
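Putting the availability hooks documented above together, a small sketch of how a producer might block until the pool has something to propose; it assumes EnableTxsAvailable was called once at startup:

```go
package main

import (
	"context"

	mempoolv1 "github.com/dymensionxyz/dymint/mempool/v1"
)

// waitForTxs blocks until the mempool signals available transactions or the
// context is cancelled. The channel fires at most once per height, and only
// when the mempool is non-empty.
func waitForTxs(ctx context.Context, txmp *mempoolv1.TxMempool) bool {
	select {
	case <-txmp.TxsAvailable():
		return true
	case <-ctx.Done():
		return false
	}
}
```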
+ // A transaction will not actually be added to the mempool until it survives + // a call to the ABCI CheckTx method and size constraint checks. height, err := func() (int64, error) { txmp.mtx.RLock() defer txmp.mtx.RUnlock() - + // Reject transactions in excess of the configured maximum transaction size. if len(tx) > txmp.config.MaxTxBytes { return 0, mempool.ErrTxTooLarge{Max: txmp.config.MaxTxBytes, Actual: len(tx)} } - + // If a precheck hook is defined, call it before invoking the application. if txmp.preCheck != nil { if err := txmp.preCheck(tx); err != nil { return 0, mempool.ErrPreCheck{Reason: err} } } - + // Early exit if the proxy connection has an error. if err := txmp.proxyAppConn.Error(); err != nil { return 0, err } txKey := tx.Key() - + // Check for the transaction in the cache. if !txmp.cache.Push(tx) { - + // If the cached transaction is also in the pool, record its sender. if elt, ok := txmp.txByKey[txKey]; ok { w, _ := elt.Value.(*WrappedTx) w.SetPeer(txInfo.SenderID) @@ -217,13 +217,13 @@ func (txmp *TxMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo memp return err } - - - - - - - + // Initiate an ABCI CheckTx for this transaction. The callback is + // responsible for adding the transaction to the pool if it survives. + // + // N.B.: We have to issue the call outside the lock. In a local client, + // even an "async" call invokes its callback immediately which will make + // the callback deadlock trying to acquire the same lock. This isn't a + // problem with out-of-process calls, but this has to work for both. reqRes := txmp.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{Tx: tx}) if err := txmp.proxyAppConn.FlushSync(); err != nil { return err @@ -244,17 +244,17 @@ func (txmp *TxMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo memp return nil } - - - +// RemoveTxByKey removes the transaction with the specified key from the +// mempool. It reports an error if no such transaction exists. This operation +// does not remove the transaction from the cache. func (txmp *TxMempool) RemoveTxByKey(txKey types.TxKey) error { txmp.mtx.Lock() defer txmp.mtx.Unlock() return txmp.removeTxByKey(txKey) } - - +// removeTxByKey removes the specified transaction key from the mempool. +// The caller must hold txmp.mtx excluxively. func (txmp *TxMempool) removeTxByKey(key types.TxKey) error { if elt, ok := txmp.txByKey[key]; ok { w, _ := elt.Value.(*WrappedTx) @@ -269,8 +269,8 @@ func (txmp *TxMempool) removeTxByKey(key types.TxKey) error { return fmt.Errorf("transaction %x not found", key) } - - +// removeTxByElement removes the specified transaction element from the mempool. +// The caller must hold txmp.mtx exclusively. func (txmp *TxMempool) removeTxByElement(elt *clist.CElement) { w, _ := elt.Value.(*WrappedTx) delete(txmp.txByKey, w.tx.Key()) @@ -281,14 +281,14 @@ func (txmp *TxMempool) removeTxByElement(elt *clist.CElement) { atomic.AddInt64(&txmp.txsBytes, -w.Size()) } - - +// Flush purges the contents of the mempool and the cache, leaving both empty. +// The current height is not modified by this operation. func (txmp *TxMempool) Flush() { txmp.mtx.Lock() defer txmp.mtx.Unlock() - - + // Remove all the transactions in the list explicitly, so that the sizes + // and indexes get updated properly. cur := txmp.txs.Front() for cur != nil { next := cur.Next() @@ -297,14 +297,14 @@ func (txmp *TxMempool) Flush() { } txmp.cache.Reset() - - + // Discard any pending recheck calls that may be in flight. 
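A sketch of the caller-side path implied by the CheckTx contract above; the error values it mentions (ErrTxTooLarge, ErrPreCheck, the cache hit) are the ones restored earlier in this patch, and the callback receives the ABCI response once the proxy call completes:

```go
package main

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
	tmtypes "github.com/tendermint/tendermint/types"

	"github.com/dymensionxyz/dymint/mempool"
	mempoolv1 "github.com/dymensionxyz/dymint/mempool/v1"
)

// submitTx hands a raw transaction to the mempool and logs rejections.
func submitTx(txmp *mempoolv1.TxMempool, raw []byte) error {
	tx := tmtypes.Tx(raw)
	err := txmp.CheckTx(tx, func(res *abci.Response) {
		// The callback fires when the ABCI CheckTx response arrives; rejected
		// transactions show up here with a non-OK code.
		if ctr := res.GetCheckTx(); ctr != nil && ctr.Code != abci.CodeTypeOK {
			fmt.Printf("tx rejected: code=%d log=%q\n", ctr.Code, ctr.Log)
		}
	}, mempool.TxInfo{SenderID: 0})
	if err != nil {
		// e.g. too large, pre-check failure, or already seen in the cache.
		return fmt.Errorf("check tx: %w", err)
	}
	return nil
}
```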
The calls will + // still complete, but will have no effect on the mempool. atomic.StoreInt64(&txmp.txRecheck, 0) } - - - +// allEntriesSorted returns a slice of all the transactions currently in the +// mempool, sorted in nonincreasing order by priority with ties broken by +// increasing order of arrival time. func (txmp *TxMempool) allEntriesSorted() []*WrappedTx { txmp.mtx.RLock() defer txmp.mtx.RUnlock() @@ -317,28 +317,28 @@ func (txmp *TxMempool) allEntriesSorted() []*WrappedTx { if all[i].priority == all[j].priority { return all[i].timestamp.Before(all[j].timestamp) } - return all[i].priority > all[j].priority + return all[i].priority > all[j].priority // N.B. higher priorities first }) return all } - - - - - - - - - - +// ReapMaxBytesMaxGas returns a slice of valid transactions that fit within the +// size and gas constraints. The results are ordered by nonincreasing priority, +// with ties broken by increasing order of arrival. Reaping transactions does +// not remove them from the mempool. +// +// If maxBytes < 0, no limit is set on the total size in bytes. +// If maxGas < 0, no limit is set on the total gas cost. +// +// If the mempool is empty or has no transactions fitting within the given +// constraints, the result will also be empty. func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { var totalGas, totalBytes int64 - var keep []types.Tx + var keep []types.Tx //nolint:prealloc for _, w := range txmp.allEntriesSorted() { - - + // N.B. When computing byte size, we need to include the overhead for + // encoding as protobuf to send to the application. totalGas += w.gasWanted totalBytes += types.ComputeProtoSizeForTxs([]types.Tx{w.tx}) if (maxGas >= 0 && totalGas > maxGas) || (maxBytes >= 0 && totalBytes > maxBytes) { @@ -349,24 +349,24 @@ func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { return keep } - - +// TxsWaitChan returns a channel that is closed when there is at least one +// transaction available to be gossiped. func (txmp *TxMempool) TxsWaitChan() <-chan struct{} { return txmp.txs.WaitChan() } - - +// TxsFront returns the frontmost element of the pending transaction list. +// It will be nil if the mempool is empty. func (txmp *TxMempool) TxsFront() *clist.CElement { return txmp.txs.Front() } - - - - - - - - +// ReapMaxTxs returns up to max transactions from the mempool. The results are +// ordered by nonincreasing priority with ties broken by increasing order of +// arrival. Reaping transactions does not remove them from the mempool. +// +// If max < 0, all transactions in the mempool are reaped. +// +// The result may have fewer than max elements (possibly zero) if the mempool +// does not have that many transactions available. func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs { - var keep []types.Tx + var keep []types.Tx //nolint:prealloc for _, w := range txmp.allEntriesSorted() { if max >= 0 && len(keep) >= max { @@ -377,28 +377,28 @@ func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs { return keep } - - - - - - - - - - - +// Update removes all the given transactions from the mempool and the cache, +// and updates the current block height. The blockTxs and deliverTxResponses +// must have the same length with each response corresponding to the tx at the +// same offset. +// +// If the configuration enables recheck, Update sends each remaining +// transaction after removing blockTxs to the ABCI CheckTx method. Any +// transactions marked as invalid during recheck are also removed. 
+// +// The caller must hold an exclusive mempool lock (by calling txmp.Lock) before +// calling Update. func (txmp *TxMempool) Update( blockHeight int64, blockTxs types.Txs, deliverTxResponses []*abci.ResponseDeliverTx, ) error { - + // Safety sanity check: The caller is required to hold the lock. if txmp.mtx.TryLock() { txmp.mtx.Unlock() panic("mempool: Update caller does not hold the lock") } - + // Safety check: Transactions and responses must match in number. if len(blockTxs) != len(deliverTxResponses) { panic(fmt.Sprintf("mempool: got %d transactions but %d DeliverTx responses", len(blockTxs), len(deliverTxResponses))) @@ -408,24 +408,24 @@ func (txmp *TxMempool) Update( txmp.notifiedTxsAvailable = false for i, tx := range blockTxs { - - - + // Add successful committed transactions to the cache (if they are not + // already present). Transactions that failed to commit are removed from + // the cache unless the operator has explicitly requested we keep them. if deliverTxResponses[i].Code == abci.CodeTypeOK { _ = txmp.cache.Push(tx) } else if !txmp.config.KeepInvalidTxsInCache { txmp.cache.Remove(tx) } - + // Regardless of success, remove the transaction from the mempool. _ = txmp.removeTxByKey(tx.Key()) } txmp.purgeExpiredTxs(blockHeight) - - - + // If there are any uncommitted transactions left in the mempool, we either + // initiate re-CheckTx per remaining transaction or notify that remaining + // transactions are left. size := txmp.Size() txmp.metrics.Size.Set(float64(size)) if size > 0 { @@ -446,19 +446,19 @@ func (txmp *TxMempool) SetPostCheckFn(fn mempool.PostCheckFunc) { txmp.postCheck = fn } - - - - - - - - - - - - - +// initialTxCallback handles the ABCI CheckTx response for the first time a +// transaction is added to the mempool. A recheck after a block is committed +// goes to the default callback (see recheckTxCallback). +// +// If either the application rejected the transaction or a post-check hook is +// defined and rejects the transaction, it is discarded. +// +// Otherwise, if the mempool is full, check for lower-priority transactions +// that can be evicted to make room for the new one. If no such transactions +// exist, this transaction is logged and dropped; otherwise the selected +// transactions are evicted. +// +// Finally, the new transaction is added and size stats updated. func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { checkTxRes, ok := res.Value.(*abci.Response_CheckTx) if !ok { @@ -490,14 +490,14 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { txmp.metrics.FailedTxs.Add(1) - - + // Remove the invalid transaction from the cache, unless the operator has + // instructed us to keep invalid transactions. if !txmp.config.KeepInvalidTxsInCache { txmp.cache.Remove(wtx.tx) } - - + // If there was a post-check error, record its text in the result for + // debugging purposes. if err != nil { checkTxRes.CheckTx.MempoolError = err.Error() } @@ -507,9 +507,9 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { priority := checkTxRes.CheckTx.Priority sender := checkTxRes.CheckTx.Sender - - - + // Disallow multiple concurrent transactions from the same sender assigned + // by the ABCI application. As a special case, an empty sender is not + // restricted. 
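Taken together with ReapMaxBytesMaxGas above, the Update contract suggests roughly the following commit-time flow. This is a sketch only; the lock-then-FlushAppConn ordering mirrors how Tendermint's consensus state machine drives its mempool and is an assumption here:

```go
package main

import (
	abci "github.com/tendermint/tendermint/abci/types"
	tmtypes "github.com/tendermint/tendermint/types"

	mempoolv1 "github.com/dymensionxyz/dymint/mempool/v1"
)

// proposeTxs picks transactions for the next block. Reaping does not remove
// them from the mempool; that only happens in Update after commit.
func proposeTxs(txmp *mempoolv1.TxMempool, maxBytes, maxGas int64) tmtypes.Txs {
	return txmp.ReapMaxBytesMaxGas(maxBytes, maxGas)
}

// commitTxs removes the committed transactions and lets the mempool recheck
// (or re-announce) whatever remains. Update panics if the caller does not
// hold the exclusive lock, so it is taken around the whole sequence.
func commitTxs(txmp *mempoolv1.TxMempool, height int64, txs tmtypes.Txs, results []*abci.ResponseDeliverTx) error {
	txmp.Lock()
	defer txmp.Unlock()

	// Let any in-flight CheckTx callbacks land before mutating state.
	if err := txmp.FlushAppConn(); err != nil {
		return err
	}
	return txmp.Update(height, txs, results)
}
```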
if sender != "" { elt, ok := txmp.txBySender[sender] if ok { @@ -526,15 +526,15 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { } } - - - - - + // At this point the application has ruled the transaction valid, but the + // mempool might be full. If so, find the lowest-priority items with lower + // priority than the application assigned to this new one, and evict as many + // of them as necessary to make room for tx. If no such items exist, we + // discard tx. if err := txmp.canAddTx(wtx); err != nil { - var victims []*clist.CElement - var victimBytes int64 + var victims []*clist.CElement // eligible transactions for eviction + var victimBytes int64 // total size of victims for cur := txmp.txs.Front(); cur != nil; cur = cur.Next() { cw := cur.Value.(*WrappedTx) if cw.priority < priority { @@ -543,9 +543,9 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { } } - - - + // If there are no suitable eviction candidates, or the total size of + // those candidates is not enough to make room for the new transaction, + // drop the new one. if len(victims) == 0 || victimBytes < wtx.Size() { txmp.cache.Remove(wtx.tx) txmp.logger.Error( @@ -564,8 +564,8 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { "new_priority", priority, ) - - + // Sort lowest priority items first so they will be evicted first. Break + // ties in favor of newer items (to maintain FIFO semantics in a group). sort.Slice(victims, func(i, j int) bool { iw := victims[i].Value.(*WrappedTx) jw := victims[j].Value.(*WrappedTx) @@ -575,7 +575,7 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { return iw.Priority() < jw.Priority() }) - + // Evict as many of the victims as necessary to make room. var evictedBytes int64 for _, vic := range victims { w := vic.Value.(*WrappedTx) @@ -589,8 +589,8 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { txmp.cache.Remove(w.tx) txmp.metrics.EvictedTxs.Add(1) - - + // We may not need to evict all the eligible transactions. Bail out + // early if we have made enough room. evictedBytes += w.Size() if evictedBytes >= wtx.Size() { break @@ -625,26 +625,26 @@ func (txmp *TxMempool) insertTx(wtx *WrappedTx) { atomic.AddInt64(&txmp.txsBytes, wtx.Size()) } - - - - - - +// recheckTxCallback handles the responses from ABCI CheckTx calls issued +// during the recheck phase of a block Update. It updates the recheck counter +// and removes any transactions invalidated by the application. +// +// This callback is NOT executed for the initial CheckTx on a new transaction; +// that case is handled by initialTxCallback instead. func (txmp *TxMempool) recheckTxCallback(req *abci.Request, res *abci.Response) { checkTxRes, ok := res.Value.(*abci.Response_CheckTx) if !ok { - - + // Don't log this; this is the default callback and other response types + // can safely be ignored. return } - - - + // Check whether we are expecting recheck responses at this point. + // If not, we will ignore the response, this usually means the mempool was Flushed. + // If this is the "last" pending recheck, trigger a notification when it's been processed. 
numLeft := atomic.AddInt64(&txmp.txRecheck, -1) if numLeft == 0 { - defer txmp.notifyTxsAvailable() + defer txmp.notifyTxsAvailable() // notify waiters on return, if mempool is non-empty } else if numLeft < 0 { return } @@ -655,16 +655,16 @@ func (txmp *TxMempool) recheckTxCallback(req *abci.Request, res *abci.Response) txmp.mtx.Lock() defer txmp.mtx.Unlock() - - - + // Find the transaction reported by the ABCI callback. It is possible the + // transaction was evicted during the recheck, in which case the transaction + // will be gone. elt, ok := txmp.txByKey[tx.Key()] if !ok { return } wtx := elt.Value.(*WrappedTx) - + // If a postcheck hook is defined, call it before checking the result. var err error if txmp.postCheck != nil { err = txmp.postCheck(tx, checkTxRes.CheckTx) @@ -672,7 +672,7 @@ func (txmp *TxMempool) recheckTxCallback(req *abci.Request, res *abci.Response) if checkTxRes.CheckTx.Code == abci.CodeTypeOK && err == nil { wtx.SetPriority(checkTxRes.CheckTx.Priority) - return + return // N.B. Size of mempool did not change } txmp.logger.Debug( @@ -690,12 +690,12 @@ func (txmp *TxMempool) recheckTxCallback(req *abci.Request, res *abci.Response) txmp.metrics.Size.Set(float64(txmp.Size())) } - - - - - - +// recheckTransactions initiates re-CheckTx ABCI calls for all the transactions +// currently in the mempool. It reports the number of recheck calls that were +// successfully initiated. +// +// Precondition: The mempool is not empty. +// The caller must hold txmp.mtx exclusively. func (txmp *TxMempool) recheckTransactions() { if txmp.Size() == 0 { panic("mempool: cannot run recheck on an empty mempool") @@ -705,10 +705,10 @@ func (txmp *TxMempool) recheckTransactions() { "num_txs", txmp.Size(), "height", txmp.height, ) - - - - + // N.B.: We have to issue the calls outside the lock. In a local client, + // even an "async" call invokes its callback immediately which will make the + // callback deadlock trying to acquire the same lock. This isn't a problem + // with out-of-process calls, but this has to work for both. txmp.mtx.Unlock() defer txmp.mtx.Lock() @@ -716,7 +716,7 @@ func (txmp *TxMempool) recheckTransactions() { for e := txmp.txs.Front(); e != nil; e = e.Next() { wtx := e.Value.(*WrappedTx) - + // The response for this CheckTx is handled by the default recheckTxCallback. _ = txmp.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{ Tx: wtx.tx, Type: abci.CheckTxType_Recheck, @@ -730,9 +730,9 @@ func (txmp *TxMempool) recheckTransactions() { txmp.proxyAppConn.FlushAsync() } - - - +// canAddTx returns an error if we cannot insert the provided *WrappedTx into +// the mempool due to mempool configured constraints. Otherwise, nil is +// returned and the transaction can be inserted into the mempool. func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error { numTxs := txmp.Size() txBytes := txmp.SizeBytes() @@ -749,21 +749,21 @@ func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error { return nil } - - - - - +// purgeExpiredTxs removes all transactions from the mempool that have exceeded +// their respective height or time-based limits as of the given blockHeight. +// Transactions removed by this operation are not removed from the cache. +// +// The caller must hold txmp.mtx exclusively. func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) { if txmp.config.TTLNumBlocks == 0 && txmp.config.TTLDuration == 0 { - return + return // nothing to do } now := time.Now() cur := txmp.txs.Front() for cur != nil { - - + // N.B. 
Grab the next element first, since if we remove cur its successor + // will be invalidated. next := cur.Next() w := cur.Value.(*WrappedTx) @@ -782,11 +782,11 @@ func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) { func (txmp *TxMempool) notifyTxsAvailable() { if txmp.Size() == 0 { - return + return // nothing to do } if txmp.txsAvailable != nil && !txmp.notifiedTxsAvailable { - + // channel cap is 1, so this will send once txmp.notifiedTxsAvailable = true select { diff --git a/mempool/v1/tx.go b/mempool/v1/tx.go index 88134c052..88522a8a7 100644 --- a/mempool/v1/tx.go +++ b/mempool/v1/tx.go @@ -7,25 +7,25 @@ import ( "github.com/tendermint/tendermint/types" ) - - +// WrappedTx defines a wrapper around a raw transaction with additional metadata +// that is used for indexing. type WrappedTx struct { - tx types.Tx - hash types.TxKey - height int64 - timestamp time.Time + tx types.Tx // the original transaction data + hash types.TxKey // the transaction hash + height int64 // height when this transaction was initially checked (for expiry) + timestamp time.Time // time when transaction was entered (for TTL) mtx sync.Mutex - gasWanted int64 - priority int64 - sender string - peers map[uint16]bool + gasWanted int64 // app: gas required to execute this transaction + priority int64 // app: priority value for this transaction + sender string // app: assigned sender label + peers map[uint16]bool // peer IDs who have sent us this transaction } - +// Size reports the size of the raw transaction in bytes. func (w *WrappedTx) Size() int64 { return int64(len(w.tx)) } - +// SetPeer adds the specified peer ID as a sender of w. func (w *WrappedTx) SetPeer(id uint16) { w.mtx.Lock() defer w.mtx.Unlock() @@ -36,7 +36,7 @@ func (w *WrappedTx) SetPeer(id uint16) { } } - +// HasPeer reports whether the specified peer ID is a sender of w. func (w *WrappedTx) HasPeer(id uint16) bool { w.mtx.Lock() defer w.mtx.Unlock() @@ -44,42 +44,42 @@ func (w *WrappedTx) HasPeer(id uint16) bool { return ok } - +// SetGasWanted sets the application-assigned gas requirement of w. func (w *WrappedTx) SetGasWanted(gas int64) { w.mtx.Lock() defer w.mtx.Unlock() w.gasWanted = gas } - +// GasWanted reports the application-assigned gas requirement of w. func (w *WrappedTx) GasWanted() int64 { w.mtx.Lock() defer w.mtx.Unlock() return w.gasWanted } - +// SetSender sets the application-assigned sender of w. func (w *WrappedTx) SetSender(sender string) { w.mtx.Lock() defer w.mtx.Unlock() w.sender = sender } - +// Sender reports the application-assigned sender of w. func (w *WrappedTx) Sender() string { w.mtx.Lock() defer w.mtx.Unlock() return w.sender } - +// SetPriority sets the application-assigned priority of w. func (w *WrappedTx) SetPriority(p int64) { w.mtx.Lock() defer w.mtx.Unlock() w.priority = p } - +// Priority reports the application-assigned priority of w. func (w *WrappedTx) Priority() int64 { w.mtx.Lock() defer w.mtx.Unlock() diff --git a/mocks/github.com/dymensionxyz/dymint/block/mock_ExecutorI.go b/mocks/github.com/dymensionxyz/dymint/block/mock_ExecutorI.go index 6098f6c98..2ba9eee27 100644 --- a/mocks/github.com/dymensionxyz/dymint/block/mock_ExecutorI.go +++ b/mocks/github.com/dymensionxyz/dymint/block/mock_ExecutorI.go @@ -1,4 +1,4 @@ - +// Code generated by mockery v2.42.3. DO NOT EDIT. 
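The purgeExpiredTxs logic a few hunks above is driven entirely by two mempool config fields. A sketch of setting them, assuming Tendermint's config.MempoolConfig (the type NewTxMempool takes earlier in this patch) and its upstream field names:

```go
package main

import (
	"time"

	cfg "github.com/tendermint/tendermint/config"
)

// mempoolConfigWithTTL enables both expiry mechanisms. With both fields left
// at zero, purgeExpiredTxs is a no-op.
func mempoolConfigWithTTL() *cfg.MempoolConfig {
	c := cfg.DefaultMempoolConfig()
	c.TTLNumBlocks = 100              // drop txs checked more than 100 blocks ago
	c.TTLDuration = 10 * time.Minute  // or entered more than 10 minutes ago
	c.KeepInvalidTxsInCache = false   // allow rejected txs to be resubmitted
	return c
}
```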
package block @@ -16,7 +16,7 @@ import ( types "github.com/dymensionxyz/dymint/types" ) - +// MockExecutorI is an autogenerated mock type for the ExecutorI type type MockExecutorI struct { mock.Mock } @@ -29,7 +29,7 @@ func (_m *MockExecutorI) EXPECT() *MockExecutorI_Expecter { return &MockExecutorI_Expecter{mock: &_m.Mock} } - +// AddConsensusMsgs provides a mock function with given fields: _a0 func (_m *MockExecutorI) AddConsensusMsgs(_a0 ...proto.Message) { _va := make([]interface{}, len(_a0)) for _i := range _a0 { @@ -40,13 +40,13 @@ func (_m *MockExecutorI) AddConsensusMsgs(_a0 ...proto.Message) { _m.Called(_ca...) } - +// MockExecutorI_AddConsensusMsgs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddConsensusMsgs' type MockExecutorI_AddConsensusMsgs_Call struct { *mock.Call } - - +// AddConsensusMsgs is a helper method to define mock.On call +// - _a0 ...proto.Message func (_e *MockExecutorI_Expecter) AddConsensusMsgs(_a0 ...interface{}) *MockExecutorI_AddConsensusMsgs_Call { return &MockExecutorI_AddConsensusMsgs_Call{Call: _e.mock.On("AddConsensusMsgs", append([]interface{}{}, _a0...)...)} @@ -75,7 +75,7 @@ func (_c *MockExecutorI_AddConsensusMsgs_Call) RunAndReturn(run func(...proto.Me return _c } - +// Commit provides a mock function with given fields: _a0, _a1, resp func (_m *MockExecutorI) Commit(_a0 *types.State, _a1 *types.Block, resp *state.ABCIResponses) ([]byte, int64, error) { ret := _m.Called(_a0, _a1, resp) @@ -112,15 +112,15 @@ func (_m *MockExecutorI) Commit(_a0 *types.State, _a1 *types.Block, resp *state. return r0, r1, r2 } - +// MockExecutorI_Commit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Commit' type MockExecutorI_Commit_Call struct { *mock.Call } - - - - +// Commit is a helper method to define mock.On call +// - _a0 *types.State +// - _a1 *types.Block +// - resp *state.ABCIResponses func (_e *MockExecutorI_Expecter) Commit(_a0 interface{}, _a1 interface{}, resp interface{}) *MockExecutorI_Commit_Call { return &MockExecutorI_Commit_Call{Call: _e.mock.On("Commit", _a0, _a1, resp)} } @@ -142,7 +142,7 @@ func (_c *MockExecutorI_Commit_Call) RunAndReturn(run func(*types.State, *types. 
return _c } - +// CreateBlock provides a mock function with given fields: height, lastCommit, lastHeaderHash, nextSeqHash, _a4, maxBlockDataSizeBytes func (_m *MockExecutorI) CreateBlock(height uint64, lastCommit *types.Commit, lastHeaderHash [32]byte, nextSeqHash [32]byte, _a4 *types.State, maxBlockDataSizeBytes uint64) *types.Block { ret := _m.Called(height, lastCommit, lastHeaderHash, nextSeqHash, _a4, maxBlockDataSizeBytes) @@ -162,18 +162,18 @@ func (_m *MockExecutorI) CreateBlock(height uint64, lastCommit *types.Commit, la return r0 } - +// MockExecutorI_CreateBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateBlock' type MockExecutorI_CreateBlock_Call struct { *mock.Call } - - - - - - - +// CreateBlock is a helper method to define mock.On call +// - height uint64 +// - lastCommit *types.Commit +// - lastHeaderHash [32]byte +// - nextSeqHash [32]byte +// - _a4 *types.State +// - maxBlockDataSizeBytes uint64 func (_e *MockExecutorI_Expecter) CreateBlock(height interface{}, lastCommit interface{}, lastHeaderHash interface{}, nextSeqHash interface{}, _a4 interface{}, maxBlockDataSizeBytes interface{}) *MockExecutorI_CreateBlock_Call { return &MockExecutorI_CreateBlock_Call{Call: _e.mock.On("CreateBlock", height, lastCommit, lastHeaderHash, nextSeqHash, _a4, maxBlockDataSizeBytes)} } @@ -195,7 +195,7 @@ func (_c *MockExecutorI_CreateBlock_Call) RunAndReturn(run func(uint64, *types.C return _c } - +// ExecuteBlock provides a mock function with given fields: _a0 func (_m *MockExecutorI) ExecuteBlock(_a0 *types.Block) (*state.ABCIResponses, error) { ret := _m.Called(_a0) @@ -225,13 +225,13 @@ func (_m *MockExecutorI) ExecuteBlock(_a0 *types.Block) (*state.ABCIResponses, e return r0, r1 } - +// MockExecutorI_ExecuteBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExecuteBlock' type MockExecutorI_ExecuteBlock_Call struct { *mock.Call } - - +// ExecuteBlock is a helper method to define mock.On call +// - _a0 *types.Block func (_e *MockExecutorI_Expecter) ExecuteBlock(_a0 interface{}) *MockExecutorI_ExecuteBlock_Call { return &MockExecutorI_ExecuteBlock_Call{Call: _e.mock.On("ExecuteBlock", _a0)} } @@ -253,7 +253,7 @@ func (_c *MockExecutorI_ExecuteBlock_Call) RunAndReturn(run func(*types.Block) ( return _c } - +// GetAppInfo provides a mock function with given fields: func (_m *MockExecutorI) GetAppInfo() (*abcitypes.ResponseInfo, error) { ret := _m.Called() @@ -283,12 +283,12 @@ func (_m *MockExecutorI) GetAppInfo() (*abcitypes.ResponseInfo, error) { return r0, r1 } - +// MockExecutorI_GetAppInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAppInfo' type MockExecutorI_GetAppInfo_Call struct { *mock.Call } - +// GetAppInfo is a helper method to define mock.On call func (_e *MockExecutorI_Expecter) GetAppInfo() *MockExecutorI_GetAppInfo_Call { return &MockExecutorI_GetAppInfo_Call{Call: _e.mock.On("GetAppInfo")} } @@ -310,7 +310,7 @@ func (_c *MockExecutorI_GetAppInfo_Call) RunAndReturn(run func() (*abcitypes.Res return _c } - +// GetConsensusMsgs provides a mock function with given fields: func (_m *MockExecutorI) GetConsensusMsgs() []proto.Message { ret := _m.Called() @@ -330,12 +330,12 @@ func (_m *MockExecutorI) GetConsensusMsgs() []proto.Message { return r0 } - +// MockExecutorI_GetConsensusMsgs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetConsensusMsgs' type 
MockExecutorI_GetConsensusMsgs_Call struct { *mock.Call } - +// GetConsensusMsgs is a helper method to define mock.On call func (_e *MockExecutorI_Expecter) GetConsensusMsgs() *MockExecutorI_GetConsensusMsgs_Call { return &MockExecutorI_GetConsensusMsgs_Call{Call: _e.mock.On("GetConsensusMsgs")} } @@ -357,7 +357,7 @@ func (_c *MockExecutorI_GetConsensusMsgs_Call) RunAndReturn(run func() []proto.M return _c } - +// InitChain provides a mock function with given fields: genesis, genesisChecksum, valset func (_m *MockExecutorI) InitChain(genesis *tenderminttypes.GenesisDoc, genesisChecksum string, valset []*tenderminttypes.Validator) (*abcitypes.ResponseInitChain, error) { ret := _m.Called(genesis, genesisChecksum, valset) @@ -387,15 +387,15 @@ func (_m *MockExecutorI) InitChain(genesis *tenderminttypes.GenesisDoc, genesisC return r0, r1 } - +// MockExecutorI_InitChain_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InitChain' type MockExecutorI_InitChain_Call struct { *mock.Call } - - - - +// InitChain is a helper method to define mock.On call +// - genesis *tenderminttypes.GenesisDoc +// - genesisChecksum string +// - valset []*tenderminttypes.Validator func (_e *MockExecutorI_Expecter) InitChain(genesis interface{}, genesisChecksum interface{}, valset interface{}) *MockExecutorI_InitChain_Call { return &MockExecutorI_InitChain_Call{Call: _e.mock.On("InitChain", genesis, genesisChecksum, valset)} } @@ -417,18 +417,18 @@ func (_c *MockExecutorI_InitChain_Call) RunAndReturn(run func(*tenderminttypes.G return _c } - +// UpdateMempoolAfterInitChain provides a mock function with given fields: s func (_m *MockExecutorI) UpdateMempoolAfterInitChain(s *types.State) { _m.Called(s) } - +// MockExecutorI_UpdateMempoolAfterInitChain_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateMempoolAfterInitChain' type MockExecutorI_UpdateMempoolAfterInitChain_Call struct { *mock.Call } - - +// UpdateMempoolAfterInitChain is a helper method to define mock.On call +// - s *types.State func (_e *MockExecutorI_Expecter) UpdateMempoolAfterInitChain(s interface{}) *MockExecutorI_UpdateMempoolAfterInitChain_Call { return &MockExecutorI_UpdateMempoolAfterInitChain_Call{Call: _e.mock.On("UpdateMempoolAfterInitChain", s)} } @@ -450,7 +450,7 @@ func (_c *MockExecutorI_UpdateMempoolAfterInitChain_Call) RunAndReturn(run func( return _c } - +// UpdateProposerFromBlock provides a mock function with given fields: s, seqSet, _a2 func (_m *MockExecutorI) UpdateProposerFromBlock(s *types.State, seqSet *types.SequencerSet, _a2 *types.Block) bool { ret := _m.Called(s, seqSet, _a2) @@ -468,15 +468,15 @@ func (_m *MockExecutorI) UpdateProposerFromBlock(s *types.State, seqSet *types.S return r0 } - +// MockExecutorI_UpdateProposerFromBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateProposerFromBlock' type MockExecutorI_UpdateProposerFromBlock_Call struct { *mock.Call } - - - - +// UpdateProposerFromBlock is a helper method to define mock.On call +// - s *types.State +// - seqSet *types.SequencerSet +// - _a2 *types.Block func (_e *MockExecutorI_Expecter) UpdateProposerFromBlock(s interface{}, seqSet interface{}, _a2 interface{}) *MockExecutorI_UpdateProposerFromBlock_Call { return &MockExecutorI_UpdateProposerFromBlock_Call{Call: _e.mock.On("UpdateProposerFromBlock", s, seqSet, _a2)} } @@ -498,22 +498,22 @@ func (_c *MockExecutorI_UpdateProposerFromBlock_Call) RunAndReturn(run func(*typ 
return _c } - +// UpdateStateAfterCommit provides a mock function with given fields: s, resp, appHash, height, lastHeaderHash func (_m *MockExecutorI) UpdateStateAfterCommit(s *types.State, resp *state.ABCIResponses, appHash []byte, height uint64, lastHeaderHash [32]byte) { _m.Called(s, resp, appHash, height, lastHeaderHash) } - +// MockExecutorI_UpdateStateAfterCommit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateStateAfterCommit' type MockExecutorI_UpdateStateAfterCommit_Call struct { *mock.Call } - - - - - - +// UpdateStateAfterCommit is a helper method to define mock.On call +// - s *types.State +// - resp *state.ABCIResponses +// - appHash []byte +// - height uint64 +// - lastHeaderHash [32]byte func (_e *MockExecutorI_Expecter) UpdateStateAfterCommit(s interface{}, resp interface{}, appHash interface{}, height interface{}, lastHeaderHash interface{}) *MockExecutorI_UpdateStateAfterCommit_Call { return &MockExecutorI_UpdateStateAfterCommit_Call{Call: _e.mock.On("UpdateStateAfterCommit", s, resp, appHash, height, lastHeaderHash)} } @@ -535,19 +535,19 @@ func (_c *MockExecutorI_UpdateStateAfterCommit_Call) RunAndReturn(run func(*type return _c } - +// UpdateStateAfterInitChain provides a mock function with given fields: s, res func (_m *MockExecutorI) UpdateStateAfterInitChain(s *types.State, res *abcitypes.ResponseInitChain) { _m.Called(s, res) } - +// MockExecutorI_UpdateStateAfterInitChain_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateStateAfterInitChain' type MockExecutorI_UpdateStateAfterInitChain_Call struct { *mock.Call } - - - +// UpdateStateAfterInitChain is a helper method to define mock.On call +// - s *types.State +// - res *abcitypes.ResponseInitChain func (_e *MockExecutorI_Expecter) UpdateStateAfterInitChain(s interface{}, res interface{}) *MockExecutorI_UpdateStateAfterInitChain_Call { return &MockExecutorI_UpdateStateAfterInitChain_Call{Call: _e.mock.On("UpdateStateAfterInitChain", s, res)} } @@ -569,8 +569,8 @@ func (_c *MockExecutorI_UpdateStateAfterInitChain_Call) RunAndReturn(run func(*t return _c } - - +// NewMockExecutorI creates a new instance of MockExecutorI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. func NewMockExecutorI(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/block/mock_FraudHandler.go b/mocks/github.com/dymensionxyz/dymint/block/mock_FraudHandler.go index 54b9098d2..932c51a2e 100644 --- a/mocks/github.com/dymensionxyz/dymint/block/mock_FraudHandler.go +++ b/mocks/github.com/dymensionxyz/dymint/block/mock_FraudHandler.go @@ -1,4 +1,4 @@ - +// Code generated by mockery v2.42.3. DO NOT EDIT. 
package block @@ -8,7 +8,7 @@ import ( mock "github.com/stretchr/testify/mock" ) - +// MockFraudHandler is an autogenerated mock type for the FraudHandler type type MockFraudHandler struct { mock.Mock } @@ -21,19 +21,19 @@ func (_m *MockFraudHandler) EXPECT() *MockFraudHandler_Expecter { return &MockFraudHandler_Expecter{mock: &_m.Mock} } - +// HandleFault provides a mock function with given fields: ctx, fault func (_m *MockFraudHandler) HandleFault(ctx context.Context, fault error) { _m.Called(ctx, fault) } - +// MockFraudHandler_HandleFault_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HandleFault' type MockFraudHandler_HandleFault_Call struct { *mock.Call } - - - +// HandleFault is a helper method to define mock.On call +// - ctx context.Context +// - fault error func (_e *MockFraudHandler_Expecter) HandleFault(ctx interface{}, fault interface{}) *MockFraudHandler_HandleFault_Call { return &MockFraudHandler_HandleFault_Call{Call: _e.mock.On("HandleFault", ctx, fault)} } @@ -55,8 +55,8 @@ func (_c *MockFraudHandler_HandleFault_Call) RunAndReturn(run func(context.Conte return _c } - - +// NewMockFraudHandler creates a new instance of MockFraudHandler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. func NewMockFraudHandler(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/da/avail/mock_SubstrateApiI.go b/mocks/github.com/dymensionxyz/dymint/da/avail/mock_SubstrateApiI.go index b591d3572..6a52c1df8 100644 --- a/mocks/github.com/dymensionxyz/dymint/da/avail/mock_SubstrateApiI.go +++ b/mocks/github.com/dymensionxyz/dymint/da/avail/mock_SubstrateApiI.go @@ -1,4 +1,4 @@ - +// Code generated by mockery v2.42.3. DO NOT EDIT. 
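The generated mocks restored in this patch all follow the same mockery expecter pattern. A sketch of driving MockFraudHandler from a test, assuming the mocks/ import path that mockery writes for this repository and the usual testify helpers:

```go
package block_test

import (
	"context"
	"errors"
	"testing"

	"github.com/stretchr/testify/mock"

	blockmocks "github.com/dymensionxyz/dymint/mocks/github.com/dymensionxyz/dymint/block"
)

func TestHandleFaultIsCalled(t *testing.T) {
	// NewMockFraudHandler registers a cleanup that asserts the expectations.
	h := blockmocks.NewMockFraudHandler(t)

	// Expect exactly one HandleFault call, with any context and any error.
	h.EXPECT().HandleFault(mock.Anything, mock.Anything).Return().Once()

	h.HandleFault(context.Background(), errors.New("fraud: bad state root"))
}
```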
package avail @@ -14,7 +14,7 @@ import ( types "github.com/centrifuge/go-substrate-rpc-client/v4/types" ) - +// MockSubstrateApiI is an autogenerated mock type for the SubstrateApiI type type MockSubstrateApiI struct { mock.Mock } @@ -27,7 +27,7 @@ func (_m *MockSubstrateApiI) EXPECT() *MockSubstrateApiI_Expecter { return &MockSubstrateApiI_Expecter{mock: &_m.Mock} } - +// GetBlock provides a mock function with given fields: blockHash func (_m *MockSubstrateApiI) GetBlock(blockHash types.Hash) (*types.SignedBlock, error) { ret := _m.Called(blockHash) @@ -57,13 +57,13 @@ func (_m *MockSubstrateApiI) GetBlock(blockHash types.Hash) (*types.SignedBlock, return r0, r1 } - +// MockSubstrateApiI_GetBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlock' type MockSubstrateApiI_GetBlock_Call struct { *mock.Call } - - +// GetBlock is a helper method to define mock.On call +// - blockHash types.Hash func (_e *MockSubstrateApiI_Expecter) GetBlock(blockHash interface{}) *MockSubstrateApiI_GetBlock_Call { return &MockSubstrateApiI_GetBlock_Call{Call: _e.mock.On("GetBlock", blockHash)} } @@ -85,7 +85,7 @@ func (_c *MockSubstrateApiI_GetBlock_Call) RunAndReturn(run func(types.Hash) (*t return _c } - +// GetBlockHash provides a mock function with given fields: blockNumber func (_m *MockSubstrateApiI) GetBlockHash(blockNumber uint64) (types.Hash, error) { ret := _m.Called(blockNumber) @@ -115,13 +115,13 @@ func (_m *MockSubstrateApiI) GetBlockHash(blockNumber uint64) (types.Hash, error return r0, r1 } - +// MockSubstrateApiI_GetBlockHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockHash' type MockSubstrateApiI_GetBlockHash_Call struct { *mock.Call } - - +// GetBlockHash is a helper method to define mock.On call +// - blockNumber uint64 func (_e *MockSubstrateApiI_Expecter) GetBlockHash(blockNumber interface{}) *MockSubstrateApiI_GetBlockHash_Call { return &MockSubstrateApiI_GetBlockHash_Call{Call: _e.mock.On("GetBlockHash", blockNumber)} } @@ -143,7 +143,7 @@ func (_c *MockSubstrateApiI_GetBlockHash_Call) RunAndReturn(run func(uint64) (ty return _c } - +// GetBlockHashLatest provides a mock function with given fields: func (_m *MockSubstrateApiI) GetBlockHashLatest() (types.Hash, error) { ret := _m.Called() @@ -173,12 +173,12 @@ func (_m *MockSubstrateApiI) GetBlockHashLatest() (types.Hash, error) { return r0, r1 } - +// MockSubstrateApiI_GetBlockHashLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockHashLatest' type MockSubstrateApiI_GetBlockHashLatest_Call struct { *mock.Call } - +// GetBlockHashLatest is a helper method to define mock.On call func (_e *MockSubstrateApiI_Expecter) GetBlockHashLatest() *MockSubstrateApiI_GetBlockHashLatest_Call { return &MockSubstrateApiI_GetBlockHashLatest_Call{Call: _e.mock.On("GetBlockHashLatest")} } @@ -200,7 +200,7 @@ func (_c *MockSubstrateApiI_GetBlockHashLatest_Call) RunAndReturn(run func() (ty return _c } - +// GetBlockLatest provides a mock function with given fields: func (_m *MockSubstrateApiI) GetBlockLatest() (*types.SignedBlock, error) { ret := _m.Called() @@ -230,12 +230,12 @@ func (_m *MockSubstrateApiI) GetBlockLatest() (*types.SignedBlock, error) { return r0, r1 } - +// MockSubstrateApiI_GetBlockLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockLatest' type MockSubstrateApiI_GetBlockLatest_Call struct { *mock.Call } - +// GetBlockLatest 
is a helper method to define mock.On call func (_e *MockSubstrateApiI_Expecter) GetBlockLatest() *MockSubstrateApiI_GetBlockLatest_Call { return &MockSubstrateApiI_GetBlockLatest_Call{Call: _e.mock.On("GetBlockLatest")} } @@ -257,7 +257,7 @@ func (_c *MockSubstrateApiI_GetBlockLatest_Call) RunAndReturn(run func() (*types return _c } - +// GetChildKeys provides a mock function with given fields: childStorageKey, prefix, blockHash func (_m *MockSubstrateApiI) GetChildKeys(childStorageKey types.StorageKey, prefix types.StorageKey, blockHash types.Hash) ([]types.StorageKey, error) { ret := _m.Called(childStorageKey, prefix, blockHash) @@ -287,15 +287,15 @@ func (_m *MockSubstrateApiI) GetChildKeys(childStorageKey types.StorageKey, pref return r0, r1 } - +// MockSubstrateApiI_GetChildKeys_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildKeys' type MockSubstrateApiI_GetChildKeys_Call struct { *mock.Call } - - - - +// GetChildKeys is a helper method to define mock.On call +// - childStorageKey types.StorageKey +// - prefix types.StorageKey +// - blockHash types.Hash func (_e *MockSubstrateApiI_Expecter) GetChildKeys(childStorageKey interface{}, prefix interface{}, blockHash interface{}) *MockSubstrateApiI_GetChildKeys_Call { return &MockSubstrateApiI_GetChildKeys_Call{Call: _e.mock.On("GetChildKeys", childStorageKey, prefix, blockHash)} } @@ -317,7 +317,7 @@ func (_c *MockSubstrateApiI_GetChildKeys_Call) RunAndReturn(run func(types.Stora return _c } - +// GetChildKeysLatest provides a mock function with given fields: childStorageKey, prefix func (_m *MockSubstrateApiI) GetChildKeysLatest(childStorageKey types.StorageKey, prefix types.StorageKey) ([]types.StorageKey, error) { ret := _m.Called(childStorageKey, prefix) @@ -347,14 +347,14 @@ func (_m *MockSubstrateApiI) GetChildKeysLatest(childStorageKey types.StorageKey return r0, r1 } - +// MockSubstrateApiI_GetChildKeysLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildKeysLatest' type MockSubstrateApiI_GetChildKeysLatest_Call struct { *mock.Call } - - - +// GetChildKeysLatest is a helper method to define mock.On call +// - childStorageKey types.StorageKey +// - prefix types.StorageKey func (_e *MockSubstrateApiI_Expecter) GetChildKeysLatest(childStorageKey interface{}, prefix interface{}) *MockSubstrateApiI_GetChildKeysLatest_Call { return &MockSubstrateApiI_GetChildKeysLatest_Call{Call: _e.mock.On("GetChildKeysLatest", childStorageKey, prefix)} } @@ -376,7 +376,7 @@ func (_c *MockSubstrateApiI_GetChildKeysLatest_Call) RunAndReturn(run func(types return _c } - +// GetChildStorage provides a mock function with given fields: childStorageKey, key, target, blockHash func (_m *MockSubstrateApiI) GetChildStorage(childStorageKey types.StorageKey, key types.StorageKey, target interface{}, blockHash types.Hash) (bool, error) { ret := _m.Called(childStorageKey, key, target, blockHash) @@ -404,16 +404,16 @@ func (_m *MockSubstrateApiI) GetChildStorage(childStorageKey types.StorageKey, k return r0, r1 } - +// MockSubstrateApiI_GetChildStorage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorage' type MockSubstrateApiI_GetChildStorage_Call struct { *mock.Call } - - - - - +// GetChildStorage is a helper method to define mock.On call +// - childStorageKey types.StorageKey +// - key types.StorageKey +// - target interface{} +// - blockHash types.Hash func (_e *MockSubstrateApiI_Expecter) 
GetChildStorage(childStorageKey interface{}, key interface{}, target interface{}, blockHash interface{}) *MockSubstrateApiI_GetChildStorage_Call { return &MockSubstrateApiI_GetChildStorage_Call{Call: _e.mock.On("GetChildStorage", childStorageKey, key, target, blockHash)} } @@ -435,7 +435,7 @@ func (_c *MockSubstrateApiI_GetChildStorage_Call) RunAndReturn(run func(types.St return _c } - +// GetChildStorageHash provides a mock function with given fields: childStorageKey, key, blockHash func (_m *MockSubstrateApiI) GetChildStorageHash(childStorageKey types.StorageKey, key types.StorageKey, blockHash types.Hash) (types.Hash, error) { ret := _m.Called(childStorageKey, key, blockHash) @@ -465,15 +465,15 @@ func (_m *MockSubstrateApiI) GetChildStorageHash(childStorageKey types.StorageKe return r0, r1 } - +// MockSubstrateApiI_GetChildStorageHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorageHash' type MockSubstrateApiI_GetChildStorageHash_Call struct { *mock.Call } - - - - +// GetChildStorageHash is a helper method to define mock.On call +// - childStorageKey types.StorageKey +// - key types.StorageKey +// - blockHash types.Hash func (_e *MockSubstrateApiI_Expecter) GetChildStorageHash(childStorageKey interface{}, key interface{}, blockHash interface{}) *MockSubstrateApiI_GetChildStorageHash_Call { return &MockSubstrateApiI_GetChildStorageHash_Call{Call: _e.mock.On("GetChildStorageHash", childStorageKey, key, blockHash)} } @@ -495,7 +495,7 @@ func (_c *MockSubstrateApiI_GetChildStorageHash_Call) RunAndReturn(run func(type return _c } - +// GetChildStorageHashLatest provides a mock function with given fields: childStorageKey, key func (_m *MockSubstrateApiI) GetChildStorageHashLatest(childStorageKey types.StorageKey, key types.StorageKey) (types.Hash, error) { ret := _m.Called(childStorageKey, key) @@ -525,14 +525,14 @@ func (_m *MockSubstrateApiI) GetChildStorageHashLatest(childStorageKey types.Sto return r0, r1 } - +// MockSubstrateApiI_GetChildStorageHashLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorageHashLatest' type MockSubstrateApiI_GetChildStorageHashLatest_Call struct { *mock.Call } - - - +// GetChildStorageHashLatest is a helper method to define mock.On call +// - childStorageKey types.StorageKey +// - key types.StorageKey func (_e *MockSubstrateApiI_Expecter) GetChildStorageHashLatest(childStorageKey interface{}, key interface{}) *MockSubstrateApiI_GetChildStorageHashLatest_Call { return &MockSubstrateApiI_GetChildStorageHashLatest_Call{Call: _e.mock.On("GetChildStorageHashLatest", childStorageKey, key)} } @@ -554,7 +554,7 @@ func (_c *MockSubstrateApiI_GetChildStorageHashLatest_Call) RunAndReturn(run fun return _c } - +// GetChildStorageLatest provides a mock function with given fields: childStorageKey, key, target func (_m *MockSubstrateApiI) GetChildStorageLatest(childStorageKey types.StorageKey, key types.StorageKey, target interface{}) (bool, error) { ret := _m.Called(childStorageKey, key, target) @@ -582,15 +582,15 @@ func (_m *MockSubstrateApiI) GetChildStorageLatest(childStorageKey types.Storage return r0, r1 } - +// MockSubstrateApiI_GetChildStorageLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorageLatest' type MockSubstrateApiI_GetChildStorageLatest_Call struct { *mock.Call } - - - - +// GetChildStorageLatest is a helper method to define mock.On call +// - childStorageKey types.StorageKey 
+// - key types.StorageKey +// - target interface{} func (_e *MockSubstrateApiI_Expecter) GetChildStorageLatest(childStorageKey interface{}, key interface{}, target interface{}) *MockSubstrateApiI_GetChildStorageLatest_Call { return &MockSubstrateApiI_GetChildStorageLatest_Call{Call: _e.mock.On("GetChildStorageLatest", childStorageKey, key, target)} } @@ -612,7 +612,7 @@ func (_c *MockSubstrateApiI_GetChildStorageLatest_Call) RunAndReturn(run func(ty return _c } - +// GetChildStorageRaw provides a mock function with given fields: childStorageKey, key, blockHash func (_m *MockSubstrateApiI) GetChildStorageRaw(childStorageKey types.StorageKey, key types.StorageKey, blockHash types.Hash) (*types.StorageDataRaw, error) { ret := _m.Called(childStorageKey, key, blockHash) @@ -642,15 +642,15 @@ func (_m *MockSubstrateApiI) GetChildStorageRaw(childStorageKey types.StorageKey return r0, r1 } - +// MockSubstrateApiI_GetChildStorageRaw_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorageRaw' type MockSubstrateApiI_GetChildStorageRaw_Call struct { *mock.Call } - - - - +// GetChildStorageRaw is a helper method to define mock.On call +// - childStorageKey types.StorageKey +// - key types.StorageKey +// - blockHash types.Hash func (_e *MockSubstrateApiI_Expecter) GetChildStorageRaw(childStorageKey interface{}, key interface{}, blockHash interface{}) *MockSubstrateApiI_GetChildStorageRaw_Call { return &MockSubstrateApiI_GetChildStorageRaw_Call{Call: _e.mock.On("GetChildStorageRaw", childStorageKey, key, blockHash)} } @@ -672,7 +672,7 @@ func (_c *MockSubstrateApiI_GetChildStorageRaw_Call) RunAndReturn(run func(types return _c } - +// GetChildStorageRawLatest provides a mock function with given fields: childStorageKey, key func (_m *MockSubstrateApiI) GetChildStorageRawLatest(childStorageKey types.StorageKey, key types.StorageKey) (*types.StorageDataRaw, error) { ret := _m.Called(childStorageKey, key) @@ -702,14 +702,14 @@ func (_m *MockSubstrateApiI) GetChildStorageRawLatest(childStorageKey types.Stor return r0, r1 } - +// MockSubstrateApiI_GetChildStorageRawLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorageRawLatest' type MockSubstrateApiI_GetChildStorageRawLatest_Call struct { *mock.Call } - - - +// GetChildStorageRawLatest is a helper method to define mock.On call +// - childStorageKey types.StorageKey +// - key types.StorageKey func (_e *MockSubstrateApiI_Expecter) GetChildStorageRawLatest(childStorageKey interface{}, key interface{}) *MockSubstrateApiI_GetChildStorageRawLatest_Call { return &MockSubstrateApiI_GetChildStorageRawLatest_Call{Call: _e.mock.On("GetChildStorageRawLatest", childStorageKey, key)} } @@ -731,7 +731,7 @@ func (_c *MockSubstrateApiI_GetChildStorageRawLatest_Call) RunAndReturn(run func return _c } - +// GetChildStorageSize provides a mock function with given fields: childStorageKey, key, blockHash func (_m *MockSubstrateApiI) GetChildStorageSize(childStorageKey types.StorageKey, key types.StorageKey, blockHash types.Hash) (types.U64, error) { ret := _m.Called(childStorageKey, key, blockHash) @@ -759,15 +759,15 @@ func (_m *MockSubstrateApiI) GetChildStorageSize(childStorageKey types.StorageKe return r0, r1 } - +// MockSubstrateApiI_GetChildStorageSize_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorageSize' type MockSubstrateApiI_GetChildStorageSize_Call struct { *mock.Call } - - - - +// GetChildStorageSize 
is a helper method to define mock.On call +// - childStorageKey types.StorageKey +// - key types.StorageKey +// - blockHash types.Hash func (_e *MockSubstrateApiI_Expecter) GetChildStorageSize(childStorageKey interface{}, key interface{}, blockHash interface{}) *MockSubstrateApiI_GetChildStorageSize_Call { return &MockSubstrateApiI_GetChildStorageSize_Call{Call: _e.mock.On("GetChildStorageSize", childStorageKey, key, blockHash)} } @@ -789,7 +789,7 @@ func (_c *MockSubstrateApiI_GetChildStorageSize_Call) RunAndReturn(run func(type return _c } - +// GetChildStorageSizeLatest provides a mock function with given fields: childStorageKey, key func (_m *MockSubstrateApiI) GetChildStorageSizeLatest(childStorageKey types.StorageKey, key types.StorageKey) (types.U64, error) { ret := _m.Called(childStorageKey, key) @@ -817,14 +817,14 @@ func (_m *MockSubstrateApiI) GetChildStorageSizeLatest(childStorageKey types.Sto return r0, r1 } - +// MockSubstrateApiI_GetChildStorageSizeLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorageSizeLatest' type MockSubstrateApiI_GetChildStorageSizeLatest_Call struct { *mock.Call } - - - +// GetChildStorageSizeLatest is a helper method to define mock.On call +// - childStorageKey types.StorageKey +// - key types.StorageKey func (_e *MockSubstrateApiI_Expecter) GetChildStorageSizeLatest(childStorageKey interface{}, key interface{}) *MockSubstrateApiI_GetChildStorageSizeLatest_Call { return &MockSubstrateApiI_GetChildStorageSizeLatest_Call{Call: _e.mock.On("GetChildStorageSizeLatest", childStorageKey, key)} } @@ -846,7 +846,7 @@ func (_c *MockSubstrateApiI_GetChildStorageSizeLatest_Call) RunAndReturn(run fun return _c } - +// GetFinalizedHead provides a mock function with given fields: func (_m *MockSubstrateApiI) GetFinalizedHead() (types.Hash, error) { ret := _m.Called() @@ -876,12 +876,12 @@ func (_m *MockSubstrateApiI) GetFinalizedHead() (types.Hash, error) { return r0, r1 } - +// MockSubstrateApiI_GetFinalizedHead_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFinalizedHead' type MockSubstrateApiI_GetFinalizedHead_Call struct { *mock.Call } - +// GetFinalizedHead is a helper method to define mock.On call func (_e *MockSubstrateApiI_Expecter) GetFinalizedHead() *MockSubstrateApiI_GetFinalizedHead_Call { return &MockSubstrateApiI_GetFinalizedHead_Call{Call: _e.mock.On("GetFinalizedHead")} } @@ -903,7 +903,7 @@ func (_c *MockSubstrateApiI_GetFinalizedHead_Call) RunAndReturn(run func() (type return _c } - +// GetHeader provides a mock function with given fields: blockHash func (_m *MockSubstrateApiI) GetHeader(blockHash types.Hash) (*types.Header, error) { ret := _m.Called(blockHash) @@ -933,13 +933,13 @@ func (_m *MockSubstrateApiI) GetHeader(blockHash types.Hash) (*types.Header, err return r0, r1 } - +// MockSubstrateApiI_GetHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetHeader' type MockSubstrateApiI_GetHeader_Call struct { *mock.Call } - - +// GetHeader is a helper method to define mock.On call +// - blockHash types.Hash func (_e *MockSubstrateApiI_Expecter) GetHeader(blockHash interface{}) *MockSubstrateApiI_GetHeader_Call { return &MockSubstrateApiI_GetHeader_Call{Call: _e.mock.On("GetHeader", blockHash)} } @@ -961,7 +961,7 @@ func (_c *MockSubstrateApiI_GetHeader_Call) RunAndReturn(run func(types.Hash) (* return _c } - +// GetHeaderLatest provides a mock function with given fields: func (_m 
*MockSubstrateApiI) GetHeaderLatest() (*types.Header, error) { ret := _m.Called() @@ -991,12 +991,12 @@ func (_m *MockSubstrateApiI) GetHeaderLatest() (*types.Header, error) { return r0, r1 } - +// MockSubstrateApiI_GetHeaderLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetHeaderLatest' type MockSubstrateApiI_GetHeaderLatest_Call struct { *mock.Call } - +// GetHeaderLatest is a helper method to define mock.On call func (_e *MockSubstrateApiI_Expecter) GetHeaderLatest() *MockSubstrateApiI_GetHeaderLatest_Call { return &MockSubstrateApiI_GetHeaderLatest_Call{Call: _e.mock.On("GetHeaderLatest")} } @@ -1018,7 +1018,7 @@ func (_c *MockSubstrateApiI_GetHeaderLatest_Call) RunAndReturn(run func() (*type return _c } - +// GetKeys provides a mock function with given fields: prefix, blockHash func (_m *MockSubstrateApiI) GetKeys(prefix types.StorageKey, blockHash types.Hash) ([]types.StorageKey, error) { ret := _m.Called(prefix, blockHash) @@ -1048,14 +1048,14 @@ func (_m *MockSubstrateApiI) GetKeys(prefix types.StorageKey, blockHash types.Ha return r0, r1 } - +// MockSubstrateApiI_GetKeys_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetKeys' type MockSubstrateApiI_GetKeys_Call struct { *mock.Call } - - - +// GetKeys is a helper method to define mock.On call +// - prefix types.StorageKey +// - blockHash types.Hash func (_e *MockSubstrateApiI_Expecter) GetKeys(prefix interface{}, blockHash interface{}) *MockSubstrateApiI_GetKeys_Call { return &MockSubstrateApiI_GetKeys_Call{Call: _e.mock.On("GetKeys", prefix, blockHash)} } @@ -1077,7 +1077,7 @@ func (_c *MockSubstrateApiI_GetKeys_Call) RunAndReturn(run func(types.StorageKey return _c } - +// GetKeysLatest provides a mock function with given fields: prefix func (_m *MockSubstrateApiI) GetKeysLatest(prefix types.StorageKey) ([]types.StorageKey, error) { ret := _m.Called(prefix) @@ -1107,13 +1107,13 @@ func (_m *MockSubstrateApiI) GetKeysLatest(prefix types.StorageKey) ([]types.Sto return r0, r1 } - +// MockSubstrateApiI_GetKeysLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetKeysLatest' type MockSubstrateApiI_GetKeysLatest_Call struct { *mock.Call } - - +// GetKeysLatest is a helper method to define mock.On call +// - prefix types.StorageKey func (_e *MockSubstrateApiI_Expecter) GetKeysLatest(prefix interface{}) *MockSubstrateApiI_GetKeysLatest_Call { return &MockSubstrateApiI_GetKeysLatest_Call{Call: _e.mock.On("GetKeysLatest", prefix)} } @@ -1135,7 +1135,7 @@ func (_c *MockSubstrateApiI_GetKeysLatest_Call) RunAndReturn(run func(types.Stor return _c } - +// GetMetadata provides a mock function with given fields: blockHash func (_m *MockSubstrateApiI) GetMetadata(blockHash types.Hash) (*types.Metadata, error) { ret := _m.Called(blockHash) @@ -1165,13 +1165,13 @@ func (_m *MockSubstrateApiI) GetMetadata(blockHash types.Hash) (*types.Metadata, return r0, r1 } - +// MockSubstrateApiI_GetMetadata_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetMetadata' type MockSubstrateApiI_GetMetadata_Call struct { *mock.Call } - - +// GetMetadata is a helper method to define mock.On call +// - blockHash types.Hash func (_e *MockSubstrateApiI_Expecter) GetMetadata(blockHash interface{}) *MockSubstrateApiI_GetMetadata_Call { return &MockSubstrateApiI_GetMetadata_Call{Call: _e.mock.On("GetMetadata", blockHash)} } @@ -1193,7 +1193,7 @@ func (_c 
*MockSubstrateApiI_GetMetadata_Call) RunAndReturn(run func(types.Hash) return _c } - +// GetMetadataLatest provides a mock function with given fields: func (_m *MockSubstrateApiI) GetMetadataLatest() (*types.Metadata, error) { ret := _m.Called() @@ -1223,12 +1223,12 @@ func (_m *MockSubstrateApiI) GetMetadataLatest() (*types.Metadata, error) { return r0, r1 } - +// MockSubstrateApiI_GetMetadataLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetMetadataLatest' type MockSubstrateApiI_GetMetadataLatest_Call struct { *mock.Call } - +// GetMetadataLatest is a helper method to define mock.On call func (_e *MockSubstrateApiI_Expecter) GetMetadataLatest() *MockSubstrateApiI_GetMetadataLatest_Call { return &MockSubstrateApiI_GetMetadataLatest_Call{Call: _e.mock.On("GetMetadataLatest")} } @@ -1250,7 +1250,7 @@ func (_c *MockSubstrateApiI_GetMetadataLatest_Call) RunAndReturn(run func() (*ty return _c } - +// GetRuntimeVersion provides a mock function with given fields: blockHash func (_m *MockSubstrateApiI) GetRuntimeVersion(blockHash types.Hash) (*types.RuntimeVersion, error) { ret := _m.Called(blockHash) @@ -1280,13 +1280,13 @@ func (_m *MockSubstrateApiI) GetRuntimeVersion(blockHash types.Hash) (*types.Run return r0, r1 } - +// MockSubstrateApiI_GetRuntimeVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRuntimeVersion' type MockSubstrateApiI_GetRuntimeVersion_Call struct { *mock.Call } - - +// GetRuntimeVersion is a helper method to define mock.On call +// - blockHash types.Hash func (_e *MockSubstrateApiI_Expecter) GetRuntimeVersion(blockHash interface{}) *MockSubstrateApiI_GetRuntimeVersion_Call { return &MockSubstrateApiI_GetRuntimeVersion_Call{Call: _e.mock.On("GetRuntimeVersion", blockHash)} } @@ -1308,7 +1308,7 @@ func (_c *MockSubstrateApiI_GetRuntimeVersion_Call) RunAndReturn(run func(types. 
return _c } - +// GetRuntimeVersionLatest provides a mock function with given fields: func (_m *MockSubstrateApiI) GetRuntimeVersionLatest() (*types.RuntimeVersion, error) { ret := _m.Called() @@ -1338,12 +1338,12 @@ func (_m *MockSubstrateApiI) GetRuntimeVersionLatest() (*types.RuntimeVersion, e return r0, r1 } - +// MockSubstrateApiI_GetRuntimeVersionLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRuntimeVersionLatest' type MockSubstrateApiI_GetRuntimeVersionLatest_Call struct { *mock.Call } - +// GetRuntimeVersionLatest is a helper method to define mock.On call func (_e *MockSubstrateApiI_Expecter) GetRuntimeVersionLatest() *MockSubstrateApiI_GetRuntimeVersionLatest_Call { return &MockSubstrateApiI_GetRuntimeVersionLatest_Call{Call: _e.mock.On("GetRuntimeVersionLatest")} } @@ -1365,7 +1365,7 @@ func (_c *MockSubstrateApiI_GetRuntimeVersionLatest_Call) RunAndReturn(run func( return _c } - +// GetStorage provides a mock function with given fields: key, target, blockHash func (_m *MockSubstrateApiI) GetStorage(key types.StorageKey, target interface{}, blockHash types.Hash) (bool, error) { ret := _m.Called(key, target, blockHash) @@ -1393,15 +1393,15 @@ func (_m *MockSubstrateApiI) GetStorage(key types.StorageKey, target interface{} return r0, r1 } - +// MockSubstrateApiI_GetStorage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorage' type MockSubstrateApiI_GetStorage_Call struct { *mock.Call } - - - - +// GetStorage is a helper method to define mock.On call +// - key types.StorageKey +// - target interface{} +// - blockHash types.Hash func (_e *MockSubstrateApiI_Expecter) GetStorage(key interface{}, target interface{}, blockHash interface{}) *MockSubstrateApiI_GetStorage_Call { return &MockSubstrateApiI_GetStorage_Call{Call: _e.mock.On("GetStorage", key, target, blockHash)} } @@ -1423,7 +1423,7 @@ func (_c *MockSubstrateApiI_GetStorage_Call) RunAndReturn(run func(types.Storage return _c } - +// GetStorageHash provides a mock function with given fields: key, blockHash func (_m *MockSubstrateApiI) GetStorageHash(key types.StorageKey, blockHash types.Hash) (types.Hash, error) { ret := _m.Called(key, blockHash) @@ -1453,14 +1453,14 @@ func (_m *MockSubstrateApiI) GetStorageHash(key types.StorageKey, blockHash type return r0, r1 } - +// MockSubstrateApiI_GetStorageHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorageHash' type MockSubstrateApiI_GetStorageHash_Call struct { *mock.Call } - - - +// GetStorageHash is a helper method to define mock.On call +// - key types.StorageKey +// - blockHash types.Hash func (_e *MockSubstrateApiI_Expecter) GetStorageHash(key interface{}, blockHash interface{}) *MockSubstrateApiI_GetStorageHash_Call { return &MockSubstrateApiI_GetStorageHash_Call{Call: _e.mock.On("GetStorageHash", key, blockHash)} } @@ -1482,7 +1482,7 @@ func (_c *MockSubstrateApiI_GetStorageHash_Call) RunAndReturn(run func(types.Sto return _c } - +// GetStorageHashLatest provides a mock function with given fields: key func (_m *MockSubstrateApiI) GetStorageHashLatest(key types.StorageKey) (types.Hash, error) { ret := _m.Called(key) @@ -1512,13 +1512,13 @@ func (_m *MockSubstrateApiI) GetStorageHashLatest(key types.StorageKey) (types.H return r0, r1 } - +// MockSubstrateApiI_GetStorageHashLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorageHashLatest' type 
MockSubstrateApiI_GetStorageHashLatest_Call struct { *mock.Call } - - +// GetStorageHashLatest is a helper method to define mock.On call +// - key types.StorageKey func (_e *MockSubstrateApiI_Expecter) GetStorageHashLatest(key interface{}) *MockSubstrateApiI_GetStorageHashLatest_Call { return &MockSubstrateApiI_GetStorageHashLatest_Call{Call: _e.mock.On("GetStorageHashLatest", key)} } @@ -1540,7 +1540,7 @@ func (_c *MockSubstrateApiI_GetStorageHashLatest_Call) RunAndReturn(run func(typ return _c } - +// GetStorageLatest provides a mock function with given fields: key, target func (_m *MockSubstrateApiI) GetStorageLatest(key types.StorageKey, target interface{}) (bool, error) { ret := _m.Called(key, target) @@ -1568,14 +1568,14 @@ func (_m *MockSubstrateApiI) GetStorageLatest(key types.StorageKey, target inter return r0, r1 } - +// MockSubstrateApiI_GetStorageLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorageLatest' type MockSubstrateApiI_GetStorageLatest_Call struct { *mock.Call } - - - +// GetStorageLatest is a helper method to define mock.On call +// - key types.StorageKey +// - target interface{} func (_e *MockSubstrateApiI_Expecter) GetStorageLatest(key interface{}, target interface{}) *MockSubstrateApiI_GetStorageLatest_Call { return &MockSubstrateApiI_GetStorageLatest_Call{Call: _e.mock.On("GetStorageLatest", key, target)} } @@ -1597,7 +1597,7 @@ func (_c *MockSubstrateApiI_GetStorageLatest_Call) RunAndReturn(run func(types.S return _c } - +// GetStorageRaw provides a mock function with given fields: key, blockHash func (_m *MockSubstrateApiI) GetStorageRaw(key types.StorageKey, blockHash types.Hash) (*types.StorageDataRaw, error) { ret := _m.Called(key, blockHash) @@ -1627,14 +1627,14 @@ func (_m *MockSubstrateApiI) GetStorageRaw(key types.StorageKey, blockHash types return r0, r1 } - +// MockSubstrateApiI_GetStorageRaw_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorageRaw' type MockSubstrateApiI_GetStorageRaw_Call struct { *mock.Call } - - - +// GetStorageRaw is a helper method to define mock.On call +// - key types.StorageKey +// - blockHash types.Hash func (_e *MockSubstrateApiI_Expecter) GetStorageRaw(key interface{}, blockHash interface{}) *MockSubstrateApiI_GetStorageRaw_Call { return &MockSubstrateApiI_GetStorageRaw_Call{Call: _e.mock.On("GetStorageRaw", key, blockHash)} } @@ -1656,7 +1656,7 @@ func (_c *MockSubstrateApiI_GetStorageRaw_Call) RunAndReturn(run func(types.Stor return _c } - +// GetStorageRawLatest provides a mock function with given fields: key func (_m *MockSubstrateApiI) GetStorageRawLatest(key types.StorageKey) (*types.StorageDataRaw, error) { ret := _m.Called(key) @@ -1686,13 +1686,13 @@ func (_m *MockSubstrateApiI) GetStorageRawLatest(key types.StorageKey) (*types.S return r0, r1 } - +// MockSubstrateApiI_GetStorageRawLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorageRawLatest' type MockSubstrateApiI_GetStorageRawLatest_Call struct { *mock.Call } - - +// GetStorageRawLatest is a helper method to define mock.On call +// - key types.StorageKey func (_e *MockSubstrateApiI_Expecter) GetStorageRawLatest(key interface{}) *MockSubstrateApiI_GetStorageRawLatest_Call { return &MockSubstrateApiI_GetStorageRawLatest_Call{Call: _e.mock.On("GetStorageRawLatest", key)} } @@ -1714,7 +1714,7 @@ func (_c *MockSubstrateApiI_GetStorageRawLatest_Call) RunAndReturn(run func(type return _c } - +// GetStorageSize 
provides a mock function with given fields: key, blockHash func (_m *MockSubstrateApiI) GetStorageSize(key types.StorageKey, blockHash types.Hash) (types.U64, error) { ret := _m.Called(key, blockHash) @@ -1742,14 +1742,14 @@ func (_m *MockSubstrateApiI) GetStorageSize(key types.StorageKey, blockHash type return r0, r1 } - +// MockSubstrateApiI_GetStorageSize_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorageSize' type MockSubstrateApiI_GetStorageSize_Call struct { *mock.Call } - - - +// GetStorageSize is a helper method to define mock.On call +// - key types.StorageKey +// - blockHash types.Hash func (_e *MockSubstrateApiI_Expecter) GetStorageSize(key interface{}, blockHash interface{}) *MockSubstrateApiI_GetStorageSize_Call { return &MockSubstrateApiI_GetStorageSize_Call{Call: _e.mock.On("GetStorageSize", key, blockHash)} } @@ -1771,7 +1771,7 @@ func (_c *MockSubstrateApiI_GetStorageSize_Call) RunAndReturn(run func(types.Sto return _c } - +// GetStorageSizeLatest provides a mock function with given fields: key func (_m *MockSubstrateApiI) GetStorageSizeLatest(key types.StorageKey) (types.U64, error) { ret := _m.Called(key) @@ -1799,13 +1799,13 @@ func (_m *MockSubstrateApiI) GetStorageSizeLatest(key types.StorageKey) (types.U return r0, r1 } - +// MockSubstrateApiI_GetStorageSizeLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorageSizeLatest' type MockSubstrateApiI_GetStorageSizeLatest_Call struct { *mock.Call } - - +// GetStorageSizeLatest is a helper method to define mock.On call +// - key types.StorageKey func (_e *MockSubstrateApiI_Expecter) GetStorageSizeLatest(key interface{}) *MockSubstrateApiI_GetStorageSizeLatest_Call { return &MockSubstrateApiI_GetStorageSizeLatest_Call{Call: _e.mock.On("GetStorageSizeLatest", key)} } @@ -1827,7 +1827,7 @@ func (_c *MockSubstrateApiI_GetStorageSizeLatest_Call) RunAndReturn(run func(typ return _c } - +// PendingExtrinsics provides a mock function with given fields: func (_m *MockSubstrateApiI) PendingExtrinsics() ([]types.Extrinsic, error) { ret := _m.Called() @@ -1857,12 +1857,12 @@ func (_m *MockSubstrateApiI) PendingExtrinsics() ([]types.Extrinsic, error) { return r0, r1 } - +// MockSubstrateApiI_PendingExtrinsics_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PendingExtrinsics' type MockSubstrateApiI_PendingExtrinsics_Call struct { *mock.Call } - +// PendingExtrinsics is a helper method to define mock.On call func (_e *MockSubstrateApiI_Expecter) PendingExtrinsics() *MockSubstrateApiI_PendingExtrinsics_Call { return &MockSubstrateApiI_PendingExtrinsics_Call{Call: _e.mock.On("PendingExtrinsics")} } @@ -1884,7 +1884,7 @@ func (_c *MockSubstrateApiI_PendingExtrinsics_Call) RunAndReturn(run func() ([]t return _c } - +// QueryStorage provides a mock function with given fields: keys, startBlock, block func (_m *MockSubstrateApiI) QueryStorage(keys []types.StorageKey, startBlock types.Hash, block types.Hash) ([]types.StorageChangeSet, error) { ret := _m.Called(keys, startBlock, block) @@ -1914,15 +1914,15 @@ func (_m *MockSubstrateApiI) QueryStorage(keys []types.StorageKey, startBlock ty return r0, r1 } - +// MockSubstrateApiI_QueryStorage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'QueryStorage' type MockSubstrateApiI_QueryStorage_Call struct { *mock.Call } - - - - +// QueryStorage is a helper method to define mock.On call +// - keys 
[]types.StorageKey +// - startBlock types.Hash +// - block types.Hash func (_e *MockSubstrateApiI_Expecter) QueryStorage(keys interface{}, startBlock interface{}, block interface{}) *MockSubstrateApiI_QueryStorage_Call { return &MockSubstrateApiI_QueryStorage_Call{Call: _e.mock.On("QueryStorage", keys, startBlock, block)} } @@ -1944,7 +1944,7 @@ func (_c *MockSubstrateApiI_QueryStorage_Call) RunAndReturn(run func([]types.Sto return _c } - +// QueryStorageAt provides a mock function with given fields: keys, block func (_m *MockSubstrateApiI) QueryStorageAt(keys []types.StorageKey, block types.Hash) ([]types.StorageChangeSet, error) { ret := _m.Called(keys, block) @@ -1974,14 +1974,14 @@ func (_m *MockSubstrateApiI) QueryStorageAt(keys []types.StorageKey, block types return r0, r1 } - +// MockSubstrateApiI_QueryStorageAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'QueryStorageAt' type MockSubstrateApiI_QueryStorageAt_Call struct { *mock.Call } - - - +// QueryStorageAt is a helper method to define mock.On call +// - keys []types.StorageKey +// - block types.Hash func (_e *MockSubstrateApiI_Expecter) QueryStorageAt(keys interface{}, block interface{}) *MockSubstrateApiI_QueryStorageAt_Call { return &MockSubstrateApiI_QueryStorageAt_Call{Call: _e.mock.On("QueryStorageAt", keys, block)} } @@ -2003,7 +2003,7 @@ func (_c *MockSubstrateApiI_QueryStorageAt_Call) RunAndReturn(run func([]types.S return _c } - +// QueryStorageAtLatest provides a mock function with given fields: keys func (_m *MockSubstrateApiI) QueryStorageAtLatest(keys []types.StorageKey) ([]types.StorageChangeSet, error) { ret := _m.Called(keys) @@ -2033,13 +2033,13 @@ func (_m *MockSubstrateApiI) QueryStorageAtLatest(keys []types.StorageKey) ([]ty return r0, r1 } - +// MockSubstrateApiI_QueryStorageAtLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'QueryStorageAtLatest' type MockSubstrateApiI_QueryStorageAtLatest_Call struct { *mock.Call } - - +// QueryStorageAtLatest is a helper method to define mock.On call +// - keys []types.StorageKey func (_e *MockSubstrateApiI_Expecter) QueryStorageAtLatest(keys interface{}) *MockSubstrateApiI_QueryStorageAtLatest_Call { return &MockSubstrateApiI_QueryStorageAtLatest_Call{Call: _e.mock.On("QueryStorageAtLatest", keys)} } @@ -2061,7 +2061,7 @@ func (_c *MockSubstrateApiI_QueryStorageAtLatest_Call) RunAndReturn(run func([]t return _c } - +// QueryStorageLatest provides a mock function with given fields: keys, startBlock func (_m *MockSubstrateApiI) QueryStorageLatest(keys []types.StorageKey, startBlock types.Hash) ([]types.StorageChangeSet, error) { ret := _m.Called(keys, startBlock) @@ -2091,14 +2091,14 @@ func (_m *MockSubstrateApiI) QueryStorageLatest(keys []types.StorageKey, startBl return r0, r1 } - +// MockSubstrateApiI_QueryStorageLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'QueryStorageLatest' type MockSubstrateApiI_QueryStorageLatest_Call struct { *mock.Call } - - - +// QueryStorageLatest is a helper method to define mock.On call +// - keys []types.StorageKey +// - startBlock types.Hash func (_e *MockSubstrateApiI_Expecter) QueryStorageLatest(keys interface{}, startBlock interface{}) *MockSubstrateApiI_QueryStorageLatest_Call { return &MockSubstrateApiI_QueryStorageLatest_Call{Call: _e.mock.On("QueryStorageLatest", keys, startBlock)} } @@ -2120,7 +2120,7 @@ func (_c *MockSubstrateApiI_QueryStorageLatest_Call) RunAndReturn(run func([]typ 
return _c } - +// SubmitAndWatchExtrinsic provides a mock function with given fields: xt func (_m *MockSubstrateApiI) SubmitAndWatchExtrinsic(xt types.Extrinsic) (*author.ExtrinsicStatusSubscription, error) { ret := _m.Called(xt) @@ -2150,13 +2150,13 @@ func (_m *MockSubstrateApiI) SubmitAndWatchExtrinsic(xt types.Extrinsic) (*autho return r0, r1 } - +// MockSubstrateApiI_SubmitAndWatchExtrinsic_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubmitAndWatchExtrinsic' type MockSubstrateApiI_SubmitAndWatchExtrinsic_Call struct { *mock.Call } - - +// SubmitAndWatchExtrinsic is a helper method to define mock.On call +// - xt types.Extrinsic func (_e *MockSubstrateApiI_Expecter) SubmitAndWatchExtrinsic(xt interface{}) *MockSubstrateApiI_SubmitAndWatchExtrinsic_Call { return &MockSubstrateApiI_SubmitAndWatchExtrinsic_Call{Call: _e.mock.On("SubmitAndWatchExtrinsic", xt)} } @@ -2178,7 +2178,7 @@ func (_c *MockSubstrateApiI_SubmitAndWatchExtrinsic_Call) RunAndReturn(run func( return _c } - +// SubmitExtrinsic provides a mock function with given fields: xt func (_m *MockSubstrateApiI) SubmitExtrinsic(xt types.Extrinsic) (types.Hash, error) { ret := _m.Called(xt) @@ -2208,13 +2208,13 @@ func (_m *MockSubstrateApiI) SubmitExtrinsic(xt types.Extrinsic) (types.Hash, er return r0, r1 } - +// MockSubstrateApiI_SubmitExtrinsic_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubmitExtrinsic' type MockSubstrateApiI_SubmitExtrinsic_Call struct { *mock.Call } - - +// SubmitExtrinsic is a helper method to define mock.On call +// - xt types.Extrinsic func (_e *MockSubstrateApiI_Expecter) SubmitExtrinsic(xt interface{}) *MockSubstrateApiI_SubmitExtrinsic_Call { return &MockSubstrateApiI_SubmitExtrinsic_Call{Call: _e.mock.On("SubmitExtrinsic", xt)} } @@ -2236,7 +2236,7 @@ func (_c *MockSubstrateApiI_SubmitExtrinsic_Call) RunAndReturn(run func(types.Ex return _c } - +// SubscribeFinalizedHeads provides a mock function with given fields: func (_m *MockSubstrateApiI) SubscribeFinalizedHeads() (*chain.FinalizedHeadsSubscription, error) { ret := _m.Called() @@ -2266,12 +2266,12 @@ func (_m *MockSubstrateApiI) SubscribeFinalizedHeads() (*chain.FinalizedHeadsSub return r0, r1 } - +// MockSubstrateApiI_SubscribeFinalizedHeads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeFinalizedHeads' type MockSubstrateApiI_SubscribeFinalizedHeads_Call struct { *mock.Call } - +// SubscribeFinalizedHeads is a helper method to define mock.On call func (_e *MockSubstrateApiI_Expecter) SubscribeFinalizedHeads() *MockSubstrateApiI_SubscribeFinalizedHeads_Call { return &MockSubstrateApiI_SubscribeFinalizedHeads_Call{Call: _e.mock.On("SubscribeFinalizedHeads")} } @@ -2293,7 +2293,7 @@ func (_c *MockSubstrateApiI_SubscribeFinalizedHeads_Call) RunAndReturn(run func( return _c } - +// SubscribeNewHeads provides a mock function with given fields: func (_m *MockSubstrateApiI) SubscribeNewHeads() (*chain.NewHeadsSubscription, error) { ret := _m.Called() @@ -2323,12 +2323,12 @@ func (_m *MockSubstrateApiI) SubscribeNewHeads() (*chain.NewHeadsSubscription, e return r0, r1 } - +// MockSubstrateApiI_SubscribeNewHeads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeNewHeads' type MockSubstrateApiI_SubscribeNewHeads_Call struct { *mock.Call } - +// SubscribeNewHeads is a helper method to define mock.On call func (_e *MockSubstrateApiI_Expecter) 
SubscribeNewHeads() *MockSubstrateApiI_SubscribeNewHeads_Call { return &MockSubstrateApiI_SubscribeNewHeads_Call{Call: _e.mock.On("SubscribeNewHeads")} } @@ -2350,7 +2350,7 @@ func (_c *MockSubstrateApiI_SubscribeNewHeads_Call) RunAndReturn(run func() (*ch return _c } - +// SubscribeRuntimeVersion provides a mock function with given fields: func (_m *MockSubstrateApiI) SubscribeRuntimeVersion() (*state.RuntimeVersionSubscription, error) { ret := _m.Called() @@ -2380,12 +2380,12 @@ func (_m *MockSubstrateApiI) SubscribeRuntimeVersion() (*state.RuntimeVersionSub return r0, r1 } - +// MockSubstrateApiI_SubscribeRuntimeVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeRuntimeVersion' type MockSubstrateApiI_SubscribeRuntimeVersion_Call struct { *mock.Call } - +// SubscribeRuntimeVersion is a helper method to define mock.On call func (_e *MockSubstrateApiI_Expecter) SubscribeRuntimeVersion() *MockSubstrateApiI_SubscribeRuntimeVersion_Call { return &MockSubstrateApiI_SubscribeRuntimeVersion_Call{Call: _e.mock.On("SubscribeRuntimeVersion")} } @@ -2407,7 +2407,7 @@ func (_c *MockSubstrateApiI_SubscribeRuntimeVersion_Call) RunAndReturn(run func( return _c } - +// SubscribeStorageRaw provides a mock function with given fields: keys func (_m *MockSubstrateApiI) SubscribeStorageRaw(keys []types.StorageKey) (*state.StorageSubscription, error) { ret := _m.Called(keys) @@ -2437,13 +2437,13 @@ func (_m *MockSubstrateApiI) SubscribeStorageRaw(keys []types.StorageKey) (*stat return r0, r1 } - +// MockSubstrateApiI_SubscribeStorageRaw_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeStorageRaw' type MockSubstrateApiI_SubscribeStorageRaw_Call struct { *mock.Call } - - +// SubscribeStorageRaw is a helper method to define mock.On call +// - keys []types.StorageKey func (_e *MockSubstrateApiI_Expecter) SubscribeStorageRaw(keys interface{}) *MockSubstrateApiI_SubscribeStorageRaw_Call { return &MockSubstrateApiI_SubscribeStorageRaw_Call{Call: _e.mock.On("SubscribeStorageRaw", keys)} } @@ -2465,8 +2465,8 @@ func (_c *MockSubstrateApiI_SubscribeStorageRaw_Call) RunAndReturn(run func([]ty return _c } - - +// NewMockSubstrateApiI creates a new instance of MockSubstrateApiI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. func NewMockSubstrateApiI(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/da/celestia/types/mock_CelestiaRPCClient.go b/mocks/github.com/dymensionxyz/dymint/da/celestia/types/mock_CelestiaRPCClient.go index cb248d62a..f80184e4f 100644 --- a/mocks/github.com/dymensionxyz/dymint/da/celestia/types/mock_CelestiaRPCClient.go +++ b/mocks/github.com/dymensionxyz/dymint/da/celestia/types/mock_CelestiaRPCClient.go @@ -1,4 +1,4 @@ - +// Code generated by mockery v2.42.3. DO NOT EDIT. 
package types @@ -16,7 +16,7 @@ import ( share "github.com/celestiaorg/celestia-openrpc/types/share" ) - +// MockCelestiaRPCClient is an autogenerated mock type for the CelestiaRPCClient type type MockCelestiaRPCClient struct { mock.Mock } @@ -29,7 +29,7 @@ func (_m *MockCelestiaRPCClient) EXPECT() *MockCelestiaRPCClient_Expecter { return &MockCelestiaRPCClient_Expecter{mock: &_m.Mock} } - +// Get provides a mock function with given fields: ctx, height, namespace, commitment func (_m *MockCelestiaRPCClient) Get(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Blob, error) { ret := _m.Called(ctx, height, namespace, commitment) @@ -59,16 +59,16 @@ func (_m *MockCelestiaRPCClient) Get(ctx context.Context, height uint64, namespa return r0, r1 } - +// MockCelestiaRPCClient_Get_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Get' type MockCelestiaRPCClient_Get_Call struct { *mock.Call } - - - - - +// Get is a helper method to define mock.On call +// - ctx context.Context +// - height uint64 +// - namespace share.Namespace +// - commitment blob.Commitment func (_e *MockCelestiaRPCClient_Expecter) Get(ctx interface{}, height interface{}, namespace interface{}, commitment interface{}) *MockCelestiaRPCClient_Get_Call { return &MockCelestiaRPCClient_Get_Call{Call: _e.mock.On("Get", ctx, height, namespace, commitment)} } @@ -90,7 +90,7 @@ func (_c *MockCelestiaRPCClient_Get_Call) RunAndReturn(run func(context.Context, return _c } - +// GetAll provides a mock function with given fields: _a0, _a1, _a2 func (_m *MockCelestiaRPCClient) GetAll(_a0 context.Context, _a1 uint64, _a2 []share.Namespace) ([]*blob.Blob, error) { ret := _m.Called(_a0, _a1, _a2) @@ -120,15 +120,15 @@ func (_m *MockCelestiaRPCClient) GetAll(_a0 context.Context, _a1 uint64, _a2 []s return r0, r1 } - +// MockCelestiaRPCClient_GetAll_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAll' type MockCelestiaRPCClient_GetAll_Call struct { *mock.Call } - - - - +// GetAll is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 uint64 +// - _a2 []share.Namespace func (_e *MockCelestiaRPCClient_Expecter) GetAll(_a0 interface{}, _a1 interface{}, _a2 interface{}) *MockCelestiaRPCClient_GetAll_Call { return &MockCelestiaRPCClient_GetAll_Call{Call: _e.mock.On("GetAll", _a0, _a1, _a2)} } @@ -150,7 +150,7 @@ func (_c *MockCelestiaRPCClient_GetAll_Call) RunAndReturn(run func(context.Conte return _c } - +// GetByHeight provides a mock function with given fields: ctx, height func (_m *MockCelestiaRPCClient) GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { ret := _m.Called(ctx, height) @@ -180,14 +180,14 @@ func (_m *MockCelestiaRPCClient) GetByHeight(ctx context.Context, height uint64) return r0, r1 } - +// MockCelestiaRPCClient_GetByHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetByHeight' type MockCelestiaRPCClient_GetByHeight_Call struct { *mock.Call } - - - +// GetByHeight is a helper method to define mock.On call +// - ctx context.Context +// - height uint64 func (_e *MockCelestiaRPCClient_Expecter) GetByHeight(ctx interface{}, height interface{}) *MockCelestiaRPCClient_GetByHeight_Call { return &MockCelestiaRPCClient_GetByHeight_Call{Call: _e.mock.On("GetByHeight", ctx, height)} } @@ -209,7 +209,7 @@ func (_c *MockCelestiaRPCClient_GetByHeight_Call) RunAndReturn(run func(context. 
return _c } - +// GetProof provides a mock function with given fields: ctx, height, namespace, commitment func (_m *MockCelestiaRPCClient) GetProof(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Proof, error) { ret := _m.Called(ctx, height, namespace, commitment) @@ -239,16 +239,16 @@ func (_m *MockCelestiaRPCClient) GetProof(ctx context.Context, height uint64, na return r0, r1 } - +// MockCelestiaRPCClient_GetProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProof' type MockCelestiaRPCClient_GetProof_Call struct { *mock.Call } - - - - - +// GetProof is a helper method to define mock.On call +// - ctx context.Context +// - height uint64 +// - namespace share.Namespace +// - commitment blob.Commitment func (_e *MockCelestiaRPCClient_Expecter) GetProof(ctx interface{}, height interface{}, namespace interface{}, commitment interface{}) *MockCelestiaRPCClient_GetProof_Call { return &MockCelestiaRPCClient_GetProof_Call{Call: _e.mock.On("GetProof", ctx, height, namespace, commitment)} } @@ -270,7 +270,7 @@ func (_c *MockCelestiaRPCClient_GetProof_Call) RunAndReturn(run func(context.Con return _c } - +// GetSignerBalance provides a mock function with given fields: ctx func (_m *MockCelestiaRPCClient) GetSignerBalance(ctx context.Context) (*sdk.Coin, error) { ret := _m.Called(ctx) @@ -300,13 +300,13 @@ func (_m *MockCelestiaRPCClient) GetSignerBalance(ctx context.Context) (*sdk.Coi return r0, r1 } - +// MockCelestiaRPCClient_GetSignerBalance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSignerBalance' type MockCelestiaRPCClient_GetSignerBalance_Call struct { *mock.Call } - - +// GetSignerBalance is a helper method to define mock.On call +// - ctx context.Context func (_e *MockCelestiaRPCClient_Expecter) GetSignerBalance(ctx interface{}) *MockCelestiaRPCClient_GetSignerBalance_Call { return &MockCelestiaRPCClient_GetSignerBalance_Call{Call: _e.mock.On("GetSignerBalance", ctx)} } @@ -328,7 +328,7 @@ func (_c *MockCelestiaRPCClient_GetSignerBalance_Call) RunAndReturn(run func(con return _c } - +// Included provides a mock function with given fields: ctx, height, namespace, proof, commitment func (_m *MockCelestiaRPCClient) Included(ctx context.Context, height uint64, namespace share.Namespace, proof *blob.Proof, commitment blob.Commitment) (bool, error) { ret := _m.Called(ctx, height, namespace, proof, commitment) @@ -356,17 +356,17 @@ func (_m *MockCelestiaRPCClient) Included(ctx context.Context, height uint64, na return r0, r1 } - +// MockCelestiaRPCClient_Included_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Included' type MockCelestiaRPCClient_Included_Call struct { *mock.Call } - - - - - - +// Included is a helper method to define mock.On call +// - ctx context.Context +// - height uint64 +// - namespace share.Namespace +// - proof *blob.Proof +// - commitment blob.Commitment func (_e *MockCelestiaRPCClient_Expecter) Included(ctx interface{}, height interface{}, namespace interface{}, proof interface{}, commitment interface{}) *MockCelestiaRPCClient_Included_Call { return &MockCelestiaRPCClient_Included_Call{Call: _e.mock.On("Included", ctx, height, namespace, proof, commitment)} } @@ -388,7 +388,7 @@ func (_c *MockCelestiaRPCClient_Included_Call) RunAndReturn(run func(context.Con return _c } - +// Submit provides a mock function with given fields: ctx, blobs, options func (_m *MockCelestiaRPCClient) 
Submit(ctx context.Context, blobs []*blob.Blob, options *blob.SubmitOptions) (uint64, error) { ret := _m.Called(ctx, blobs, options) @@ -416,15 +416,15 @@ func (_m *MockCelestiaRPCClient) Submit(ctx context.Context, blobs []*blob.Blob, return r0, r1 } - +// MockCelestiaRPCClient_Submit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Submit' type MockCelestiaRPCClient_Submit_Call struct { *mock.Call } - - - - +// Submit is a helper method to define mock.On call +// - ctx context.Context +// - blobs []*blob.Blob +// - options *blob.SubmitOptions func (_e *MockCelestiaRPCClient_Expecter) Submit(ctx interface{}, blobs interface{}, options interface{}) *MockCelestiaRPCClient_Submit_Call { return &MockCelestiaRPCClient_Submit_Call{Call: _e.mock.On("Submit", ctx, blobs, options)} } @@ -446,8 +446,8 @@ func (_c *MockCelestiaRPCClient_Submit_Call) RunAndReturn(run func(context.Conte return _c } - - +// NewMockCelestiaRPCClient creates a new instance of MockCelestiaRPCClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. func NewMockCelestiaRPCClient(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/da/mock_DataAvailabilityLayerClient.go b/mocks/github.com/dymensionxyz/dymint/da/mock_DataAvailabilityLayerClient.go index c116222ed..9c20b8b5c 100644 --- a/mocks/github.com/dymensionxyz/dymint/da/mock_DataAvailabilityLayerClient.go +++ b/mocks/github.com/dymensionxyz/dymint/da/mock_DataAvailabilityLayerClient.go @@ -1,4 +1,4 @@ - +// Code generated by mockery v2.42.3. DO NOT EDIT. package da @@ -13,7 +13,7 @@ import ( types "github.com/dymensionxyz/dymint/types" ) - +// MockDataAvailabilityLayerClient is an autogenerated mock type for the DataAvailabilityLayerClient type type MockDataAvailabilityLayerClient struct { mock.Mock } @@ -26,7 +26,7 @@ func (_m *MockDataAvailabilityLayerClient) EXPECT() *MockDataAvailabilityLayerCl return &MockDataAvailabilityLayerClient_Expecter{mock: &_m.Mock} } - +// CheckBatchAvailability provides a mock function with given fields: daMetaData func (_m *MockDataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASubmitMetaData) da.ResultCheckBatch { ret := _m.Called(daMetaData) @@ -44,13 +44,13 @@ func (_m *MockDataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da return r0 } - +// MockDataAvailabilityLayerClient_CheckBatchAvailability_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckBatchAvailability' type MockDataAvailabilityLayerClient_CheckBatchAvailability_Call struct { *mock.Call } - - +// CheckBatchAvailability is a helper method to define mock.On call +// - daMetaData *da.DASubmitMetaData func (_e *MockDataAvailabilityLayerClient_Expecter) CheckBatchAvailability(daMetaData interface{}) *MockDataAvailabilityLayerClient_CheckBatchAvailability_Call { return &MockDataAvailabilityLayerClient_CheckBatchAvailability_Call{Call: _e.mock.On("CheckBatchAvailability", daMetaData)} } @@ -72,7 +72,7 @@ func (_c *MockDataAvailabilityLayerClient_CheckBatchAvailability_Call) RunAndRet return _c } - +// GetClientType provides a mock function with given fields: func (_m *MockDataAvailabilityLayerClient) GetClientType() da.Client { ret := _m.Called() @@ -90,12 +90,12 @@ func (_m *MockDataAvailabilityLayerClient) GetClientType() da.Client { return r0 } - +// MockDataAvailabilityLayerClient_GetClientType_Call is a 
*mock.Call that shadows Run/Return methods with type explicit version for method 'GetClientType' type MockDataAvailabilityLayerClient_GetClientType_Call struct { *mock.Call } - +// GetClientType is a helper method to define mock.On call func (_e *MockDataAvailabilityLayerClient_Expecter) GetClientType() *MockDataAvailabilityLayerClient_GetClientType_Call { return &MockDataAvailabilityLayerClient_GetClientType_Call{Call: _e.mock.On("GetClientType")} } @@ -117,7 +117,7 @@ func (_c *MockDataAvailabilityLayerClient_GetClientType_Call) RunAndReturn(run f return _c } - +// GetMaxBlobSizeBytes provides a mock function with given fields: func (_m *MockDataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { ret := _m.Called() @@ -135,12 +135,12 @@ func (_m *MockDataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { return r0 } - +// MockDataAvailabilityLayerClient_GetMaxBlobSizeBytes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetMaxBlobSizeBytes' type MockDataAvailabilityLayerClient_GetMaxBlobSizeBytes_Call struct { *mock.Call } - +// GetMaxBlobSizeBytes is a helper method to define mock.On call func (_e *MockDataAvailabilityLayerClient_Expecter) GetMaxBlobSizeBytes() *MockDataAvailabilityLayerClient_GetMaxBlobSizeBytes_Call { return &MockDataAvailabilityLayerClient_GetMaxBlobSizeBytes_Call{Call: _e.mock.On("GetMaxBlobSizeBytes")} } @@ -162,7 +162,7 @@ func (_c *MockDataAvailabilityLayerClient_GetMaxBlobSizeBytes_Call) RunAndReturn return _c } - +// GetSignerBalance provides a mock function with given fields: func (_m *MockDataAvailabilityLayerClient) GetSignerBalance() (da.Balance, error) { ret := _m.Called() @@ -190,12 +190,12 @@ func (_m *MockDataAvailabilityLayerClient) GetSignerBalance() (da.Balance, error return r0, r1 } - +// MockDataAvailabilityLayerClient_GetSignerBalance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSignerBalance' type MockDataAvailabilityLayerClient_GetSignerBalance_Call struct { *mock.Call } - +// GetSignerBalance is a helper method to define mock.On call func (_e *MockDataAvailabilityLayerClient_Expecter) GetSignerBalance() *MockDataAvailabilityLayerClient_GetSignerBalance_Call { return &MockDataAvailabilityLayerClient_GetSignerBalance_Call{Call: _e.mock.On("GetSignerBalance")} } @@ -217,7 +217,7 @@ func (_c *MockDataAvailabilityLayerClient_GetSignerBalance_Call) RunAndReturn(ru return _c } - +// Init provides a mock function with given fields: config, pubsubServer, kvStore, logger, options func (_m *MockDataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.Server, kvStore store.KV, logger types.Logger, options ...da.Option) error { _va := make([]interface{}, len(options)) for _i := range options { @@ -242,17 +242,17 @@ func (_m *MockDataAvailabilityLayerClient) Init(config []byte, pubsubServer *pub return r0 } - +// MockDataAvailabilityLayerClient_Init_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Init' type MockDataAvailabilityLayerClient_Init_Call struct { *mock.Call } - - - - - - +// Init is a helper method to define mock.On call +// - config []byte +// - pubsubServer *pubsub.Server +// - kvStore store.KV +// - logger types.Logger +// - options ...da.Option func (_e *MockDataAvailabilityLayerClient_Expecter) Init(config interface{}, pubsubServer interface{}, kvStore interface{}, logger interface{}, options ...interface{}) *MockDataAvailabilityLayerClient_Init_Call { return 
&MockDataAvailabilityLayerClient_Init_Call{Call: _e.mock.On("Init", append([]interface{}{config, pubsubServer, kvStore, logger}, options...)...)} @@ -281,7 +281,7 @@ func (_c *MockDataAvailabilityLayerClient_Init_Call) RunAndReturn(run func([]byt return _c } - +// Start provides a mock function with given fields: func (_m *MockDataAvailabilityLayerClient) Start() error { ret := _m.Called() @@ -299,12 +299,12 @@ func (_m *MockDataAvailabilityLayerClient) Start() error { return r0 } - +// MockDataAvailabilityLayerClient_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' type MockDataAvailabilityLayerClient_Start_Call struct { *mock.Call } - +// Start is a helper method to define mock.On call func (_e *MockDataAvailabilityLayerClient_Expecter) Start() *MockDataAvailabilityLayerClient_Start_Call { return &MockDataAvailabilityLayerClient_Start_Call{Call: _e.mock.On("Start")} } @@ -326,7 +326,7 @@ func (_c *MockDataAvailabilityLayerClient_Start_Call) RunAndReturn(run func() er return _c } - +// Stop provides a mock function with given fields: func (_m *MockDataAvailabilityLayerClient) Stop() error { ret := _m.Called() @@ -344,12 +344,12 @@ func (_m *MockDataAvailabilityLayerClient) Stop() error { return r0 } - +// MockDataAvailabilityLayerClient_Stop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stop' type MockDataAvailabilityLayerClient_Stop_Call struct { *mock.Call } - +// Stop is a helper method to define mock.On call func (_e *MockDataAvailabilityLayerClient_Expecter) Stop() *MockDataAvailabilityLayerClient_Stop_Call { return &MockDataAvailabilityLayerClient_Stop_Call{Call: _e.mock.On("Stop")} } @@ -371,7 +371,7 @@ func (_c *MockDataAvailabilityLayerClient_Stop_Call) RunAndReturn(run func() err return _c } - +// SubmitBatch provides a mock function with given fields: batch func (_m *MockDataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultSubmitBatch { ret := _m.Called(batch) @@ -389,13 +389,13 @@ func (_m *MockDataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.Re return r0 } - +// MockDataAvailabilityLayerClient_SubmitBatch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubmitBatch' type MockDataAvailabilityLayerClient_SubmitBatch_Call struct { *mock.Call } - - +// SubmitBatch is a helper method to define mock.On call +// - batch *types.Batch func (_e *MockDataAvailabilityLayerClient_Expecter) SubmitBatch(batch interface{}) *MockDataAvailabilityLayerClient_SubmitBatch_Call { return &MockDataAvailabilityLayerClient_SubmitBatch_Call{Call: _e.mock.On("SubmitBatch", batch)} } @@ -417,17 +417,17 @@ func (_c *MockDataAvailabilityLayerClient_SubmitBatch_Call) RunAndReturn(run fun return _c } - +// WaitForSyncing provides a mock function with given fields: func (_m *MockDataAvailabilityLayerClient) WaitForSyncing() { _m.Called() } - +// MockDataAvailabilityLayerClient_WaitForSyncing_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WaitForSyncing' type MockDataAvailabilityLayerClient_WaitForSyncing_Call struct { *mock.Call } - +// WaitForSyncing is a helper method to define mock.On call func (_e *MockDataAvailabilityLayerClient_Expecter) WaitForSyncing() *MockDataAvailabilityLayerClient_WaitForSyncing_Call { return &MockDataAvailabilityLayerClient_WaitForSyncing_Call{Call: _e.mock.On("WaitForSyncing")} } @@ -449,8 +449,8 @@ func (_c *MockDataAvailabilityLayerClient_WaitForSyncing_Call) 
RunAndReturn(run return _c } - - +// NewMockDataAvailabilityLayerClient creates a new instance of MockDataAvailabilityLayerClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. func NewMockDataAvailabilityLayerClient(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/p2p/mock_ProposerGetter.go b/mocks/github.com/dymensionxyz/dymint/p2p/mock_ProposerGetter.go index de07e1a71..5396f942b 100644 --- a/mocks/github.com/dymensionxyz/dymint/p2p/mock_ProposerGetter.go +++ b/mocks/github.com/dymensionxyz/dymint/p2p/mock_ProposerGetter.go @@ -1,4 +1,4 @@ - +// Code generated by mockery v2.42.3. DO NOT EDIT. package p2p @@ -7,7 +7,7 @@ import ( crypto "github.com/tendermint/tendermint/crypto" ) - +// MockProposerGetter is an autogenerated mock type for the ProposerGetter type type MockProposerGetter struct { mock.Mock } @@ -20,7 +20,7 @@ func (_m *MockProposerGetter) EXPECT() *MockProposerGetter_Expecter { return &MockProposerGetter_Expecter{mock: &_m.Mock} } - +// GetProposerPubKey provides a mock function with given fields: func (_m *MockProposerGetter) GetProposerPubKey() crypto.PubKey { ret := _m.Called() @@ -40,12 +40,12 @@ func (_m *MockProposerGetter) GetProposerPubKey() crypto.PubKey { return r0 } - +// MockProposerGetter_GetProposerPubKey_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProposerPubKey' type MockProposerGetter_GetProposerPubKey_Call struct { *mock.Call } - +// GetProposerPubKey is a helper method to define mock.On call func (_e *MockProposerGetter_Expecter) GetProposerPubKey() *MockProposerGetter_GetProposerPubKey_Call { return &MockProposerGetter_GetProposerPubKey_Call{Call: _e.mock.On("GetProposerPubKey")} } @@ -67,7 +67,7 @@ func (_c *MockProposerGetter_GetProposerPubKey_Call) RunAndReturn(run func() cry return _c } - +// GetRevision provides a mock function with given fields: func (_m *MockProposerGetter) GetRevision() uint64 { ret := _m.Called() @@ -85,12 +85,12 @@ func (_m *MockProposerGetter) GetRevision() uint64 { return r0 } - +// MockProposerGetter_GetRevision_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRevision' type MockProposerGetter_GetRevision_Call struct { *mock.Call } - +// GetRevision is a helper method to define mock.On call func (_e *MockProposerGetter_Expecter) GetRevision() *MockProposerGetter_GetRevision_Call { return &MockProposerGetter_GetRevision_Call{Call: _e.mock.On("GetRevision")} } @@ -112,8 +112,8 @@ func (_c *MockProposerGetter_GetRevision_Call) RunAndReturn(run func() uint64) * return _c } - - +// NewMockProposerGetter creates a new instance of MockProposerGetter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. func NewMockProposerGetter(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/p2p/mock_StateGetter.go b/mocks/github.com/dymensionxyz/dymint/p2p/mock_StateGetter.go index 477be16f8..4377638cb 100644 --- a/mocks/github.com/dymensionxyz/dymint/p2p/mock_StateGetter.go +++ b/mocks/github.com/dymensionxyz/dymint/p2p/mock_StateGetter.go @@ -1,4 +1,4 @@ - +// Code generated by mockery v2.42.3. DO NOT EDIT. 
package p2p @@ -7,7 +7,7 @@ import ( crypto "github.com/tendermint/tendermint/crypto" ) - +// MockStateGetter is an autogenerated mock type for the StateGetter type type MockStateGetter struct { mock.Mock } @@ -20,7 +20,7 @@ func (_m *MockStateGetter) EXPECT() *MockStateGetter_Expecter { return &MockStateGetter_Expecter{mock: &_m.Mock} } - +// GetProposerPubKey provides a mock function with given fields: func (_m *MockStateGetter) GetProposerPubKey() crypto.PubKey { ret := _m.Called() @@ -40,12 +40,12 @@ func (_m *MockStateGetter) GetProposerPubKey() crypto.PubKey { return r0 } - +// MockStateGetter_GetProposerPubKey_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProposerPubKey' type MockStateGetter_GetProposerPubKey_Call struct { *mock.Call } - +// GetProposerPubKey is a helper method to define mock.On call func (_e *MockStateGetter_Expecter) GetProposerPubKey() *MockStateGetter_GetProposerPubKey_Call { return &MockStateGetter_GetProposerPubKey_Call{Call: _e.mock.On("GetProposerPubKey")} } @@ -67,7 +67,7 @@ func (_c *MockStateGetter_GetProposerPubKey_Call) RunAndReturn(run func() crypto return _c } - +// GetRevision provides a mock function with given fields: func (_m *MockStateGetter) GetRevision() uint64 { ret := _m.Called() @@ -85,12 +85,12 @@ func (_m *MockStateGetter) GetRevision() uint64 { return r0 } - +// MockStateGetter_GetRevision_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRevision' type MockStateGetter_GetRevision_Call struct { *mock.Call } - +// GetRevision is a helper method to define mock.On call func (_e *MockStateGetter_Expecter) GetRevision() *MockStateGetter_GetRevision_Call { return &MockStateGetter_GetRevision_Call{Call: _e.mock.On("GetRevision")} } @@ -112,8 +112,8 @@ func (_c *MockStateGetter_GetRevision_Call) RunAndReturn(run func() uint64) *Moc return _c } - - +// NewMockStateGetter creates a new instance of MockStateGetter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. func NewMockStateGetter(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/settlement/dymension/mock_CosmosClient.go b/mocks/github.com/dymensionxyz/dymint/settlement/dymension/mock_CosmosClient.go index f79c856c1..ade8efe9b 100644 --- a/mocks/github.com/dymensionxyz/dymint/settlement/dymension/mock_CosmosClient.go +++ b/mocks/github.com/dymensionxyz/dymint/settlement/dymension/mock_CosmosClient.go @@ -1,4 +1,4 @@ - +// Code generated by mockery v2.42.3. DO NOT EDIT. 
package dymension @@ -22,7 +22,7 @@ import ( types "github.com/cosmos/cosmos-sdk/types" ) - +// MockCosmosClient is an autogenerated mock type for the CosmosClient type type MockCosmosClient struct { mock.Mock } @@ -35,7 +35,7 @@ func (_m *MockCosmosClient) EXPECT() *MockCosmosClient_Expecter { return &MockCosmosClient_Expecter{mock: &_m.Mock} } - +// BroadcastTx provides a mock function with given fields: accountName, msgs func (_m *MockCosmosClient) BroadcastTx(accountName string, msgs ...types.Msg) (cosmosclient.Response, error) { _va := make([]interface{}, len(msgs)) for _i := range msgs { @@ -70,14 +70,14 @@ func (_m *MockCosmosClient) BroadcastTx(accountName string, msgs ...types.Msg) ( return r0, r1 } - +// MockCosmosClient_BroadcastTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BroadcastTx' type MockCosmosClient_BroadcastTx_Call struct { *mock.Call } - - - +// BroadcastTx is a helper method to define mock.On call +// - accountName string +// - msgs ...types.Msg func (_e *MockCosmosClient_Expecter) BroadcastTx(accountName interface{}, msgs ...interface{}) *MockCosmosClient_BroadcastTx_Call { return &MockCosmosClient_BroadcastTx_Call{Call: _e.mock.On("BroadcastTx", append([]interface{}{accountName}, msgs...)...)} @@ -106,7 +106,7 @@ func (_c *MockCosmosClient_BroadcastTx_Call) RunAndReturn(run func(string, ...ty return _c } - +// Context provides a mock function with given fields: func (_m *MockCosmosClient) Context() client.Context { ret := _m.Called() @@ -124,12 +124,12 @@ func (_m *MockCosmosClient) Context() client.Context { return r0 } - +// MockCosmosClient_Context_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Context' type MockCosmosClient_Context_Call struct { *mock.Call } - +// Context is a helper method to define mock.On call func (_e *MockCosmosClient_Expecter) Context() *MockCosmosClient_Context_Call { return &MockCosmosClient_Context_Call{Call: _e.mock.On("Context")} } @@ -151,7 +151,7 @@ func (_c *MockCosmosClient_Context_Call) RunAndReturn(run func() client.Context) return _c } - +// EventListenerQuit provides a mock function with given fields: func (_m *MockCosmosClient) EventListenerQuit() <-chan struct{} { ret := _m.Called() @@ -171,12 +171,12 @@ func (_m *MockCosmosClient) EventListenerQuit() <-chan struct{} { return r0 } - +// MockCosmosClient_EventListenerQuit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EventListenerQuit' type MockCosmosClient_EventListenerQuit_Call struct { *mock.Call } - +// EventListenerQuit is a helper method to define mock.On call func (_e *MockCosmosClient_Expecter) EventListenerQuit() *MockCosmosClient_EventListenerQuit_Call { return &MockCosmosClient_EventListenerQuit_Call{Call: _e.mock.On("EventListenerQuit")} } @@ -198,7 +198,7 @@ func (_c *MockCosmosClient_EventListenerQuit_Call) RunAndReturn(run func() <-cha return _c } - +// GetAccount provides a mock function with given fields: accountName func (_m *MockCosmosClient) GetAccount(accountName string) (cosmosaccount.Account, error) { ret := _m.Called(accountName) @@ -226,13 +226,13 @@ func (_m *MockCosmosClient) GetAccount(accountName string) (cosmosaccount.Accoun return r0, r1 } - +// MockCosmosClient_GetAccount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAccount' type MockCosmosClient_GetAccount_Call struct { *mock.Call } - - +// GetAccount is a helper method to define mock.On call +// - 
accountName string func (_e *MockCosmosClient_Expecter) GetAccount(accountName interface{}) *MockCosmosClient_GetAccount_Call { return &MockCosmosClient_GetAccount_Call{Call: _e.mock.On("GetAccount", accountName)} } @@ -254,7 +254,7 @@ func (_c *MockCosmosClient_GetAccount_Call) RunAndReturn(run func(string) (cosmo return _c } - +// GetBalance provides a mock function with given fields: ctx, accountName, denom func (_m *MockCosmosClient) GetBalance(ctx context.Context, accountName string, denom string) (*types.Coin, error) { ret := _m.Called(ctx, accountName, denom) @@ -284,15 +284,15 @@ func (_m *MockCosmosClient) GetBalance(ctx context.Context, accountName string, return r0, r1 } - +// MockCosmosClient_GetBalance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBalance' type MockCosmosClient_GetBalance_Call struct { *mock.Call } - - - - +// GetBalance is a helper method to define mock.On call +// - ctx context.Context +// - accountName string +// - denom string func (_e *MockCosmosClient_Expecter) GetBalance(ctx interface{}, accountName interface{}, denom interface{}) *MockCosmosClient_GetBalance_Call { return &MockCosmosClient_GetBalance_Call{Call: _e.mock.On("GetBalance", ctx, accountName, denom)} } @@ -314,7 +314,7 @@ func (_c *MockCosmosClient_GetBalance_Call) RunAndReturn(run func(context.Contex return _c } - +// GetRollappClient provides a mock function with given fields: func (_m *MockCosmosClient) GetRollappClient() rollapp.QueryClient { ret := _m.Called() @@ -334,12 +334,12 @@ func (_m *MockCosmosClient) GetRollappClient() rollapp.QueryClient { return r0 } - +// MockCosmosClient_GetRollappClient_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRollappClient' type MockCosmosClient_GetRollappClient_Call struct { *mock.Call } - +// GetRollappClient is a helper method to define mock.On call func (_e *MockCosmosClient_Expecter) GetRollappClient() *MockCosmosClient_GetRollappClient_Call { return &MockCosmosClient_GetRollappClient_Call{Call: _e.mock.On("GetRollappClient")} } @@ -361,7 +361,7 @@ func (_c *MockCosmosClient_GetRollappClient_Call) RunAndReturn(run func() rollap return _c } - +// GetSequencerClient provides a mock function with given fields: func (_m *MockCosmosClient) GetSequencerClient() sequencer.QueryClient { ret := _m.Called() @@ -381,12 +381,12 @@ func (_m *MockCosmosClient) GetSequencerClient() sequencer.QueryClient { return r0 } - +// MockCosmosClient_GetSequencerClient_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSequencerClient' type MockCosmosClient_GetSequencerClient_Call struct { *mock.Call } - +// GetSequencerClient is a helper method to define mock.On call func (_e *MockCosmosClient_Expecter) GetSequencerClient() *MockCosmosClient_GetSequencerClient_Call { return &MockCosmosClient_GetSequencerClient_Call{Call: _e.mock.On("GetSequencerClient")} } @@ -408,7 +408,7 @@ func (_c *MockCosmosClient_GetSequencerClient_Call) RunAndReturn(run func() sequ return _c } - +// StartEventListener provides a mock function with given fields: func (_m *MockCosmosClient) StartEventListener() error { ret := _m.Called() @@ -426,12 +426,12 @@ func (_m *MockCosmosClient) StartEventListener() error { return r0 } - +// MockCosmosClient_StartEventListener_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StartEventListener' type MockCosmosClient_StartEventListener_Call struct { *mock.Call } - +// 
StartEventListener is a helper method to define mock.On call func (_e *MockCosmosClient_Expecter) StartEventListener() *MockCosmosClient_StartEventListener_Call { return &MockCosmosClient_StartEventListener_Call{Call: _e.mock.On("StartEventListener")} } @@ -453,7 +453,7 @@ func (_c *MockCosmosClient_StartEventListener_Call) RunAndReturn(run func() erro return _c } - +// StopEventListener provides a mock function with given fields: func (_m *MockCosmosClient) StopEventListener() error { ret := _m.Called() @@ -471,12 +471,12 @@ func (_m *MockCosmosClient) StopEventListener() error { return r0 } - +// MockCosmosClient_StopEventListener_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StopEventListener' type MockCosmosClient_StopEventListener_Call struct { *mock.Call } - +// StopEventListener is a helper method to define mock.On call func (_e *MockCosmosClient_Expecter) StopEventListener() *MockCosmosClient_StopEventListener_Call { return &MockCosmosClient_StopEventListener_Call{Call: _e.mock.On("StopEventListener")} } @@ -498,7 +498,7 @@ func (_c *MockCosmosClient_StopEventListener_Call) RunAndReturn(run func() error return _c } - +// SubscribeToEvents provides a mock function with given fields: ctx, subscriber, query, outCapacity func (_m *MockCosmosClient) SubscribeToEvents(ctx context.Context, subscriber string, query string, outCapacity ...int) (<-chan coretypes.ResultEvent, error) { _va := make([]interface{}, len(outCapacity)) for _i := range outCapacity { @@ -535,16 +535,16 @@ func (_m *MockCosmosClient) SubscribeToEvents(ctx context.Context, subscriber st return r0, r1 } - +// MockCosmosClient_SubscribeToEvents_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeToEvents' type MockCosmosClient_SubscribeToEvents_Call struct { *mock.Call } - - - - - +// SubscribeToEvents is a helper method to define mock.On call +// - ctx context.Context +// - subscriber string +// - query string +// - outCapacity ...int func (_e *MockCosmosClient_Expecter) SubscribeToEvents(ctx interface{}, subscriber interface{}, query interface{}, outCapacity ...interface{}) *MockCosmosClient_SubscribeToEvents_Call { return &MockCosmosClient_SubscribeToEvents_Call{Call: _e.mock.On("SubscribeToEvents", append([]interface{}{ctx, subscriber, query}, outCapacity...)...)} @@ -573,7 +573,7 @@ func (_c *MockCosmosClient_SubscribeToEvents_Call) RunAndReturn(run func(context return _c } - +// UnsubscribeAll provides a mock function with given fields: ctx, subscriber func (_m *MockCosmosClient) UnsubscribeAll(ctx context.Context, subscriber string) error { ret := _m.Called(ctx, subscriber) @@ -591,14 +591,14 @@ func (_m *MockCosmosClient) UnsubscribeAll(ctx context.Context, subscriber strin return r0 } - +// MockCosmosClient_UnsubscribeAll_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UnsubscribeAll' type MockCosmosClient_UnsubscribeAll_Call struct { *mock.Call } - - - +// UnsubscribeAll is a helper method to define mock.On call +// - ctx context.Context +// - subscriber string func (_e *MockCosmosClient_Expecter) UnsubscribeAll(ctx interface{}, subscriber interface{}) *MockCosmosClient_UnsubscribeAll_Call { return &MockCosmosClient_UnsubscribeAll_Call{Call: _e.mock.On("UnsubscribeAll", ctx, subscriber)} } @@ -620,8 +620,8 @@ func (_c *MockCosmosClient_UnsubscribeAll_Call) RunAndReturn(run func(context.Co return _c } - - +// NewMockCosmosClient creates a new instance of MockCosmosClient. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. func NewMockCosmosClient(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/settlement/mock_ClientI.go b/mocks/github.com/dymensionxyz/dymint/settlement/mock_ClientI.go index c41be7a74..a609b4d42 100644 --- a/mocks/github.com/dymensionxyz/dymint/settlement/mock_ClientI.go +++ b/mocks/github.com/dymensionxyz/dymint/settlement/mock_ClientI.go @@ -1,4 +1,4 @@ - +// Code generated by mockery v2.42.3. DO NOT EDIT. package settlement @@ -15,7 +15,7 @@ import ( types "github.com/dymensionxyz/dymint/types" ) - +// MockClientI is an autogenerated mock type for the ClientI type type MockClientI struct { mock.Mock } @@ -28,7 +28,7 @@ func (_m *MockClientI) EXPECT() *MockClientI_Expecter { return &MockClientI_Expecter{mock: &_m.Mock} } - +// GetAllSequencers provides a mock function with given fields: func (_m *MockClientI) GetAllSequencers() ([]types.Sequencer, error) { ret := _m.Called() @@ -58,12 +58,12 @@ func (_m *MockClientI) GetAllSequencers() ([]types.Sequencer, error) { return r0, r1 } - +// MockClientI_GetAllSequencers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllSequencers' type MockClientI_GetAllSequencers_Call struct { *mock.Call } - +// GetAllSequencers is a helper method to define mock.On call func (_e *MockClientI_Expecter) GetAllSequencers() *MockClientI_GetAllSequencers_Call { return &MockClientI_GetAllSequencers_Call{Call: _e.mock.On("GetAllSequencers")} } @@ -85,7 +85,7 @@ func (_c *MockClientI_GetAllSequencers_Call) RunAndReturn(run func() ([]types.Se return _c } - +// GetBatchAtHeight provides a mock function with given fields: index func (_m *MockClientI) GetBatchAtHeight(index uint64) (*settlement.ResultRetrieveBatch, error) { ret := _m.Called(index) @@ -115,13 +115,13 @@ func (_m *MockClientI) GetBatchAtHeight(index uint64) (*settlement.ResultRetriev return r0, r1 } - +// MockClientI_GetBatchAtHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBatchAtHeight' type MockClientI_GetBatchAtHeight_Call struct { *mock.Call } - - +// GetBatchAtHeight is a helper method to define mock.On call +// - index uint64 func (_e *MockClientI_Expecter) GetBatchAtHeight(index interface{}) *MockClientI_GetBatchAtHeight_Call { return &MockClientI_GetBatchAtHeight_Call{Call: _e.mock.On("GetBatchAtHeight", index)} } @@ -143,7 +143,7 @@ func (_c *MockClientI_GetBatchAtHeight_Call) RunAndReturn(run func(uint64) (*set return _c } - +// GetBatchAtIndex provides a mock function with given fields: index func (_m *MockClientI) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, error) { ret := _m.Called(index) @@ -173,13 +173,13 @@ func (_m *MockClientI) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieve return r0, r1 } - +// MockClientI_GetBatchAtIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBatchAtIndex' type MockClientI_GetBatchAtIndex_Call struct { *mock.Call } - - +// GetBatchAtIndex is a helper method to define mock.On call +// - index uint64 func (_e *MockClientI_Expecter) GetBatchAtIndex(index interface{}) *MockClientI_GetBatchAtIndex_Call { return &MockClientI_GetBatchAtIndex_Call{Call: _e.mock.On("GetBatchAtIndex", index)} } @@ -201,7 +201,7 @@ func (_c *MockClientI_GetBatchAtIndex_Call) RunAndReturn(run func(uint64) 
(*sett return _c } - +// GetBondedSequencers provides a mock function with given fields: func (_m *MockClientI) GetBondedSequencers() ([]types.Sequencer, error) { ret := _m.Called() @@ -231,12 +231,12 @@ func (_m *MockClientI) GetBondedSequencers() ([]types.Sequencer, error) { return r0, r1 } - +// MockClientI_GetBondedSequencers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBondedSequencers' type MockClientI_GetBondedSequencers_Call struct { *mock.Call } - +// GetBondedSequencers is a helper method to define mock.On call func (_e *MockClientI_Expecter) GetBondedSequencers() *MockClientI_GetBondedSequencers_Call { return &MockClientI_GetBondedSequencers_Call{Call: _e.mock.On("GetBondedSequencers")} } @@ -258,7 +258,7 @@ func (_c *MockClientI_GetBondedSequencers_Call) RunAndReturn(run func() ([]types return _c } - +// GetLatestBatch provides a mock function with given fields: func (_m *MockClientI) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { ret := _m.Called() @@ -288,12 +288,12 @@ func (_m *MockClientI) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) return r0, r1 } - +// MockClientI_GetLatestBatch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestBatch' type MockClientI_GetLatestBatch_Call struct { *mock.Call } - +// GetLatestBatch is a helper method to define mock.On call func (_e *MockClientI_Expecter) GetLatestBatch() *MockClientI_GetLatestBatch_Call { return &MockClientI_GetLatestBatch_Call{Call: _e.mock.On("GetLatestBatch")} } @@ -315,7 +315,7 @@ func (_c *MockClientI_GetLatestBatch_Call) RunAndReturn(run func() (*settlement. return _c } - +// GetLatestFinalizedHeight provides a mock function with given fields: func (_m *MockClientI) GetLatestFinalizedHeight() (uint64, error) { ret := _m.Called() @@ -343,12 +343,12 @@ func (_m *MockClientI) GetLatestFinalizedHeight() (uint64, error) { return r0, r1 } - +// MockClientI_GetLatestFinalizedHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestFinalizedHeight' type MockClientI_GetLatestFinalizedHeight_Call struct { *mock.Call } - +// GetLatestFinalizedHeight is a helper method to define mock.On call func (_e *MockClientI_Expecter) GetLatestFinalizedHeight() *MockClientI_GetLatestFinalizedHeight_Call { return &MockClientI_GetLatestFinalizedHeight_Call{Call: _e.mock.On("GetLatestFinalizedHeight")} } @@ -370,7 +370,7 @@ func (_c *MockClientI_GetLatestFinalizedHeight_Call) RunAndReturn(run func() (ui return _c } - +// GetLatestHeight provides a mock function with given fields: func (_m *MockClientI) GetLatestHeight() (uint64, error) { ret := _m.Called() @@ -398,12 +398,12 @@ func (_m *MockClientI) GetLatestHeight() (uint64, error) { return r0, r1 } - +// MockClientI_GetLatestHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestHeight' type MockClientI_GetLatestHeight_Call struct { *mock.Call } - +// GetLatestHeight is a helper method to define mock.On call func (_e *MockClientI_Expecter) GetLatestHeight() *MockClientI_GetLatestHeight_Call { return &MockClientI_GetLatestHeight_Call{Call: _e.mock.On("GetLatestHeight")} } @@ -425,7 +425,7 @@ func (_c *MockClientI_GetLatestHeight_Call) RunAndReturn(run func() (uint64, err return _c } - +// GetNextProposer provides a mock function with given fields: func (_m *MockClientI) GetNextProposer() (*types.Sequencer, error) { ret := _m.Called() @@ -455,12 +455,12 @@ func (_m 
*MockClientI) GetNextProposer() (*types.Sequencer, error) { return r0, r1 } - +// MockClientI_GetNextProposer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNextProposer' type MockClientI_GetNextProposer_Call struct { *mock.Call } - +// GetNextProposer is a helper method to define mock.On call func (_e *MockClientI_Expecter) GetNextProposer() *MockClientI_GetNextProposer_Call { return &MockClientI_GetNextProposer_Call{Call: _e.mock.On("GetNextProposer")} } @@ -482,7 +482,7 @@ func (_c *MockClientI_GetNextProposer_Call) RunAndReturn(run func() (*types.Sequ return _c } - +// GetObsoleteDrs provides a mock function with given fields: func (_m *MockClientI) GetObsoleteDrs() ([]uint32, error) { ret := _m.Called() @@ -512,12 +512,12 @@ func (_m *MockClientI) GetObsoleteDrs() ([]uint32, error) { return r0, r1 } - +// MockClientI_GetObsoleteDrs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetObsoleteDrs' type MockClientI_GetObsoleteDrs_Call struct { *mock.Call } - +// GetObsoleteDrs is a helper method to define mock.On call func (_e *MockClientI_Expecter) GetObsoleteDrs() *MockClientI_GetObsoleteDrs_Call { return &MockClientI_GetObsoleteDrs_Call{Call: _e.mock.On("GetObsoleteDrs")} } @@ -539,7 +539,7 @@ func (_c *MockClientI_GetObsoleteDrs_Call) RunAndReturn(run func() ([]uint32, er return _c } - +// GetProposerAtHeight provides a mock function with given fields: height func (_m *MockClientI) GetProposerAtHeight(height int64) (*types.Sequencer, error) { ret := _m.Called(height) @@ -569,13 +569,13 @@ func (_m *MockClientI) GetProposerAtHeight(height int64) (*types.Sequencer, erro return r0, r1 } - +// MockClientI_GetProposerAtHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProposerAtHeight' type MockClientI_GetProposerAtHeight_Call struct { *mock.Call } - - +// GetProposerAtHeight is a helper method to define mock.On call +// - height int64 func (_e *MockClientI_Expecter) GetProposerAtHeight(height interface{}) *MockClientI_GetProposerAtHeight_Call { return &MockClientI_GetProposerAtHeight_Call{Call: _e.mock.On("GetProposerAtHeight", height)} } @@ -597,7 +597,7 @@ func (_c *MockClientI_GetProposerAtHeight_Call) RunAndReturn(run func(int64) (*t return _c } - +// GetRollapp provides a mock function with given fields: func (_m *MockClientI) GetRollapp() (*types.Rollapp, error) { ret := _m.Called() @@ -627,12 +627,12 @@ func (_m *MockClientI) GetRollapp() (*types.Rollapp, error) { return r0, r1 } - +// MockClientI_GetRollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRollapp' type MockClientI_GetRollapp_Call struct { *mock.Call } - +// GetRollapp is a helper method to define mock.On call func (_e *MockClientI_Expecter) GetRollapp() *MockClientI_GetRollapp_Call { return &MockClientI_GetRollapp_Call{Call: _e.mock.On("GetRollapp")} } @@ -654,7 +654,7 @@ func (_c *MockClientI_GetRollapp_Call) RunAndReturn(run func() (*types.Rollapp, return _c } - +// GetSequencerByAddress provides a mock function with given fields: address func (_m *MockClientI) GetSequencerByAddress(address string) (types.Sequencer, error) { ret := _m.Called(address) @@ -682,13 +682,13 @@ func (_m *MockClientI) GetSequencerByAddress(address string) (types.Sequencer, e return r0, r1 } - +// MockClientI_GetSequencerByAddress_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSequencerByAddress' type 
MockClientI_GetSequencerByAddress_Call struct { *mock.Call } - - +// GetSequencerByAddress is a helper method to define mock.On call +// - address string func (_e *MockClientI_Expecter) GetSequencerByAddress(address interface{}) *MockClientI_GetSequencerByAddress_Call { return &MockClientI_GetSequencerByAddress_Call{Call: _e.mock.On("GetSequencerByAddress", address)} } @@ -710,7 +710,7 @@ func (_c *MockClientI_GetSequencerByAddress_Call) RunAndReturn(run func(string) return _c } - +// GetSignerBalance provides a mock function with given fields: func (_m *MockClientI) GetSignerBalance() (types.Balance, error) { ret := _m.Called() @@ -738,12 +738,12 @@ func (_m *MockClientI) GetSignerBalance() (types.Balance, error) { return r0, r1 } - +// MockClientI_GetSignerBalance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSignerBalance' type MockClientI_GetSignerBalance_Call struct { *mock.Call } - +// GetSignerBalance is a helper method to define mock.On call func (_e *MockClientI_Expecter) GetSignerBalance() *MockClientI_GetSignerBalance_Call { return &MockClientI_GetSignerBalance_Call{Call: _e.mock.On("GetSignerBalance")} } @@ -765,7 +765,7 @@ func (_c *MockClientI_GetSignerBalance_Call) RunAndReturn(run func() (types.Bala return _c } - +// Init provides a mock function with given fields: config, rollappId, _a2, logger, options func (_m *MockClientI) Init(config settlement.Config, rollappId string, _a2 *pubsub.Server, logger types.Logger, options ...settlement.Option) error { _va := make([]interface{}, len(options)) for _i := range options { @@ -790,17 +790,17 @@ func (_m *MockClientI) Init(config settlement.Config, rollappId string, _a2 *pub return r0 } - +// MockClientI_Init_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Init' type MockClientI_Init_Call struct { *mock.Call } - - - - - - +// Init is a helper method to define mock.On call +// - config settlement.Config +// - rollappId string +// - _a2 *pubsub.Server +// - logger types.Logger +// - options ...settlement.Option func (_e *MockClientI_Expecter) Init(config interface{}, rollappId interface{}, _a2 interface{}, logger interface{}, options ...interface{}) *MockClientI_Init_Call { return &MockClientI_Init_Call{Call: _e.mock.On("Init", append([]interface{}{config, rollappId, _a2, logger}, options...)...)} @@ -829,7 +829,7 @@ func (_c *MockClientI_Init_Call) RunAndReturn(run func(settlement.Config, string return _c } - +// Start provides a mock function with given fields: func (_m *MockClientI) Start() error { ret := _m.Called() @@ -847,12 +847,12 @@ func (_m *MockClientI) Start() error { return r0 } - +// MockClientI_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' type MockClientI_Start_Call struct { *mock.Call } - +// Start is a helper method to define mock.On call func (_e *MockClientI_Expecter) Start() *MockClientI_Start_Call { return &MockClientI_Start_Call{Call: _e.mock.On("Start")} } @@ -874,7 +874,7 @@ func (_c *MockClientI_Start_Call) RunAndReturn(run func() error) *MockClientI_St return _c } - +// Stop provides a mock function with given fields: func (_m *MockClientI) Stop() error { ret := _m.Called() @@ -892,12 +892,12 @@ func (_m *MockClientI) Stop() error { return r0 } - +// MockClientI_Stop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stop' type MockClientI_Stop_Call struct { *mock.Call } - +// Stop is a helper method to define 
mock.On call func (_e *MockClientI_Expecter) Stop() *MockClientI_Stop_Call { return &MockClientI_Stop_Call{Call: _e.mock.On("Stop")} } @@ -919,7 +919,7 @@ func (_c *MockClientI_Stop_Call) RunAndReturn(run func() error) *MockClientI_Sto return _c } - +// SubmitBatch provides a mock function with given fields: batch, daClient, daResult func (_m *MockClientI) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *da.ResultSubmitBatch) error { ret := _m.Called(batch, daClient, daResult) @@ -937,15 +937,15 @@ func (_m *MockClientI) SubmitBatch(batch *types.Batch, daClient da.Client, daRes return r0 } - +// MockClientI_SubmitBatch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubmitBatch' type MockClientI_SubmitBatch_Call struct { *mock.Call } - - - - +// SubmitBatch is a helper method to define mock.On call +// - batch *types.Batch +// - daClient da.Client +// - daResult *da.ResultSubmitBatch func (_e *MockClientI_Expecter) SubmitBatch(batch interface{}, daClient interface{}, daResult interface{}) *MockClientI_SubmitBatch_Call { return &MockClientI_SubmitBatch_Call{Call: _e.mock.On("SubmitBatch", batch, daClient, daResult)} } @@ -967,7 +967,7 @@ func (_c *MockClientI_SubmitBatch_Call) RunAndReturn(run func(*types.Batch, da.C return _c } - +// ValidateGenesisBridgeData provides a mock function with given fields: data func (_m *MockClientI) ValidateGenesisBridgeData(data rollapp.GenesisBridgeData) error { ret := _m.Called(data) @@ -985,13 +985,13 @@ func (_m *MockClientI) ValidateGenesisBridgeData(data rollapp.GenesisBridgeData) return r0 } - +// MockClientI_ValidateGenesisBridgeData_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ValidateGenesisBridgeData' type MockClientI_ValidateGenesisBridgeData_Call struct { *mock.Call } - - +// ValidateGenesisBridgeData is a helper method to define mock.On call +// - data rollapp.GenesisBridgeData func (_e *MockClientI_Expecter) ValidateGenesisBridgeData(data interface{}) *MockClientI_ValidateGenesisBridgeData_Call { return &MockClientI_ValidateGenesisBridgeData_Call{Call: _e.mock.On("ValidateGenesisBridgeData", data)} } @@ -1013,8 +1013,8 @@ func (_c *MockClientI_ValidateGenesisBridgeData_Call) RunAndReturn(run func(roll return _c } - - +// NewMockClientI creates a new instance of MockClientI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. func NewMockClientI(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/store/mock_Store.go b/mocks/github.com/dymensionxyz/dymint/store/mock_Store.go index 8ee0e6d75..5035e135f 100644 --- a/mocks/github.com/dymensionxyz/dymint/store/mock_Store.go +++ b/mocks/github.com/dymensionxyz/dymint/store/mock_Store.go @@ -1,4 +1,4 @@ - +// Code generated by mockery v2.42.3. DO NOT EDIT. 
package store @@ -13,7 +13,7 @@ import ( types "github.com/dymensionxyz/dymint/types" ) - +// MockStore is an autogenerated mock type for the Store type type MockStore struct { mock.Mock } @@ -26,7 +26,7 @@ func (_m *MockStore) EXPECT() *MockStore_Expecter { return &MockStore_Expecter{mock: &_m.Mock} } - +// Close provides a mock function with given fields: func (_m *MockStore) Close() error { ret := _m.Called() @@ -44,12 +44,12 @@ func (_m *MockStore) Close() error { return r0 } - +// MockStore_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' type MockStore_Close_Call struct { *mock.Call } - +// Close is a helper method to define mock.On call func (_e *MockStore_Expecter) Close() *MockStore_Close_Call { return &MockStore_Close_Call{Call: _e.mock.On("Close")} } @@ -71,7 +71,7 @@ func (_c *MockStore_Close_Call) RunAndReturn(run func() error) *MockStore_Close_ return _c } - +// LoadBaseHeight provides a mock function with given fields: func (_m *MockStore) LoadBaseHeight() (uint64, error) { ret := _m.Called() @@ -99,12 +99,12 @@ func (_m *MockStore) LoadBaseHeight() (uint64, error) { return r0, r1 } - +// MockStore_LoadBaseHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadBaseHeight' type MockStore_LoadBaseHeight_Call struct { *mock.Call } - +// LoadBaseHeight is a helper method to define mock.On call func (_e *MockStore_Expecter) LoadBaseHeight() *MockStore_LoadBaseHeight_Call { return &MockStore_LoadBaseHeight_Call{Call: _e.mock.On("LoadBaseHeight")} } @@ -126,7 +126,7 @@ func (_c *MockStore_LoadBaseHeight_Call) RunAndReturn(run func() (uint64, error) return _c } - +// LoadBlock provides a mock function with given fields: height func (_m *MockStore) LoadBlock(height uint64) (*types.Block, error) { ret := _m.Called(height) @@ -156,13 +156,13 @@ func (_m *MockStore) LoadBlock(height uint64) (*types.Block, error) { return r0, r1 } - +// MockStore_LoadBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadBlock' type MockStore_LoadBlock_Call struct { *mock.Call } - - +// LoadBlock is a helper method to define mock.On call +// - height uint64 func (_e *MockStore_Expecter) LoadBlock(height interface{}) *MockStore_LoadBlock_Call { return &MockStore_LoadBlock_Call{Call: _e.mock.On("LoadBlock", height)} } @@ -184,7 +184,7 @@ func (_c *MockStore_LoadBlock_Call) RunAndReturn(run func(uint64) (*types.Block, return _c } - +// LoadBlockByHash provides a mock function with given fields: hash func (_m *MockStore) LoadBlockByHash(hash [32]byte) (*types.Block, error) { ret := _m.Called(hash) @@ -214,13 +214,13 @@ func (_m *MockStore) LoadBlockByHash(hash [32]byte) (*types.Block, error) { return r0, r1 } - +// MockStore_LoadBlockByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadBlockByHash' type MockStore_LoadBlockByHash_Call struct { *mock.Call } - - +// LoadBlockByHash is a helper method to define mock.On call +// - hash [32]byte func (_e *MockStore_Expecter) LoadBlockByHash(hash interface{}) *MockStore_LoadBlockByHash_Call { return &MockStore_LoadBlockByHash_Call{Call: _e.mock.On("LoadBlockByHash", hash)} } @@ -242,7 +242,7 @@ func (_c *MockStore_LoadBlockByHash_Call) RunAndReturn(run func([32]byte) (*type return _c } - +// LoadBlockCid provides a mock function with given fields: height func (_m *MockStore) LoadBlockCid(height uint64) (cid.Cid, error) { ret := _m.Called(height) @@ -270,13 +270,13 @@ 
func (_m *MockStore) LoadBlockCid(height uint64) (cid.Cid, error) { return r0, r1 } - +// MockStore_LoadBlockCid_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadBlockCid' type MockStore_LoadBlockCid_Call struct { *mock.Call } - - +// LoadBlockCid is a helper method to define mock.On call +// - height uint64 func (_e *MockStore_Expecter) LoadBlockCid(height interface{}) *MockStore_LoadBlockCid_Call { return &MockStore_LoadBlockCid_Call{Call: _e.mock.On("LoadBlockCid", height)} } @@ -298,7 +298,7 @@ func (_c *MockStore_LoadBlockCid_Call) RunAndReturn(run func(uint64) (cid.Cid, e return _c } - +// LoadBlockResponses provides a mock function with given fields: height func (_m *MockStore) LoadBlockResponses(height uint64) (*state.ABCIResponses, error) { ret := _m.Called(height) @@ -328,13 +328,13 @@ func (_m *MockStore) LoadBlockResponses(height uint64) (*state.ABCIResponses, er return r0, r1 } - +// MockStore_LoadBlockResponses_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadBlockResponses' type MockStore_LoadBlockResponses_Call struct { *mock.Call } - - +// LoadBlockResponses is a helper method to define mock.On call +// - height uint64 func (_e *MockStore_Expecter) LoadBlockResponses(height interface{}) *MockStore_LoadBlockResponses_Call { return &MockStore_LoadBlockResponses_Call{Call: _e.mock.On("LoadBlockResponses", height)} } @@ -356,7 +356,7 @@ func (_c *MockStore_LoadBlockResponses_Call) RunAndReturn(run func(uint64) (*sta return _c } - +// LoadBlockSource provides a mock function with given fields: height func (_m *MockStore) LoadBlockSource(height uint64) (types.BlockSource, error) { ret := _m.Called(height) @@ -384,13 +384,13 @@ func (_m *MockStore) LoadBlockSource(height uint64) (types.BlockSource, error) { return r0, r1 } - +// MockStore_LoadBlockSource_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadBlockSource' type MockStore_LoadBlockSource_Call struct { *mock.Call } - - +// LoadBlockSource is a helper method to define mock.On call +// - height uint64 func (_e *MockStore_Expecter) LoadBlockSource(height interface{}) *MockStore_LoadBlockSource_Call { return &MockStore_LoadBlockSource_Call{Call: _e.mock.On("LoadBlockSource", height)} } @@ -412,7 +412,7 @@ func (_c *MockStore_LoadBlockSource_Call) RunAndReturn(run func(uint64) (types.B return _c } - +// LoadBlockSyncBaseHeight provides a mock function with given fields: func (_m *MockStore) LoadBlockSyncBaseHeight() (uint64, error) { ret := _m.Called() @@ -440,12 +440,12 @@ func (_m *MockStore) LoadBlockSyncBaseHeight() (uint64, error) { return r0, r1 } - +// MockStore_LoadBlockSyncBaseHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadBlockSyncBaseHeight' type MockStore_LoadBlockSyncBaseHeight_Call struct { *mock.Call } - +// LoadBlockSyncBaseHeight is a helper method to define mock.On call func (_e *MockStore_Expecter) LoadBlockSyncBaseHeight() *MockStore_LoadBlockSyncBaseHeight_Call { return &MockStore_LoadBlockSyncBaseHeight_Call{Call: _e.mock.On("LoadBlockSyncBaseHeight")} } @@ -467,7 +467,7 @@ func (_c *MockStore_LoadBlockSyncBaseHeight_Call) RunAndReturn(run func() (uint6 return _c } - +// LoadCommit provides a mock function with given fields: height func (_m *MockStore) LoadCommit(height uint64) (*types.Commit, error) { ret := _m.Called(height) @@ -497,13 +497,13 @@ func (_m *MockStore) LoadCommit(height uint64) (*types.Commit, 
error) { return r0, r1 } - +// MockStore_LoadCommit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadCommit' type MockStore_LoadCommit_Call struct { *mock.Call } - - +// LoadCommit is a helper method to define mock.On call +// - height uint64 func (_e *MockStore_Expecter) LoadCommit(height interface{}) *MockStore_LoadCommit_Call { return &MockStore_LoadCommit_Call{Call: _e.mock.On("LoadCommit", height)} } @@ -525,7 +525,7 @@ func (_c *MockStore_LoadCommit_Call) RunAndReturn(run func(uint64) (*types.Commi return _c } - +// LoadCommitByHash provides a mock function with given fields: hash func (_m *MockStore) LoadCommitByHash(hash [32]byte) (*types.Commit, error) { ret := _m.Called(hash) @@ -555,13 +555,13 @@ func (_m *MockStore) LoadCommitByHash(hash [32]byte) (*types.Commit, error) { return r0, r1 } - +// MockStore_LoadCommitByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadCommitByHash' type MockStore_LoadCommitByHash_Call struct { *mock.Call } - - +// LoadCommitByHash is a helper method to define mock.On call +// - hash [32]byte func (_e *MockStore_Expecter) LoadCommitByHash(hash interface{}) *MockStore_LoadCommitByHash_Call { return &MockStore_LoadCommitByHash_Call{Call: _e.mock.On("LoadCommitByHash", hash)} } @@ -583,7 +583,7 @@ func (_c *MockStore_LoadCommitByHash_Call) RunAndReturn(run func([32]byte) (*typ return _c } - +// LoadDRSVersion provides a mock function with given fields: height func (_m *MockStore) LoadDRSVersion(height uint64) (uint32, error) { ret := _m.Called(height) @@ -611,13 +611,13 @@ func (_m *MockStore) LoadDRSVersion(height uint64) (uint32, error) { return r0, r1 } - +// MockStore_LoadDRSVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadDRSVersion' type MockStore_LoadDRSVersion_Call struct { *mock.Call } - - +// LoadDRSVersion is a helper method to define mock.On call +// - height uint64 func (_e *MockStore_Expecter) LoadDRSVersion(height interface{}) *MockStore_LoadDRSVersion_Call { return &MockStore_LoadDRSVersion_Call{Call: _e.mock.On("LoadDRSVersion", height)} } @@ -639,7 +639,7 @@ func (_c *MockStore_LoadDRSVersion_Call) RunAndReturn(run func(uint64) (uint32, return _c } - +// LoadIndexerBaseHeight provides a mock function with given fields: func (_m *MockStore) LoadIndexerBaseHeight() (uint64, error) { ret := _m.Called() @@ -667,12 +667,12 @@ func (_m *MockStore) LoadIndexerBaseHeight() (uint64, error) { return r0, r1 } - +// MockStore_LoadIndexerBaseHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadIndexerBaseHeight' type MockStore_LoadIndexerBaseHeight_Call struct { *mock.Call } - +// LoadIndexerBaseHeight is a helper method to define mock.On call func (_e *MockStore_Expecter) LoadIndexerBaseHeight() *MockStore_LoadIndexerBaseHeight_Call { return &MockStore_LoadIndexerBaseHeight_Call{Call: _e.mock.On("LoadIndexerBaseHeight")} } @@ -694,7 +694,7 @@ func (_c *MockStore_LoadIndexerBaseHeight_Call) RunAndReturn(run func() (uint64, return _c } - +// LoadLastBlockSequencerSet provides a mock function with given fields: func (_m *MockStore) LoadLastBlockSequencerSet() (types.Sequencers, error) { ret := _m.Called() @@ -724,12 +724,12 @@ func (_m *MockStore) LoadLastBlockSequencerSet() (types.Sequencers, error) { return r0, r1 } - +// MockStore_LoadLastBlockSequencerSet_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 
'LoadLastBlockSequencerSet' type MockStore_LoadLastBlockSequencerSet_Call struct { *mock.Call } - +// LoadLastBlockSequencerSet is a helper method to define mock.On call func (_e *MockStore_Expecter) LoadLastBlockSequencerSet() *MockStore_LoadLastBlockSequencerSet_Call { return &MockStore_LoadLastBlockSequencerSet_Call{Call: _e.mock.On("LoadLastBlockSequencerSet")} } @@ -751,7 +751,7 @@ func (_c *MockStore_LoadLastBlockSequencerSet_Call) RunAndReturn(run func() (typ return _c } - +// LoadProposer provides a mock function with given fields: height func (_m *MockStore) LoadProposer(height uint64) (types.Sequencer, error) { ret := _m.Called(height) @@ -779,13 +779,13 @@ func (_m *MockStore) LoadProposer(height uint64) (types.Sequencer, error) { return r0, r1 } - +// MockStore_LoadProposer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadProposer' type MockStore_LoadProposer_Call struct { *mock.Call } - - +// LoadProposer is a helper method to define mock.On call +// - height uint64 func (_e *MockStore_Expecter) LoadProposer(height interface{}) *MockStore_LoadProposer_Call { return &MockStore_LoadProposer_Call{Call: _e.mock.On("LoadProposer", height)} } @@ -807,7 +807,7 @@ func (_c *MockStore_LoadProposer_Call) RunAndReturn(run func(uint64) (types.Sequ return _c } - +// LoadState provides a mock function with given fields: func (_m *MockStore) LoadState() (*types.State, error) { ret := _m.Called() @@ -837,12 +837,12 @@ func (_m *MockStore) LoadState() (*types.State, error) { return r0, r1 } - +// MockStore_LoadState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadState' type MockStore_LoadState_Call struct { *mock.Call } - +// LoadState is a helper method to define mock.On call func (_e *MockStore_Expecter) LoadState() *MockStore_LoadState_Call { return &MockStore_LoadState_Call{Call: _e.mock.On("LoadState")} } @@ -864,7 +864,7 @@ func (_c *MockStore_LoadState_Call) RunAndReturn(run func() (*types.State, error return _c } - +// LoadValidationHeight provides a mock function with given fields: func (_m *MockStore) LoadValidationHeight() (uint64, error) { ret := _m.Called() @@ -892,12 +892,12 @@ func (_m *MockStore) LoadValidationHeight() (uint64, error) { return r0, r1 } - +// MockStore_LoadValidationHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadValidationHeight' type MockStore_LoadValidationHeight_Call struct { *mock.Call } - +// LoadValidationHeight is a helper method to define mock.On call func (_e *MockStore_Expecter) LoadValidationHeight() *MockStore_LoadValidationHeight_Call { return &MockStore_LoadValidationHeight_Call{Call: _e.mock.On("LoadValidationHeight")} } @@ -919,7 +919,7 @@ func (_c *MockStore_LoadValidationHeight_Call) RunAndReturn(run func() (uint64, return _c } - +// NewBatch provides a mock function with given fields: func (_m *MockStore) NewBatch() store.KVBatch { ret := _m.Called() @@ -939,12 +939,12 @@ func (_m *MockStore) NewBatch() store.KVBatch { return r0 } - +// MockStore_NewBatch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewBatch' type MockStore_NewBatch_Call struct { *mock.Call } - +// NewBatch is a helper method to define mock.On call func (_e *MockStore_Expecter) NewBatch() *MockStore_NewBatch_Call { return &MockStore_NewBatch_Call{Call: _e.mock.On("NewBatch")} } @@ -966,7 +966,7 @@ func (_c *MockStore_NewBatch_Call) RunAndReturn(run func() store.KVBatch) *MockS 
return _c } - +// PruneStore provides a mock function with given fields: to, logger func (_m *MockStore) PruneStore(to uint64, logger types.Logger) (uint64, error) { ret := _m.Called(to, logger) @@ -994,14 +994,14 @@ func (_m *MockStore) PruneStore(to uint64, logger types.Logger) (uint64, error) return r0, r1 } - +// MockStore_PruneStore_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PruneStore' type MockStore_PruneStore_Call struct { *mock.Call } - - - +// PruneStore is a helper method to define mock.On call +// - to uint64 +// - logger types.Logger func (_e *MockStore_Expecter) PruneStore(to interface{}, logger interface{}) *MockStore_PruneStore_Call { return &MockStore_PruneStore_Call{Call: _e.mock.On("PruneStore", to, logger)} } @@ -1023,7 +1023,7 @@ func (_c *MockStore_PruneStore_Call) RunAndReturn(run func(uint64, types.Logger) return _c } - +// RemoveBlockCid provides a mock function with given fields: height func (_m *MockStore) RemoveBlockCid(height uint64) error { ret := _m.Called(height) @@ -1041,13 +1041,13 @@ func (_m *MockStore) RemoveBlockCid(height uint64) error { return r0 } - +// MockStore_RemoveBlockCid_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveBlockCid' type MockStore_RemoveBlockCid_Call struct { *mock.Call } - - +// RemoveBlockCid is a helper method to define mock.On call +// - height uint64 func (_e *MockStore_Expecter) RemoveBlockCid(height interface{}) *MockStore_RemoveBlockCid_Call { return &MockStore_RemoveBlockCid_Call{Call: _e.mock.On("RemoveBlockCid", height)} } @@ -1069,7 +1069,7 @@ func (_c *MockStore_RemoveBlockCid_Call) RunAndReturn(run func(uint64) error) *M return _c } - +// SaveBaseHeight provides a mock function with given fields: height func (_m *MockStore) SaveBaseHeight(height uint64) error { ret := _m.Called(height) @@ -1087,13 +1087,13 @@ func (_m *MockStore) SaveBaseHeight(height uint64) error { return r0 } - +// MockStore_SaveBaseHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveBaseHeight' type MockStore_SaveBaseHeight_Call struct { *mock.Call } - - +// SaveBaseHeight is a helper method to define mock.On call +// - height uint64 func (_e *MockStore_Expecter) SaveBaseHeight(height interface{}) *MockStore_SaveBaseHeight_Call { return &MockStore_SaveBaseHeight_Call{Call: _e.mock.On("SaveBaseHeight", height)} } @@ -1115,7 +1115,7 @@ func (_c *MockStore_SaveBaseHeight_Call) RunAndReturn(run func(uint64) error) *M return _c } - +// SaveBlock provides a mock function with given fields: block, commit, batch func (_m *MockStore) SaveBlock(block *types.Block, commit *types.Commit, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(block, commit, batch) @@ -1145,15 +1145,15 @@ func (_m *MockStore) SaveBlock(block *types.Block, commit *types.Commit, batch s return r0, r1 } - +// MockStore_SaveBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveBlock' type MockStore_SaveBlock_Call struct { *mock.Call } - - - - +// SaveBlock is a helper method to define mock.On call +// - block *types.Block +// - commit *types.Commit +// - batch store.KVBatch func (_e *MockStore_Expecter) SaveBlock(block interface{}, commit interface{}, batch interface{}) *MockStore_SaveBlock_Call { return &MockStore_SaveBlock_Call{Call: _e.mock.On("SaveBlock", block, commit, batch)} } @@ -1175,7 +1175,7 @@ func (_c *MockStore_SaveBlock_Call) RunAndReturn(run func(*types.Block, 
*types.C return _c } - +// SaveBlockCid provides a mock function with given fields: height, _a1, batch func (_m *MockStore) SaveBlockCid(height uint64, _a1 cid.Cid, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, _a1, batch) @@ -1205,15 +1205,15 @@ func (_m *MockStore) SaveBlockCid(height uint64, _a1 cid.Cid, batch store.KVBatc return r0, r1 } - +// MockStore_SaveBlockCid_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveBlockCid' type MockStore_SaveBlockCid_Call struct { *mock.Call } - - - - +// SaveBlockCid is a helper method to define mock.On call +// - height uint64 +// - _a1 cid.Cid +// - batch store.KVBatch func (_e *MockStore_Expecter) SaveBlockCid(height interface{}, _a1 interface{}, batch interface{}) *MockStore_SaveBlockCid_Call { return &MockStore_SaveBlockCid_Call{Call: _e.mock.On("SaveBlockCid", height, _a1, batch)} } @@ -1235,7 +1235,7 @@ func (_c *MockStore_SaveBlockCid_Call) RunAndReturn(run func(uint64, cid.Cid, st return _c } - +// SaveBlockResponses provides a mock function with given fields: height, responses, batch func (_m *MockStore) SaveBlockResponses(height uint64, responses *state.ABCIResponses, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, responses, batch) @@ -1265,15 +1265,15 @@ func (_m *MockStore) SaveBlockResponses(height uint64, responses *state.ABCIResp return r0, r1 } - +// MockStore_SaveBlockResponses_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveBlockResponses' type MockStore_SaveBlockResponses_Call struct { *mock.Call } - - - - +// SaveBlockResponses is a helper method to define mock.On call +// - height uint64 +// - responses *state.ABCIResponses +// - batch store.KVBatch func (_e *MockStore_Expecter) SaveBlockResponses(height interface{}, responses interface{}, batch interface{}) *MockStore_SaveBlockResponses_Call { return &MockStore_SaveBlockResponses_Call{Call: _e.mock.On("SaveBlockResponses", height, responses, batch)} } @@ -1295,7 +1295,7 @@ func (_c *MockStore_SaveBlockResponses_Call) RunAndReturn(run func(uint64, *stat return _c } - +// SaveBlockSource provides a mock function with given fields: height, source, batch func (_m *MockStore) SaveBlockSource(height uint64, source types.BlockSource, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, source, batch) @@ -1325,15 +1325,15 @@ func (_m *MockStore) SaveBlockSource(height uint64, source types.BlockSource, ba return r0, r1 } - +// MockStore_SaveBlockSource_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveBlockSource' type MockStore_SaveBlockSource_Call struct { *mock.Call } - - - - +// SaveBlockSource is a helper method to define mock.On call +// - height uint64 +// - source types.BlockSource +// - batch store.KVBatch func (_e *MockStore_Expecter) SaveBlockSource(height interface{}, source interface{}, batch interface{}) *MockStore_SaveBlockSource_Call { return &MockStore_SaveBlockSource_Call{Call: _e.mock.On("SaveBlockSource", height, source, batch)} } @@ -1355,7 +1355,7 @@ func (_c *MockStore_SaveBlockSource_Call) RunAndReturn(run func(uint64, types.Bl return _c } - +// SaveBlockSyncBaseHeight provides a mock function with given fields: height func (_m *MockStore) SaveBlockSyncBaseHeight(height uint64) error { ret := _m.Called(height) @@ -1373,13 +1373,13 @@ func (_m *MockStore) SaveBlockSyncBaseHeight(height uint64) error { return r0 } - +// MockStore_SaveBlockSyncBaseHeight_Call is 
a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveBlockSyncBaseHeight' type MockStore_SaveBlockSyncBaseHeight_Call struct { *mock.Call } - - +// SaveBlockSyncBaseHeight is a helper method to define mock.On call +// - height uint64 func (_e *MockStore_Expecter) SaveBlockSyncBaseHeight(height interface{}) *MockStore_SaveBlockSyncBaseHeight_Call { return &MockStore_SaveBlockSyncBaseHeight_Call{Call: _e.mock.On("SaveBlockSyncBaseHeight", height)} } @@ -1401,7 +1401,7 @@ func (_c *MockStore_SaveBlockSyncBaseHeight_Call) RunAndReturn(run func(uint64) return _c } - +// SaveDRSVersion provides a mock function with given fields: height, version, batch func (_m *MockStore) SaveDRSVersion(height uint64, version uint32, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, version, batch) @@ -1431,15 +1431,15 @@ func (_m *MockStore) SaveDRSVersion(height uint64, version uint32, batch store.K return r0, r1 } - +// MockStore_SaveDRSVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveDRSVersion' type MockStore_SaveDRSVersion_Call struct { *mock.Call } - - - - +// SaveDRSVersion is a helper method to define mock.On call +// - height uint64 +// - version uint32 +// - batch store.KVBatch func (_e *MockStore_Expecter) SaveDRSVersion(height interface{}, version interface{}, batch interface{}) *MockStore_SaveDRSVersion_Call { return &MockStore_SaveDRSVersion_Call{Call: _e.mock.On("SaveDRSVersion", height, version, batch)} } @@ -1461,7 +1461,7 @@ func (_c *MockStore_SaveDRSVersion_Call) RunAndReturn(run func(uint64, uint32, s return _c } - +// SaveIndexerBaseHeight provides a mock function with given fields: height func (_m *MockStore) SaveIndexerBaseHeight(height uint64) error { ret := _m.Called(height) @@ -1479,13 +1479,13 @@ func (_m *MockStore) SaveIndexerBaseHeight(height uint64) error { return r0 } - +// MockStore_SaveIndexerBaseHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveIndexerBaseHeight' type MockStore_SaveIndexerBaseHeight_Call struct { *mock.Call } - - +// SaveIndexerBaseHeight is a helper method to define mock.On call +// - height uint64 func (_e *MockStore_Expecter) SaveIndexerBaseHeight(height interface{}) *MockStore_SaveIndexerBaseHeight_Call { return &MockStore_SaveIndexerBaseHeight_Call{Call: _e.mock.On("SaveIndexerBaseHeight", height)} } @@ -1507,7 +1507,7 @@ func (_c *MockStore_SaveIndexerBaseHeight_Call) RunAndReturn(run func(uint64) er return _c } - +// SaveLastBlockSequencerSet provides a mock function with given fields: sequencers, batch func (_m *MockStore) SaveLastBlockSequencerSet(sequencers types.Sequencers, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(sequencers, batch) @@ -1537,14 +1537,14 @@ func (_m *MockStore) SaveLastBlockSequencerSet(sequencers types.Sequencers, batc return r0, r1 } - +// MockStore_SaveLastBlockSequencerSet_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveLastBlockSequencerSet' type MockStore_SaveLastBlockSequencerSet_Call struct { *mock.Call } - - - +// SaveLastBlockSequencerSet is a helper method to define mock.On call +// - sequencers types.Sequencers +// - batch store.KVBatch func (_e *MockStore_Expecter) SaveLastBlockSequencerSet(sequencers interface{}, batch interface{}) *MockStore_SaveLastBlockSequencerSet_Call { return &MockStore_SaveLastBlockSequencerSet_Call{Call: _e.mock.On("SaveLastBlockSequencerSet", sequencers, batch)} } 
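For orientation only, and not part of the patch itself: the Expecter helpers whose doc comments this change restores are typically used as in the minimal sketch below. The mock constructor and LoadBlock signature come from mock_Store.go above; the test name, import paths, and values are illustrative assumptions.

package store_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	mockstore "github.com/dymensionxyz/dymint/mocks/github.com/dymensionxyz/dymint/store"
	"github.com/dymensionxyz/dymint/types"
)

// Hypothetical usage sketch for the generated Expecter API.
func TestLoadBlockExpecterSketch(t *testing.T) {
	// NewMockStore registers t, so expectations are asserted automatically on test cleanup.
	s := mockstore.NewMockStore(t)

	// EXPECT() returns typed per-method helpers instead of raw mock.On("LoadBlock", ...).
	s.EXPECT().LoadBlock(uint64(7)).Return(&types.Block{}, nil).Once()

	b, err := s.LoadBlock(7)
	require.NoError(t, err)
	require.NotNil(t, b)
}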
@@ -1566,7 +1566,7 @@ func (_c *MockStore_SaveLastBlockSequencerSet_Call) RunAndReturn(run func(types. return _c } - +// SaveProposer provides a mock function with given fields: height, proposer, batch func (_m *MockStore) SaveProposer(height uint64, proposer types.Sequencer, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, proposer, batch) @@ -1596,15 +1596,15 @@ func (_m *MockStore) SaveProposer(height uint64, proposer types.Sequencer, batch return r0, r1 } - +// MockStore_SaveProposer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveProposer' type MockStore_SaveProposer_Call struct { *mock.Call } - - - - +// SaveProposer is a helper method to define mock.On call +// - height uint64 +// - proposer types.Sequencer +// - batch store.KVBatch func (_e *MockStore_Expecter) SaveProposer(height interface{}, proposer interface{}, batch interface{}) *MockStore_SaveProposer_Call { return &MockStore_SaveProposer_Call{Call: _e.mock.On("SaveProposer", height, proposer, batch)} } @@ -1626,7 +1626,7 @@ func (_c *MockStore_SaveProposer_Call) RunAndReturn(run func(uint64, types.Seque return _c } - +// SaveState provides a mock function with given fields: _a0, batch func (_m *MockStore) SaveState(_a0 *types.State, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(_a0, batch) @@ -1656,14 +1656,14 @@ func (_m *MockStore) SaveState(_a0 *types.State, batch store.KVBatch) (store.KVB return r0, r1 } - +// MockStore_SaveState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveState' type MockStore_SaveState_Call struct { *mock.Call } - - - +// SaveState is a helper method to define mock.On call +// - _a0 *types.State +// - batch store.KVBatch func (_e *MockStore_Expecter) SaveState(_a0 interface{}, batch interface{}) *MockStore_SaveState_Call { return &MockStore_SaveState_Call{Call: _e.mock.On("SaveState", _a0, batch)} } @@ -1685,7 +1685,7 @@ func (_c *MockStore_SaveState_Call) RunAndReturn(run func(*types.State, store.KV return _c } - +// SaveValidationHeight provides a mock function with given fields: height, batch func (_m *MockStore) SaveValidationHeight(height uint64, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, batch) @@ -1715,14 +1715,14 @@ func (_m *MockStore) SaveValidationHeight(height uint64, batch store.KVBatch) (s return r0, r1 } - +// MockStore_SaveValidationHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveValidationHeight' type MockStore_SaveValidationHeight_Call struct { *mock.Call } - - - +// SaveValidationHeight is a helper method to define mock.On call +// - height uint64 +// - batch store.KVBatch func (_e *MockStore_Expecter) SaveValidationHeight(height interface{}, batch interface{}) *MockStore_SaveValidationHeight_Call { return &MockStore_SaveValidationHeight_Call{Call: _e.mock.On("SaveValidationHeight", height, batch)} } @@ -1744,8 +1744,8 @@ func (_c *MockStore_SaveValidationHeight_Call) RunAndReturn(run func(uint64, sto return _c } - - +// NewMockStore creates a new instance of MockStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
func NewMockStore(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/third_party/dymension/sequencer/types/mock_QueryClient.go b/mocks/github.com/dymensionxyz/dymint/third_party/dymension/sequencer/types/mock_QueryClient.go index 775ec233d..c2ce005b5 100644 --- a/mocks/github.com/dymensionxyz/dymint/third_party/dymension/sequencer/types/mock_QueryClient.go +++ b/mocks/github.com/dymensionxyz/dymint/third_party/dymension/sequencer/types/mock_QueryClient.go @@ -1,4 +1,4 @@ - +// Code generated by mockery v2.42.3. DO NOT EDIT. package types @@ -12,7 +12,7 @@ import ( types "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer" ) - +// MockQueryClient is an autogenerated mock type for the QueryClient type type MockQueryClient struct { mock.Mock } @@ -25,7 +25,7 @@ func (_m *MockQueryClient) EXPECT() *MockQueryClient_Expecter { return &MockQueryClient_Expecter{mock: &_m.Mock} } - +// GetNextProposerByRollapp provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) GetNextProposerByRollapp(ctx context.Context, in *types.QueryGetNextProposerByRollappRequest, opts ...grpc.CallOption) (*types.QueryGetNextProposerByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -62,15 +62,15 @@ func (_m *MockQueryClient) GetNextProposerByRollapp(ctx context.Context, in *typ return r0, r1 } - +// MockQueryClient_GetNextProposerByRollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNextProposerByRollapp' type MockQueryClient_GetNextProposerByRollapp_Call struct { *mock.Call } - - - - +// GetNextProposerByRollapp is a helper method to define mock.On call +// - ctx context.Context +// - in *types.QueryGetNextProposerByRollappRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) GetNextProposerByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_GetNextProposerByRollapp_Call { return &MockQueryClient_GetNextProposerByRollapp_Call{Call: _e.mock.On("GetNextProposerByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -99,7 +99,7 @@ func (_c *MockQueryClient_GetNextProposerByRollapp_Call) RunAndReturn(run func(c return _c } - +// GetProposerByRollapp provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) GetProposerByRollapp(ctx context.Context, in *types.QueryGetProposerByRollappRequest, opts ...grpc.CallOption) (*types.QueryGetProposerByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -136,15 +136,15 @@ func (_m *MockQueryClient) GetProposerByRollapp(ctx context.Context, in *types.Q return r0, r1 } - +// MockQueryClient_GetProposerByRollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProposerByRollapp' type MockQueryClient_GetProposerByRollapp_Call struct { *mock.Call } - - - - +// GetProposerByRollapp is a helper method to define mock.On call +// - ctx context.Context +// - in *types.QueryGetProposerByRollappRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) GetProposerByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_GetProposerByRollapp_Call { return &MockQueryClient_GetProposerByRollapp_Call{Call: _e.mock.On("GetProposerByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -173,7 +173,7 @@ func (_c *MockQueryClient_GetProposerByRollapp_Call) RunAndReturn(run func(conte return _c } - +// Params provides a 
mock function with given fields: ctx, in, opts func (_m *MockQueryClient) Params(ctx context.Context, in *types.QueryParamsRequest, opts ...grpc.CallOption) (*types.QueryParamsResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -210,15 +210,15 @@ func (_m *MockQueryClient) Params(ctx context.Context, in *types.QueryParamsRequ return r0, r1 } - +// MockQueryClient_Params_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Params' type MockQueryClient_Params_Call struct { *mock.Call } - - - - +// Params is a helper method to define mock.On call +// - ctx context.Context +// - in *types.QueryParamsRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) Params(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Params_Call { return &MockQueryClient_Params_Call{Call: _e.mock.On("Params", append([]interface{}{ctx, in}, opts...)...)} @@ -247,7 +247,7 @@ func (_c *MockQueryClient_Params_Call) RunAndReturn(run func(context.Context, *t return _c } - +// Sequencer provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) Sequencer(ctx context.Context, in *types.QueryGetSequencerRequest, opts ...grpc.CallOption) (*types.QueryGetSequencerResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -284,15 +284,15 @@ func (_m *MockQueryClient) Sequencer(ctx context.Context, in *types.QueryGetSequ return r0, r1 } - +// MockQueryClient_Sequencer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sequencer' type MockQueryClient_Sequencer_Call struct { *mock.Call } - - - - +// Sequencer is a helper method to define mock.On call +// - ctx context.Context +// - in *types.QueryGetSequencerRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) Sequencer(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Sequencer_Call { return &MockQueryClient_Sequencer_Call{Call: _e.mock.On("Sequencer", append([]interface{}{ctx, in}, opts...)...)} @@ -321,7 +321,7 @@ func (_c *MockQueryClient_Sequencer_Call) RunAndReturn(run func(context.Context, return _c } - +// Sequencers provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) Sequencers(ctx context.Context, in *types.QuerySequencersRequest, opts ...grpc.CallOption) (*types.QuerySequencersResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -358,15 +358,15 @@ func (_m *MockQueryClient) Sequencers(ctx context.Context, in *types.QuerySequen return r0, r1 } - +// MockQueryClient_Sequencers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sequencers' type MockQueryClient_Sequencers_Call struct { *mock.Call } - - - - +// Sequencers is a helper method to define mock.On call +// - ctx context.Context +// - in *types.QuerySequencersRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) Sequencers(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Sequencers_Call { return &MockQueryClient_Sequencers_Call{Call: _e.mock.On("Sequencers", append([]interface{}{ctx, in}, opts...)...)} @@ -395,7 +395,7 @@ func (_c *MockQueryClient_Sequencers_Call) RunAndReturn(run func(context.Context return _c } - +// SequencersByRollapp provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) SequencersByRollapp(ctx context.Context, in *types.QueryGetSequencersByRollappRequest, opts ...grpc.CallOption) 
(*types.QueryGetSequencersByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -432,15 +432,15 @@ func (_m *MockQueryClient) SequencersByRollapp(ctx context.Context, in *types.Qu return r0, r1 } - +// MockQueryClient_SequencersByRollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SequencersByRollapp' type MockQueryClient_SequencersByRollapp_Call struct { *mock.Call } - - - - +// SequencersByRollapp is a helper method to define mock.On call +// - ctx context.Context +// - in *types.QueryGetSequencersByRollappRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) SequencersByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_SequencersByRollapp_Call { return &MockQueryClient_SequencersByRollapp_Call{Call: _e.mock.On("SequencersByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -469,7 +469,7 @@ func (_c *MockQueryClient_SequencersByRollapp_Call) RunAndReturn(run func(contex return _c } - +// SequencersByRollappByStatus provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) SequencersByRollappByStatus(ctx context.Context, in *types.QueryGetSequencersByRollappByStatusRequest, opts ...grpc.CallOption) (*types.QueryGetSequencersByRollappByStatusResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -506,15 +506,15 @@ func (_m *MockQueryClient) SequencersByRollappByStatus(ctx context.Context, in * return r0, r1 } - +// MockQueryClient_SequencersByRollappByStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SequencersByRollappByStatus' type MockQueryClient_SequencersByRollappByStatus_Call struct { *mock.Call } - - - - +// SequencersByRollappByStatus is a helper method to define mock.On call +// - ctx context.Context +// - in *types.QueryGetSequencersByRollappByStatusRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) SequencersByRollappByStatus(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_SequencersByRollappByStatus_Call { return &MockQueryClient_SequencersByRollappByStatus_Call{Call: _e.mock.On("SequencersByRollappByStatus", append([]interface{}{ctx, in}, opts...)...)} @@ -543,8 +543,8 @@ func (_c *MockQueryClient_SequencersByRollappByStatus_Call) RunAndReturn(run fun return _c } - - +// NewMockQueryClient creates a new instance of MockQueryClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. func NewMockQueryClient(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp/mock_QueryClient.go b/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp/mock_QueryClient.go index c73eb7ea5..80d7ab986 100644 --- a/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp/mock_QueryClient.go +++ b/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp/mock_QueryClient.go @@ -1,4 +1,4 @@ - +// Code generated by mockery v2.42.3. DO NOT EDIT. 
package rollapp @@ -12,7 +12,7 @@ import ( rollapp "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp" ) - +// MockQueryClient is an autogenerated mock type for the QueryClient type type MockQueryClient struct { mock.Mock } @@ -25,7 +25,7 @@ func (_m *MockQueryClient) EXPECT() *MockQueryClient_Expecter { return &MockQueryClient_Expecter{mock: &_m.Mock} } - +// LatestHeight provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) LatestHeight(ctx context.Context, in *rollapp.QueryGetLatestHeightRequest, opts ...grpc.CallOption) (*rollapp.QueryGetLatestHeightResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -62,15 +62,15 @@ func (_m *MockQueryClient) LatestHeight(ctx context.Context, in *rollapp.QueryGe return r0, r1 } - +// MockQueryClient_LatestHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LatestHeight' type MockQueryClient_LatestHeight_Call struct { *mock.Call } - - - - +// LatestHeight is a helper method to define mock.On call +// - ctx context.Context +// - in *rollapp.QueryGetLatestHeightRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) LatestHeight(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_LatestHeight_Call { return &MockQueryClient_LatestHeight_Call{Call: _e.mock.On("LatestHeight", append([]interface{}{ctx, in}, opts...)...)} @@ -99,7 +99,7 @@ func (_c *MockQueryClient_LatestHeight_Call) RunAndReturn(run func(context.Conte return _c } - +// LatestStateIndex provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) LatestStateIndex(ctx context.Context, in *rollapp.QueryGetLatestStateIndexRequest, opts ...grpc.CallOption) (*rollapp.QueryGetLatestStateIndexResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -136,15 +136,15 @@ func (_m *MockQueryClient) LatestStateIndex(ctx context.Context, in *rollapp.Que return r0, r1 } - +// MockQueryClient_LatestStateIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LatestStateIndex' type MockQueryClient_LatestStateIndex_Call struct { *mock.Call } - - - - +// LatestStateIndex is a helper method to define mock.On call +// - ctx context.Context +// - in *rollapp.QueryGetLatestStateIndexRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) LatestStateIndex(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_LatestStateIndex_Call { return &MockQueryClient_LatestStateIndex_Call{Call: _e.mock.On("LatestStateIndex", append([]interface{}{ctx, in}, opts...)...)} @@ -173,7 +173,7 @@ func (_c *MockQueryClient_LatestStateIndex_Call) RunAndReturn(run func(context.C return _c } - +// ObsoleteDRSVersions provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) ObsoleteDRSVersions(ctx context.Context, in *rollapp.QueryObsoleteDRSVersionsRequest, opts ...grpc.CallOption) (*rollapp.QueryObsoleteDRSVersionsResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -210,15 +210,15 @@ func (_m *MockQueryClient) ObsoleteDRSVersions(ctx context.Context, in *rollapp. 
return r0, r1 } - +// MockQueryClient_ObsoleteDRSVersions_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ObsoleteDRSVersions' type MockQueryClient_ObsoleteDRSVersions_Call struct { *mock.Call } - - - - +// ObsoleteDRSVersions is a helper method to define mock.On call +// - ctx context.Context +// - in *rollapp.QueryObsoleteDRSVersionsRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) ObsoleteDRSVersions(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_ObsoleteDRSVersions_Call { return &MockQueryClient_ObsoleteDRSVersions_Call{Call: _e.mock.On("ObsoleteDRSVersions", append([]interface{}{ctx, in}, opts...)...)} @@ -247,7 +247,7 @@ func (_c *MockQueryClient_ObsoleteDRSVersions_Call) RunAndReturn(run func(contex return _c } - +// Params provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) Params(ctx context.Context, in *rollapp.QueryParamsRequest, opts ...grpc.CallOption) (*rollapp.QueryParamsResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -284,15 +284,15 @@ func (_m *MockQueryClient) Params(ctx context.Context, in *rollapp.QueryParamsRe return r0, r1 } - +// MockQueryClient_Params_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Params' type MockQueryClient_Params_Call struct { *mock.Call } - - - - +// Params is a helper method to define mock.On call +// - ctx context.Context +// - in *rollapp.QueryParamsRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) Params(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Params_Call { return &MockQueryClient_Params_Call{Call: _e.mock.On("Params", append([]interface{}{ctx, in}, opts...)...)} @@ -321,7 +321,7 @@ func (_c *MockQueryClient_Params_Call) RunAndReturn(run func(context.Context, *r return _c } - +// RegisteredDenoms provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) RegisteredDenoms(ctx context.Context, in *rollapp.QueryRegisteredDenomsRequest, opts ...grpc.CallOption) (*rollapp.QueryRegisteredDenomsResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -358,15 +358,15 @@ func (_m *MockQueryClient) RegisteredDenoms(ctx context.Context, in *rollapp.Que return r0, r1 } - +// MockQueryClient_RegisteredDenoms_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RegisteredDenoms' type MockQueryClient_RegisteredDenoms_Call struct { *mock.Call } - - - - +// RegisteredDenoms is a helper method to define mock.On call +// - ctx context.Context +// - in *rollapp.QueryRegisteredDenomsRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) RegisteredDenoms(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_RegisteredDenoms_Call { return &MockQueryClient_RegisteredDenoms_Call{Call: _e.mock.On("RegisteredDenoms", append([]interface{}{ctx, in}, opts...)...)} @@ -395,7 +395,7 @@ func (_c *MockQueryClient_RegisteredDenoms_Call) RunAndReturn(run func(context.C return _c } - +// Rollapp provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) Rollapp(ctx context.Context, in *rollapp.QueryGetRollappRequest, opts ...grpc.CallOption) (*rollapp.QueryGetRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -432,15 +432,15 @@ func (_m *MockQueryClient) Rollapp(ctx context.Context, in *rollapp.QueryGetRoll return r0, r1 } - +// 
MockQueryClient_Rollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Rollapp' type MockQueryClient_Rollapp_Call struct { *mock.Call } - - - - +// Rollapp is a helper method to define mock.On call +// - ctx context.Context +// - in *rollapp.QueryGetRollappRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) Rollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Rollapp_Call { return &MockQueryClient_Rollapp_Call{Call: _e.mock.On("Rollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -469,7 +469,7 @@ func (_c *MockQueryClient_Rollapp_Call) RunAndReturn(run func(context.Context, * return _c } - +// RollappAll provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) RollappAll(ctx context.Context, in *rollapp.QueryAllRollappRequest, opts ...grpc.CallOption) (*rollapp.QueryAllRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -506,15 +506,15 @@ func (_m *MockQueryClient) RollappAll(ctx context.Context, in *rollapp.QueryAllR return r0, r1 } - +// MockQueryClient_RollappAll_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RollappAll' type MockQueryClient_RollappAll_Call struct { *mock.Call } - - - - +// RollappAll is a helper method to define mock.On call +// - ctx context.Context +// - in *rollapp.QueryAllRollappRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) RollappAll(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_RollappAll_Call { return &MockQueryClient_RollappAll_Call{Call: _e.mock.On("RollappAll", append([]interface{}{ctx, in}, opts...)...)} @@ -543,7 +543,7 @@ func (_c *MockQueryClient_RollappAll_Call) RunAndReturn(run func(context.Context return _c } - +// RollappByEIP155 provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) RollappByEIP155(ctx context.Context, in *rollapp.QueryGetRollappByEIP155Request, opts ...grpc.CallOption) (*rollapp.QueryGetRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -580,15 +580,15 @@ func (_m *MockQueryClient) RollappByEIP155(ctx context.Context, in *rollapp.Quer return r0, r1 } - +// MockQueryClient_RollappByEIP155_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RollappByEIP155' type MockQueryClient_RollappByEIP155_Call struct { *mock.Call } - - - - +// RollappByEIP155 is a helper method to define mock.On call +// - ctx context.Context +// - in *rollapp.QueryGetRollappByEIP155Request +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) RollappByEIP155(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_RollappByEIP155_Call { return &MockQueryClient_RollappByEIP155_Call{Call: _e.mock.On("RollappByEIP155", append([]interface{}{ctx, in}, opts...)...)} @@ -617,7 +617,7 @@ func (_c *MockQueryClient_RollappByEIP155_Call) RunAndReturn(run func(context.Co return _c } - +// StateInfo provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) StateInfo(ctx context.Context, in *rollapp.QueryGetStateInfoRequest, opts ...grpc.CallOption) (*rollapp.QueryGetStateInfoResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -654,15 +654,15 @@ func (_m *MockQueryClient) StateInfo(ctx context.Context, in *rollapp.QueryGetSt return r0, r1 } - +// MockQueryClient_StateInfo_Call is a *mock.Call that shadows Run/Return methods with 
type explicit version for method 'StateInfo' type MockQueryClient_StateInfo_Call struct { *mock.Call } - - - - +// StateInfo is a helper method to define mock.On call +// - ctx context.Context +// - in *rollapp.QueryGetStateInfoRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) StateInfo(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_StateInfo_Call { return &MockQueryClient_StateInfo_Call{Call: _e.mock.On("StateInfo", append([]interface{}{ctx, in}, opts...)...)} @@ -691,7 +691,7 @@ func (_c *MockQueryClient_StateInfo_Call) RunAndReturn(run func(context.Context, return _c } - +// ValidateGenesisBridge provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) ValidateGenesisBridge(ctx context.Context, in *rollapp.QueryValidateGenesisBridgeRequest, opts ...grpc.CallOption) (*rollapp.QueryValidateGenesisBridgeResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -728,15 +728,15 @@ func (_m *MockQueryClient) ValidateGenesisBridge(ctx context.Context, in *rollap return r0, r1 } - +// MockQueryClient_ValidateGenesisBridge_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ValidateGenesisBridge' type MockQueryClient_ValidateGenesisBridge_Call struct { *mock.Call } - - - - +// ValidateGenesisBridge is a helper method to define mock.On call +// - ctx context.Context +// - in *rollapp.QueryValidateGenesisBridgeRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) ValidateGenesisBridge(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_ValidateGenesisBridge_Call { return &MockQueryClient_ValidateGenesisBridge_Call{Call: _e.mock.On("ValidateGenesisBridge", append([]interface{}{ctx, in}, opts...)...)} @@ -765,8 +765,8 @@ func (_c *MockQueryClient_ValidateGenesisBridge_Call) RunAndReturn(run func(cont return _c } - - +// NewMockQueryClient creates a new instance of MockQueryClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. func NewMockQueryClient(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer/mock_QueryClient.go b/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer/mock_QueryClient.go index 0b76b1a9b..af5bcaf4b 100644 --- a/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer/mock_QueryClient.go +++ b/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer/mock_QueryClient.go @@ -1,4 +1,4 @@ - +// Code generated by mockery v2.42.3. DO NOT EDIT. 
package sequencer @@ -12,7 +12,7 @@ import ( sequencer "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer" ) - +// MockQueryClient is an autogenerated mock type for the QueryClient type type MockQueryClient struct { mock.Mock } @@ -25,7 +25,7 @@ func (_m *MockQueryClient) EXPECT() *MockQueryClient_Expecter { return &MockQueryClient_Expecter{mock: &_m.Mock} } - +// GetNextProposerByRollapp provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) GetNextProposerByRollapp(ctx context.Context, in *sequencer.QueryGetNextProposerByRollappRequest, opts ...grpc.CallOption) (*sequencer.QueryGetNextProposerByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -62,15 +62,15 @@ func (_m *MockQueryClient) GetNextProposerByRollapp(ctx context.Context, in *seq return r0, r1 } - +// MockQueryClient_GetNextProposerByRollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNextProposerByRollapp' type MockQueryClient_GetNextProposerByRollapp_Call struct { *mock.Call } - - - - +// GetNextProposerByRollapp is a helper method to define mock.On call +// - ctx context.Context +// - in *sequencer.QueryGetNextProposerByRollappRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) GetNextProposerByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_GetNextProposerByRollapp_Call { return &MockQueryClient_GetNextProposerByRollapp_Call{Call: _e.mock.On("GetNextProposerByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -99,7 +99,7 @@ func (_c *MockQueryClient_GetNextProposerByRollapp_Call) RunAndReturn(run func(c return _c } - +// GetProposerByRollapp provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) GetProposerByRollapp(ctx context.Context, in *sequencer.QueryGetProposerByRollappRequest, opts ...grpc.CallOption) (*sequencer.QueryGetProposerByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -136,15 +136,15 @@ func (_m *MockQueryClient) GetProposerByRollapp(ctx context.Context, in *sequenc return r0, r1 } - +// MockQueryClient_GetProposerByRollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProposerByRollapp' type MockQueryClient_GetProposerByRollapp_Call struct { *mock.Call } - - - - +// GetProposerByRollapp is a helper method to define mock.On call +// - ctx context.Context +// - in *sequencer.QueryGetProposerByRollappRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) GetProposerByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_GetProposerByRollapp_Call { return &MockQueryClient_GetProposerByRollapp_Call{Call: _e.mock.On("GetProposerByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -173,7 +173,7 @@ func (_c *MockQueryClient_GetProposerByRollapp_Call) RunAndReturn(run func(conte return _c } - +// Params provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) Params(ctx context.Context, in *sequencer.QueryParamsRequest, opts ...grpc.CallOption) (*sequencer.QueryParamsResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -210,15 +210,15 @@ func (_m *MockQueryClient) Params(ctx context.Context, in *sequencer.QueryParams return r0, r1 } - +// MockQueryClient_Params_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Params' type 
MockQueryClient_Params_Call struct { *mock.Call } - - - - +// Params is a helper method to define mock.On call +// - ctx context.Context +// - in *sequencer.QueryParamsRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) Params(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Params_Call { return &MockQueryClient_Params_Call{Call: _e.mock.On("Params", append([]interface{}{ctx, in}, opts...)...)} @@ -247,7 +247,7 @@ func (_c *MockQueryClient_Params_Call) RunAndReturn(run func(context.Context, *s return _c } - +// Proposers provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) Proposers(ctx context.Context, in *sequencer.QueryProposersRequest, opts ...grpc.CallOption) (*sequencer.QueryProposersResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -284,15 +284,15 @@ func (_m *MockQueryClient) Proposers(ctx context.Context, in *sequencer.QueryPro return r0, r1 } - +// MockQueryClient_Proposers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Proposers' type MockQueryClient_Proposers_Call struct { *mock.Call } - - - - +// Proposers is a helper method to define mock.On call +// - ctx context.Context +// - in *sequencer.QueryProposersRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) Proposers(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Proposers_Call { return &MockQueryClient_Proposers_Call{Call: _e.mock.On("Proposers", append([]interface{}{ctx, in}, opts...)...)} @@ -321,7 +321,7 @@ func (_c *MockQueryClient_Proposers_Call) RunAndReturn(run func(context.Context, return _c } - +// Sequencer provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) Sequencer(ctx context.Context, in *sequencer.QueryGetSequencerRequest, opts ...grpc.CallOption) (*sequencer.QueryGetSequencerResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -358,15 +358,15 @@ func (_m *MockQueryClient) Sequencer(ctx context.Context, in *sequencer.QueryGet return r0, r1 } - +// MockQueryClient_Sequencer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sequencer' type MockQueryClient_Sequencer_Call struct { *mock.Call } - - - - +// Sequencer is a helper method to define mock.On call +// - ctx context.Context +// - in *sequencer.QueryGetSequencerRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) Sequencer(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Sequencer_Call { return &MockQueryClient_Sequencer_Call{Call: _e.mock.On("Sequencer", append([]interface{}{ctx, in}, opts...)...)} @@ -395,7 +395,7 @@ func (_c *MockQueryClient_Sequencer_Call) RunAndReturn(run func(context.Context, return _c } - +// Sequencers provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) Sequencers(ctx context.Context, in *sequencer.QuerySequencersRequest, opts ...grpc.CallOption) (*sequencer.QuerySequencersResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -432,15 +432,15 @@ func (_m *MockQueryClient) Sequencers(ctx context.Context, in *sequencer.QuerySe return r0, r1 } - +// MockQueryClient_Sequencers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sequencers' type MockQueryClient_Sequencers_Call struct { *mock.Call } - - - - +// Sequencers is a helper method to define mock.On call +// - ctx context.Context +// - in 
*sequencer.QuerySequencersRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) Sequencers(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Sequencers_Call { return &MockQueryClient_Sequencers_Call{Call: _e.mock.On("Sequencers", append([]interface{}{ctx, in}, opts...)...)} @@ -469,7 +469,7 @@ func (_c *MockQueryClient_Sequencers_Call) RunAndReturn(run func(context.Context return _c } - +// SequencersByRollapp provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) SequencersByRollapp(ctx context.Context, in *sequencer.QueryGetSequencersByRollappRequest, opts ...grpc.CallOption) (*sequencer.QueryGetSequencersByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -506,15 +506,15 @@ func (_m *MockQueryClient) SequencersByRollapp(ctx context.Context, in *sequence return r0, r1 } - +// MockQueryClient_SequencersByRollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SequencersByRollapp' type MockQueryClient_SequencersByRollapp_Call struct { *mock.Call } - - - - +// SequencersByRollapp is a helper method to define mock.On call +// - ctx context.Context +// - in *sequencer.QueryGetSequencersByRollappRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) SequencersByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_SequencersByRollapp_Call { return &MockQueryClient_SequencersByRollapp_Call{Call: _e.mock.On("SequencersByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -543,7 +543,7 @@ func (_c *MockQueryClient_SequencersByRollapp_Call) RunAndReturn(run func(contex return _c } - +// SequencersByRollappByStatus provides a mock function with given fields: ctx, in, opts func (_m *MockQueryClient) SequencersByRollappByStatus(ctx context.Context, in *sequencer.QueryGetSequencersByRollappByStatusRequest, opts ...grpc.CallOption) (*sequencer.QueryGetSequencersByRollappByStatusResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -580,15 +580,15 @@ func (_m *MockQueryClient) SequencersByRollappByStatus(ctx context.Context, in * return r0, r1 } - +// MockQueryClient_SequencersByRollappByStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SequencersByRollappByStatus' type MockQueryClient_SequencersByRollappByStatus_Call struct { *mock.Call } - - - - +// SequencersByRollappByStatus is a helper method to define mock.On call +// - ctx context.Context +// - in *sequencer.QueryGetSequencersByRollappByStatusRequest +// - opts ...grpc.CallOption func (_e *MockQueryClient_Expecter) SequencersByRollappByStatus(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_SequencersByRollappByStatus_Call { return &MockQueryClient_SequencersByRollappByStatus_Call{Call: _e.mock.On("SequencersByRollappByStatus", append([]interface{}{ctx, in}, opts...)...)} @@ -617,8 +617,8 @@ func (_c *MockQueryClient_SequencersByRollappByStatus_Call) RunAndReturn(run fun return _c } - - +// NewMockQueryClient creates a new instance of MockQueryClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
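The expecter comments restored above describe the same test-time usage for every mock regenerated in this patch: construct the mock with the testing value, stub calls through EXPECT(), and let the registered cleanup assert the expectations. Below is a minimal sketch of that flow against the sequencer MockQueryClient; the test name, import aliases, and mock.Anything matchers are illustrative and not part of the generated code.

package sequencer

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"

	sequencertypes "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer"
)

// TestQueryClientMockUsage is illustrative only; it exercises the expecter API
// that the restored comments document.
func TestQueryClientMockUsage(t *testing.T) {
	m := NewMockQueryClient(t) // registers cleanup and expectation assertions on t

	// Any context and any request match this expectation.
	m.EXPECT().
		Params(mock.Anything, mock.Anything).
		Return(&sequencertypes.QueryParamsResponse{}, nil)

	resp, err := m.Params(context.Background(), &sequencertypes.QueryParamsRequest{})
	if err != nil || resp == nil {
		t.Fatalf("mocked Params call failed: resp=%v, err=%v", resp, err)
	}
}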
func NewMockQueryClient(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/tendermint/tendermint/abci/types/mock_Application.go b/mocks/github.com/tendermint/tendermint/abci/types/mock_Application.go index db13fb1e2..7393ef94e 100644 --- a/mocks/github.com/tendermint/tendermint/abci/types/mock_Application.go +++ b/mocks/github.com/tendermint/tendermint/abci/types/mock_Application.go @@ -1,4 +1,4 @@ - +// Code generated by mockery v2.42.3. DO NOT EDIT. package types @@ -7,7 +7,7 @@ import ( types "github.com/tendermint/tendermint/abci/types" ) - +// MockApplication is an autogenerated mock type for the Application type type MockApplication struct { mock.Mock } @@ -20,7 +20,7 @@ func (_m *MockApplication) EXPECT() *MockApplication_Expecter { return &MockApplication_Expecter{mock: &_m.Mock} } - +// ApplySnapshotChunk provides a mock function with given fields: _a0 func (_m *MockApplication) ApplySnapshotChunk(_a0 types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk { ret := _m.Called(_a0) @@ -38,13 +38,13 @@ func (_m *MockApplication) ApplySnapshotChunk(_a0 types.RequestApplySnapshotChun return r0 } - +// MockApplication_ApplySnapshotChunk_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ApplySnapshotChunk' type MockApplication_ApplySnapshotChunk_Call struct { *mock.Call } - - +// ApplySnapshotChunk is a helper method to define mock.On call +// - _a0 types.RequestApplySnapshotChunk func (_e *MockApplication_Expecter) ApplySnapshotChunk(_a0 interface{}) *MockApplication_ApplySnapshotChunk_Call { return &MockApplication_ApplySnapshotChunk_Call{Call: _e.mock.On("ApplySnapshotChunk", _a0)} } @@ -66,7 +66,7 @@ func (_c *MockApplication_ApplySnapshotChunk_Call) RunAndReturn(run func(types.R return _c } - +// BeginBlock provides a mock function with given fields: _a0 func (_m *MockApplication) BeginBlock(_a0 types.RequestBeginBlock) types.ResponseBeginBlock { ret := _m.Called(_a0) @@ -84,13 +84,13 @@ func (_m *MockApplication) BeginBlock(_a0 types.RequestBeginBlock) types.Respons return r0 } - +// MockApplication_BeginBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BeginBlock' type MockApplication_BeginBlock_Call struct { *mock.Call } - - +// BeginBlock is a helper method to define mock.On call +// - _a0 types.RequestBeginBlock func (_e *MockApplication_Expecter) BeginBlock(_a0 interface{}) *MockApplication_BeginBlock_Call { return &MockApplication_BeginBlock_Call{Call: _e.mock.On("BeginBlock", _a0)} } @@ -112,7 +112,7 @@ func (_c *MockApplication_BeginBlock_Call) RunAndReturn(run func(types.RequestBe return _c } - +// CheckTx provides a mock function with given fields: _a0 func (_m *MockApplication) CheckTx(_a0 types.RequestCheckTx) types.ResponseCheckTx { ret := _m.Called(_a0) @@ -130,13 +130,13 @@ func (_m *MockApplication) CheckTx(_a0 types.RequestCheckTx) types.ResponseCheck return r0 } - +// MockApplication_CheckTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckTx' type MockApplication_CheckTx_Call struct { *mock.Call } - - +// CheckTx is a helper method to define mock.On call +// - _a0 types.RequestCheckTx func (_e *MockApplication_Expecter) CheckTx(_a0 interface{}) *MockApplication_CheckTx_Call { return &MockApplication_CheckTx_Call{Call: _e.mock.On("CheckTx", _a0)} } @@ -158,7 +158,7 @@ func (_c *MockApplication_CheckTx_Call) RunAndReturn(run func(types.RequestCheck return _c } - +// Commit provides a mock function 
with given fields: func (_m *MockApplication) Commit() types.ResponseCommit { ret := _m.Called() @@ -176,12 +176,12 @@ func (_m *MockApplication) Commit() types.ResponseCommit { return r0 } - +// MockApplication_Commit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Commit' type MockApplication_Commit_Call struct { *mock.Call } - +// Commit is a helper method to define mock.On call func (_e *MockApplication_Expecter) Commit() *MockApplication_Commit_Call { return &MockApplication_Commit_Call{Call: _e.mock.On("Commit")} } @@ -203,7 +203,7 @@ func (_c *MockApplication_Commit_Call) RunAndReturn(run func() types.ResponseCom return _c } - +// DeliverTx provides a mock function with given fields: _a0 func (_m *MockApplication) DeliverTx(_a0 types.RequestDeliverTx) types.ResponseDeliverTx { ret := _m.Called(_a0) @@ -221,13 +221,13 @@ func (_m *MockApplication) DeliverTx(_a0 types.RequestDeliverTx) types.ResponseD return r0 } - +// MockApplication_DeliverTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeliverTx' type MockApplication_DeliverTx_Call struct { *mock.Call } - - +// DeliverTx is a helper method to define mock.On call +// - _a0 types.RequestDeliverTx func (_e *MockApplication_Expecter) DeliverTx(_a0 interface{}) *MockApplication_DeliverTx_Call { return &MockApplication_DeliverTx_Call{Call: _e.mock.On("DeliverTx", _a0)} } @@ -249,7 +249,7 @@ func (_c *MockApplication_DeliverTx_Call) RunAndReturn(run func(types.RequestDel return _c } - +// EndBlock provides a mock function with given fields: _a0 func (_m *MockApplication) EndBlock(_a0 types.RequestEndBlock) types.ResponseEndBlock { ret := _m.Called(_a0) @@ -267,13 +267,13 @@ func (_m *MockApplication) EndBlock(_a0 types.RequestEndBlock) types.ResponseEnd return r0 } - +// MockApplication_EndBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EndBlock' type MockApplication_EndBlock_Call struct { *mock.Call } - - +// EndBlock is a helper method to define mock.On call +// - _a0 types.RequestEndBlock func (_e *MockApplication_Expecter) EndBlock(_a0 interface{}) *MockApplication_EndBlock_Call { return &MockApplication_EndBlock_Call{Call: _e.mock.On("EndBlock", _a0)} } @@ -295,7 +295,7 @@ func (_c *MockApplication_EndBlock_Call) RunAndReturn(run func(types.RequestEndB return _c } - +// Info provides a mock function with given fields: _a0 func (_m *MockApplication) Info(_a0 types.RequestInfo) types.ResponseInfo { ret := _m.Called(_a0) @@ -313,13 +313,13 @@ func (_m *MockApplication) Info(_a0 types.RequestInfo) types.ResponseInfo { return r0 } - +// MockApplication_Info_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Info' type MockApplication_Info_Call struct { *mock.Call } - - +// Info is a helper method to define mock.On call +// - _a0 types.RequestInfo func (_e *MockApplication_Expecter) Info(_a0 interface{}) *MockApplication_Info_Call { return &MockApplication_Info_Call{Call: _e.mock.On("Info", _a0)} } @@ -341,7 +341,7 @@ func (_c *MockApplication_Info_Call) RunAndReturn(run func(types.RequestInfo) ty return _c } - +// InitChain provides a mock function with given fields: _a0 func (_m *MockApplication) InitChain(_a0 types.RequestInitChain) types.ResponseInitChain { ret := _m.Called(_a0) @@ -359,13 +359,13 @@ func (_m *MockApplication) InitChain(_a0 types.RequestInitChain) types.ResponseI return r0 } - +// MockApplication_InitChain_Call is a *mock.Call that shadows 
Run/Return methods with type explicit version for method 'InitChain' type MockApplication_InitChain_Call struct { *mock.Call } - - +// InitChain is a helper method to define mock.On call +// - _a0 types.RequestInitChain func (_e *MockApplication_Expecter) InitChain(_a0 interface{}) *MockApplication_InitChain_Call { return &MockApplication_InitChain_Call{Call: _e.mock.On("InitChain", _a0)} } @@ -387,7 +387,7 @@ func (_c *MockApplication_InitChain_Call) RunAndReturn(run func(types.RequestIni return _c } - +// ListSnapshots provides a mock function with given fields: _a0 func (_m *MockApplication) ListSnapshots(_a0 types.RequestListSnapshots) types.ResponseListSnapshots { ret := _m.Called(_a0) @@ -405,13 +405,13 @@ func (_m *MockApplication) ListSnapshots(_a0 types.RequestListSnapshots) types.R return r0 } - +// MockApplication_ListSnapshots_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListSnapshots' type MockApplication_ListSnapshots_Call struct { *mock.Call } - - +// ListSnapshots is a helper method to define mock.On call +// - _a0 types.RequestListSnapshots func (_e *MockApplication_Expecter) ListSnapshots(_a0 interface{}) *MockApplication_ListSnapshots_Call { return &MockApplication_ListSnapshots_Call{Call: _e.mock.On("ListSnapshots", _a0)} } @@ -433,7 +433,7 @@ func (_c *MockApplication_ListSnapshots_Call) RunAndReturn(run func(types.Reques return _c } - +// LoadSnapshotChunk provides a mock function with given fields: _a0 func (_m *MockApplication) LoadSnapshotChunk(_a0 types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk { ret := _m.Called(_a0) @@ -451,13 +451,13 @@ func (_m *MockApplication) LoadSnapshotChunk(_a0 types.RequestLoadSnapshotChunk) return r0 } - +// MockApplication_LoadSnapshotChunk_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadSnapshotChunk' type MockApplication_LoadSnapshotChunk_Call struct { *mock.Call } - - +// LoadSnapshotChunk is a helper method to define mock.On call +// - _a0 types.RequestLoadSnapshotChunk func (_e *MockApplication_Expecter) LoadSnapshotChunk(_a0 interface{}) *MockApplication_LoadSnapshotChunk_Call { return &MockApplication_LoadSnapshotChunk_Call{Call: _e.mock.On("LoadSnapshotChunk", _a0)} } @@ -479,7 +479,7 @@ func (_c *MockApplication_LoadSnapshotChunk_Call) RunAndReturn(run func(types.Re return _c } - +// OfferSnapshot provides a mock function with given fields: _a0 func (_m *MockApplication) OfferSnapshot(_a0 types.RequestOfferSnapshot) types.ResponseOfferSnapshot { ret := _m.Called(_a0) @@ -497,13 +497,13 @@ func (_m *MockApplication) OfferSnapshot(_a0 types.RequestOfferSnapshot) types.R return r0 } - +// MockApplication_OfferSnapshot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OfferSnapshot' type MockApplication_OfferSnapshot_Call struct { *mock.Call } - - +// OfferSnapshot is a helper method to define mock.On call +// - _a0 types.RequestOfferSnapshot func (_e *MockApplication_Expecter) OfferSnapshot(_a0 interface{}) *MockApplication_OfferSnapshot_Call { return &MockApplication_OfferSnapshot_Call{Call: _e.mock.On("OfferSnapshot", _a0)} } @@ -525,7 +525,7 @@ func (_c *MockApplication_OfferSnapshot_Call) RunAndReturn(run func(types.Reques return _c } - +// Query provides a mock function with given fields: _a0 func (_m *MockApplication) Query(_a0 types.RequestQuery) types.ResponseQuery { ret := _m.Called(_a0) @@ -543,13 +543,13 @@ func (_m *MockApplication) Query(_a0 types.RequestQuery) 
types.ResponseQuery { return r0 } - +// MockApplication_Query_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Query' type MockApplication_Query_Call struct { *mock.Call } - - +// Query is a helper method to define mock.On call +// - _a0 types.RequestQuery func (_e *MockApplication_Expecter) Query(_a0 interface{}) *MockApplication_Query_Call { return &MockApplication_Query_Call{Call: _e.mock.On("Query", _a0)} } @@ -571,7 +571,7 @@ func (_c *MockApplication_Query_Call) RunAndReturn(run func(types.RequestQuery) return _c } - +// SetOption provides a mock function with given fields: _a0 func (_m *MockApplication) SetOption(_a0 types.RequestSetOption) types.ResponseSetOption { ret := _m.Called(_a0) @@ -589,13 +589,13 @@ func (_m *MockApplication) SetOption(_a0 types.RequestSetOption) types.ResponseS return r0 } - +// MockApplication_SetOption_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetOption' type MockApplication_SetOption_Call struct { *mock.Call } - - +// SetOption is a helper method to define mock.On call +// - _a0 types.RequestSetOption func (_e *MockApplication_Expecter) SetOption(_a0 interface{}) *MockApplication_SetOption_Call { return &MockApplication_SetOption_Call{Call: _e.mock.On("SetOption", _a0)} } @@ -617,8 +617,8 @@ func (_c *MockApplication_SetOption_Call) RunAndReturn(run func(types.RequestSet return _c } - - +// NewMockApplication creates a new instance of MockApplication. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. func NewMockApplication(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/tendermint/tendermint/proxy/mock_AppConnConsensus.go b/mocks/github.com/tendermint/tendermint/proxy/mock_AppConnConsensus.go index fc03566e5..9ec6b2d18 100644 --- a/mocks/github.com/tendermint/tendermint/proxy/mock_AppConnConsensus.go +++ b/mocks/github.com/tendermint/tendermint/proxy/mock_AppConnConsensus.go @@ -1,4 +1,4 @@ - +// Code generated by mockery v2.42.3. DO NOT EDIT. package proxy @@ -9,7 +9,7 @@ import ( types "github.com/tendermint/tendermint/abci/types" ) - +// MockAppConnConsensus is an autogenerated mock type for the AppConnConsensus type type MockAppConnConsensus struct { mock.Mock } @@ -22,7 +22,7 @@ func (_m *MockAppConnConsensus) EXPECT() *MockAppConnConsensus_Expecter { return &MockAppConnConsensus_Expecter{mock: &_m.Mock} } - +// BeginBlockSync provides a mock function with given fields: _a0 func (_m *MockAppConnConsensus) BeginBlockSync(_a0 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { ret := _m.Called(_a0) @@ -52,13 +52,13 @@ func (_m *MockAppConnConsensus) BeginBlockSync(_a0 types.RequestBeginBlock) (*ty return r0, r1 } - +// MockAppConnConsensus_BeginBlockSync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BeginBlockSync' type MockAppConnConsensus_BeginBlockSync_Call struct { *mock.Call } - - +// BeginBlockSync is a helper method to define mock.On call +// - _a0 types.RequestBeginBlock func (_e *MockAppConnConsensus_Expecter) BeginBlockSync(_a0 interface{}) *MockAppConnConsensus_BeginBlockSync_Call { return &MockAppConnConsensus_BeginBlockSync_Call{Call: _e.mock.On("BeginBlockSync", _a0)} } @@ -80,7 +80,7 @@ func (_c *MockAppConnConsensus_BeginBlockSync_Call) RunAndReturn(run func(types. 
return _c } - +// CommitSync provides a mock function with given fields: func (_m *MockAppConnConsensus) CommitSync() (*types.ResponseCommit, error) { ret := _m.Called() @@ -110,12 +110,12 @@ func (_m *MockAppConnConsensus) CommitSync() (*types.ResponseCommit, error) { return r0, r1 } - +// MockAppConnConsensus_CommitSync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CommitSync' type MockAppConnConsensus_CommitSync_Call struct { *mock.Call } - +// CommitSync is a helper method to define mock.On call func (_e *MockAppConnConsensus_Expecter) CommitSync() *MockAppConnConsensus_CommitSync_Call { return &MockAppConnConsensus_CommitSync_Call{Call: _e.mock.On("CommitSync")} } @@ -137,7 +137,7 @@ func (_c *MockAppConnConsensus_CommitSync_Call) RunAndReturn(run func() (*types. return _c } - +// DeliverTxAsync provides a mock function with given fields: _a0 func (_m *MockAppConnConsensus) DeliverTxAsync(_a0 types.RequestDeliverTx) *abcicli.ReqRes { ret := _m.Called(_a0) @@ -157,13 +157,13 @@ func (_m *MockAppConnConsensus) DeliverTxAsync(_a0 types.RequestDeliverTx) *abci return r0 } - +// MockAppConnConsensus_DeliverTxAsync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeliverTxAsync' type MockAppConnConsensus_DeliverTxAsync_Call struct { *mock.Call } - - +// DeliverTxAsync is a helper method to define mock.On call +// - _a0 types.RequestDeliverTx func (_e *MockAppConnConsensus_Expecter) DeliverTxAsync(_a0 interface{}) *MockAppConnConsensus_DeliverTxAsync_Call { return &MockAppConnConsensus_DeliverTxAsync_Call{Call: _e.mock.On("DeliverTxAsync", _a0)} } @@ -185,7 +185,7 @@ func (_c *MockAppConnConsensus_DeliverTxAsync_Call) RunAndReturn(run func(types. return _c } - +// EndBlockSync provides a mock function with given fields: _a0 func (_m *MockAppConnConsensus) EndBlockSync(_a0 types.RequestEndBlock) (*types.ResponseEndBlock, error) { ret := _m.Called(_a0) @@ -215,13 +215,13 @@ func (_m *MockAppConnConsensus) EndBlockSync(_a0 types.RequestEndBlock) (*types. 
return r0, r1 } - +// MockAppConnConsensus_EndBlockSync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EndBlockSync' type MockAppConnConsensus_EndBlockSync_Call struct { *mock.Call } - - +// EndBlockSync is a helper method to define mock.On call +// - _a0 types.RequestEndBlock func (_e *MockAppConnConsensus_Expecter) EndBlockSync(_a0 interface{}) *MockAppConnConsensus_EndBlockSync_Call { return &MockAppConnConsensus_EndBlockSync_Call{Call: _e.mock.On("EndBlockSync", _a0)} } @@ -243,7 +243,7 @@ func (_c *MockAppConnConsensus_EndBlockSync_Call) RunAndReturn(run func(types.Re return _c } - +// Error provides a mock function with given fields: func (_m *MockAppConnConsensus) Error() error { ret := _m.Called() @@ -261,12 +261,12 @@ func (_m *MockAppConnConsensus) Error() error { return r0 } - +// MockAppConnConsensus_Error_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Error' type MockAppConnConsensus_Error_Call struct { *mock.Call } - +// Error is a helper method to define mock.On call func (_e *MockAppConnConsensus_Expecter) Error() *MockAppConnConsensus_Error_Call { return &MockAppConnConsensus_Error_Call{Call: _e.mock.On("Error")} } @@ -288,7 +288,7 @@ func (_c *MockAppConnConsensus_Error_Call) RunAndReturn(run func() error) *MockA return _c } - +// InitChainSync provides a mock function with given fields: _a0 func (_m *MockAppConnConsensus) InitChainSync(_a0 types.RequestInitChain) (*types.ResponseInitChain, error) { ret := _m.Called(_a0) @@ -318,13 +318,13 @@ func (_m *MockAppConnConsensus) InitChainSync(_a0 types.RequestInitChain) (*type return r0, r1 } - +// MockAppConnConsensus_InitChainSync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InitChainSync' type MockAppConnConsensus_InitChainSync_Call struct { *mock.Call } - - +// InitChainSync is a helper method to define mock.On call +// - _a0 types.RequestInitChain func (_e *MockAppConnConsensus_Expecter) InitChainSync(_a0 interface{}) *MockAppConnConsensus_InitChainSync_Call { return &MockAppConnConsensus_InitChainSync_Call{Call: _e.mock.On("InitChainSync", _a0)} } @@ -346,18 +346,18 @@ func (_c *MockAppConnConsensus_InitChainSync_Call) RunAndReturn(run func(types.R return _c } - +// SetResponseCallback provides a mock function with given fields: _a0 func (_m *MockAppConnConsensus) SetResponseCallback(_a0 abcicli.Callback) { _m.Called(_a0) } - +// MockAppConnConsensus_SetResponseCallback_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetResponseCallback' type MockAppConnConsensus_SetResponseCallback_Call struct { *mock.Call } - - +// SetResponseCallback is a helper method to define mock.On call +// - _a0 abcicli.Callback func (_e *MockAppConnConsensus_Expecter) SetResponseCallback(_a0 interface{}) *MockAppConnConsensus_SetResponseCallback_Call { return &MockAppConnConsensus_SetResponseCallback_Call{Call: _e.mock.On("SetResponseCallback", _a0)} } @@ -379,8 +379,8 @@ func (_c *MockAppConnConsensus_SetResponseCallback_Call) RunAndReturn(run func(a return _c } - - +// NewMockAppConnConsensus creates a new instance of MockAppConnConsensus. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
func NewMockAppConnConsensus(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/tendermint/tendermint/proxy/mock_AppConns.go b/mocks/github.com/tendermint/tendermint/proxy/mock_AppConns.go index ea1b7934a..affc90a4e 100644 --- a/mocks/github.com/tendermint/tendermint/proxy/mock_AppConns.go +++ b/mocks/github.com/tendermint/tendermint/proxy/mock_AppConns.go @@ -1,4 +1,4 @@ - +// Code generated by mockery v2.42.3. DO NOT EDIT. package proxy @@ -9,7 +9,7 @@ import ( proxy "github.com/tendermint/tendermint/proxy" ) - +// MockAppConns is an autogenerated mock type for the AppConns type type MockAppConns struct { mock.Mock } @@ -22,7 +22,7 @@ func (_m *MockAppConns) EXPECT() *MockAppConns_Expecter { return &MockAppConns_Expecter{mock: &_m.Mock} } - +// Consensus provides a mock function with given fields: func (_m *MockAppConns) Consensus() proxy.AppConnConsensus { ret := _m.Called() @@ -42,12 +42,12 @@ func (_m *MockAppConns) Consensus() proxy.AppConnConsensus { return r0 } - +// MockAppConns_Consensus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Consensus' type MockAppConns_Consensus_Call struct { *mock.Call } - +// Consensus is a helper method to define mock.On call func (_e *MockAppConns_Expecter) Consensus() *MockAppConns_Consensus_Call { return &MockAppConns_Consensus_Call{Call: _e.mock.On("Consensus")} } @@ -69,7 +69,7 @@ func (_c *MockAppConns_Consensus_Call) RunAndReturn(run func() proxy.AppConnCons return _c } - +// IsRunning provides a mock function with given fields: func (_m *MockAppConns) IsRunning() bool { ret := _m.Called() @@ -87,12 +87,12 @@ func (_m *MockAppConns) IsRunning() bool { return r0 } - +// MockAppConns_IsRunning_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsRunning' type MockAppConns_IsRunning_Call struct { *mock.Call } - +// IsRunning is a helper method to define mock.On call func (_e *MockAppConns_Expecter) IsRunning() *MockAppConns_IsRunning_Call { return &MockAppConns_IsRunning_Call{Call: _e.mock.On("IsRunning")} } @@ -114,7 +114,7 @@ func (_c *MockAppConns_IsRunning_Call) RunAndReturn(run func() bool) *MockAppCon return _c } - +// Mempool provides a mock function with given fields: func (_m *MockAppConns) Mempool() proxy.AppConnMempool { ret := _m.Called() @@ -134,12 +134,12 @@ func (_m *MockAppConns) Mempool() proxy.AppConnMempool { return r0 } - +// MockAppConns_Mempool_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Mempool' type MockAppConns_Mempool_Call struct { *mock.Call } - +// Mempool is a helper method to define mock.On call func (_e *MockAppConns_Expecter) Mempool() *MockAppConns_Mempool_Call { return &MockAppConns_Mempool_Call{Call: _e.mock.On("Mempool")} } @@ -161,7 +161,7 @@ func (_c *MockAppConns_Mempool_Call) RunAndReturn(run func() proxy.AppConnMempoo return _c } - +// OnReset provides a mock function with given fields: func (_m *MockAppConns) OnReset() error { ret := _m.Called() @@ -179,12 +179,12 @@ func (_m *MockAppConns) OnReset() error { return r0 } - +// MockAppConns_OnReset_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnReset' type MockAppConns_OnReset_Call struct { *mock.Call } - +// OnReset is a helper method to define mock.On call func (_e *MockAppConns_Expecter) OnReset() *MockAppConns_OnReset_Call { return &MockAppConns_OnReset_Call{Call: _e.mock.On("OnReset")} } @@ -206,7 +206,7 @@ func (_c *MockAppConns_OnReset_Call) 
RunAndReturn(run func() error) *MockAppConn return _c } - +// OnStart provides a mock function with given fields: func (_m *MockAppConns) OnStart() error { ret := _m.Called() @@ -224,12 +224,12 @@ func (_m *MockAppConns) OnStart() error { return r0 } - +// MockAppConns_OnStart_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnStart' type MockAppConns_OnStart_Call struct { *mock.Call } - +// OnStart is a helper method to define mock.On call func (_e *MockAppConns_Expecter) OnStart() *MockAppConns_OnStart_Call { return &MockAppConns_OnStart_Call{Call: _e.mock.On("OnStart")} } @@ -251,17 +251,17 @@ func (_c *MockAppConns_OnStart_Call) RunAndReturn(run func() error) *MockAppConn return _c } - +// OnStop provides a mock function with given fields: func (_m *MockAppConns) OnStop() { _m.Called() } - +// MockAppConns_OnStop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnStop' type MockAppConns_OnStop_Call struct { *mock.Call } - +// OnStop is a helper method to define mock.On call func (_e *MockAppConns_Expecter) OnStop() *MockAppConns_OnStop_Call { return &MockAppConns_OnStop_Call{Call: _e.mock.On("OnStop")} } @@ -283,7 +283,7 @@ func (_c *MockAppConns_OnStop_Call) RunAndReturn(run func()) *MockAppConns_OnSto return _c } - +// Query provides a mock function with given fields: func (_m *MockAppConns) Query() proxy.AppConnQuery { ret := _m.Called() @@ -303,12 +303,12 @@ func (_m *MockAppConns) Query() proxy.AppConnQuery { return r0 } - +// MockAppConns_Query_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Query' type MockAppConns_Query_Call struct { *mock.Call } - +// Query is a helper method to define mock.On call func (_e *MockAppConns_Expecter) Query() *MockAppConns_Query_Call { return &MockAppConns_Query_Call{Call: _e.mock.On("Query")} } @@ -330,7 +330,7 @@ func (_c *MockAppConns_Query_Call) RunAndReturn(run func() proxy.AppConnQuery) * return _c } - +// Quit provides a mock function with given fields: func (_m *MockAppConns) Quit() <-chan struct{} { ret := _m.Called() @@ -350,12 +350,12 @@ func (_m *MockAppConns) Quit() <-chan struct{} { return r0 } - +// MockAppConns_Quit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Quit' type MockAppConns_Quit_Call struct { *mock.Call } - +// Quit is a helper method to define mock.On call func (_e *MockAppConns_Expecter) Quit() *MockAppConns_Quit_Call { return &MockAppConns_Quit_Call{Call: _e.mock.On("Quit")} } @@ -377,7 +377,7 @@ func (_c *MockAppConns_Quit_Call) RunAndReturn(run func() <-chan struct{}) *Mock return _c } - +// Reset provides a mock function with given fields: func (_m *MockAppConns) Reset() error { ret := _m.Called() @@ -395,12 +395,12 @@ func (_m *MockAppConns) Reset() error { return r0 } - +// MockAppConns_Reset_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Reset' type MockAppConns_Reset_Call struct { *mock.Call } - +// Reset is a helper method to define mock.On call func (_e *MockAppConns_Expecter) Reset() *MockAppConns_Reset_Call { return &MockAppConns_Reset_Call{Call: _e.mock.On("Reset")} } @@ -422,18 +422,18 @@ func (_c *MockAppConns_Reset_Call) RunAndReturn(run func() error) *MockAppConns_ return _c } - +// SetLogger provides a mock function with given fields: _a0 func (_m *MockAppConns) SetLogger(_a0 log.Logger) { _m.Called(_a0) } - +// MockAppConns_SetLogger_Call is a *mock.Call that shadows Run/Return 
methods with type explicit version for method 'SetLogger' type MockAppConns_SetLogger_Call struct { *mock.Call } - - +// SetLogger is a helper method to define mock.On call +// - _a0 log.Logger func (_e *MockAppConns_Expecter) SetLogger(_a0 interface{}) *MockAppConns_SetLogger_Call { return &MockAppConns_SetLogger_Call{Call: _e.mock.On("SetLogger", _a0)} } @@ -455,7 +455,7 @@ func (_c *MockAppConns_SetLogger_Call) RunAndReturn(run func(log.Logger)) *MockA return _c } - +// Snapshot provides a mock function with given fields: func (_m *MockAppConns) Snapshot() proxy.AppConnSnapshot { ret := _m.Called() @@ -475,12 +475,12 @@ func (_m *MockAppConns) Snapshot() proxy.AppConnSnapshot { return r0 } - +// MockAppConns_Snapshot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Snapshot' type MockAppConns_Snapshot_Call struct { *mock.Call } - +// Snapshot is a helper method to define mock.On call func (_e *MockAppConns_Expecter) Snapshot() *MockAppConns_Snapshot_Call { return &MockAppConns_Snapshot_Call{Call: _e.mock.On("Snapshot")} } @@ -502,7 +502,7 @@ func (_c *MockAppConns_Snapshot_Call) RunAndReturn(run func() proxy.AppConnSnaps return _c } - +// Start provides a mock function with given fields: func (_m *MockAppConns) Start() error { ret := _m.Called() @@ -520,12 +520,12 @@ func (_m *MockAppConns) Start() error { return r0 } - +// MockAppConns_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' type MockAppConns_Start_Call struct { *mock.Call } - +// Start is a helper method to define mock.On call func (_e *MockAppConns_Expecter) Start() *MockAppConns_Start_Call { return &MockAppConns_Start_Call{Call: _e.mock.On("Start")} } @@ -547,7 +547,7 @@ func (_c *MockAppConns_Start_Call) RunAndReturn(run func() error) *MockAppConns_ return _c } - +// Stop provides a mock function with given fields: func (_m *MockAppConns) Stop() error { ret := _m.Called() @@ -565,12 +565,12 @@ func (_m *MockAppConns) Stop() error { return r0 } - +// MockAppConns_Stop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stop' type MockAppConns_Stop_Call struct { *mock.Call } - +// Stop is a helper method to define mock.On call func (_e *MockAppConns_Expecter) Stop() *MockAppConns_Stop_Call { return &MockAppConns_Stop_Call{Call: _e.mock.On("Stop")} } @@ -592,7 +592,7 @@ func (_c *MockAppConns_Stop_Call) RunAndReturn(run func() error) *MockAppConns_S return _c } - +// String provides a mock function with given fields: func (_m *MockAppConns) String() string { ret := _m.Called() @@ -610,12 +610,12 @@ func (_m *MockAppConns) String() string { return r0 } - +// MockAppConns_String_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'String' type MockAppConns_String_Call struct { *mock.Call } - +// String is a helper method to define mock.On call func (_e *MockAppConns_Expecter) String() *MockAppConns_String_Call { return &MockAppConns_String_Call{Call: _e.mock.On("String")} } @@ -637,8 +637,8 @@ func (_c *MockAppConns_String_Call) RunAndReturn(run func() string) *MockAppConn return _c } - - +// NewMockAppConns creates a new instance of MockAppConns. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
func NewMockAppConns(t interface { mock.TestingT Cleanup(func()) diff --git a/node/events/types.go b/node/events/types.go index 069ee116c..3af471f10 100644 --- a/node/events/types.go +++ b/node/events/types.go @@ -6,24 +6,24 @@ import ( uevent "github.com/dymensionxyz/dymint/utils/event" ) - +// Type Keys const ( - + // NodeTypeKey is a reserved composite key for event name. NodeTypeKey = "node.event" ) - +// Types const ( HealthStatus = "HealthStatus" ) - +// Convenience var HealthStatusList = map[string][]string{NodeTypeKey: {HealthStatus}} type DataHealthStatus struct { - + // Error is the error that was encountered in case of a health check failure. Nil implies healthy. Error error } @@ -31,6 +31,6 @@ func (dhs DataHealthStatus) String() string { return fmt.Sprintf("DataHealthStatus{Error: %v}", dhs.Error) } - +// Queries var QueryHealthStatus = uevent.QueryFor(NodeTypeKey, HealthStatus) diff --git a/node/mempool/mempool.go b/node/mempool/mempool.go index 1d7c53310..80477193c 100644 --- a/node/mempool/mempool.go +++ b/node/mempool/mempool.go @@ -15,12 +15,12 @@ const ( type MempoolIDs struct { mtx tmsync.RWMutex peerMap map[peer.ID]uint16 - nextID uint16 - activeIDs map[uint16]struct{} + nextID uint16 // assumes that a node will never have over 65536 active peers + activeIDs map[uint16]struct{} // used to check if a given peerID key is used, the value doesn't matter } - - +// Reserve searches for the next unused ID and assigns it to the +// peer. func (ids *MempoolIDs) ReserveForPeer(peer peer.ID) { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -30,8 +30,8 @@ func (ids *MempoolIDs) ReserveForPeer(peer peer.ID) { ids.activeIDs[curID] = struct{}{} } - - +// nextPeerID returns the next unused peer ID to use. +// This assumes that ids's mutex is already locked. func (ids *MempoolIDs) nextPeerID() uint16 { if len(ids.activeIDs) == maxActiveIDs { panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", maxActiveIDs)) @@ -47,7 +47,7 @@ func (ids *MempoolIDs) nextPeerID() uint16 { return curID } - +// Reclaim returns the ID reserved for the peer back to unused pool. func (ids *MempoolIDs) Reclaim(peer peer.ID) { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -59,7 +59,7 @@ func (ids *MempoolIDs) Reclaim(peer peer.ID) { } } - +// GetForPeer returns an ID for the peer. ID is generated if required. func (ids *MempoolIDs) GetForPeer(peer peer.ID) uint16 { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -78,6 +78,6 @@ func NewMempoolIDs() *MempoolIDs { return &MempoolIDs{ peerMap: make(map[peer.ID]uint16), activeIDs: map[uint16]struct{}{0: {}}, - nextID: 1, + nextID: 1, // reserve unknownPeerID(0) for mempoolReactor.BroadcastTx } } diff --git a/node/node.go b/node/node.go index 7cfe74792..f0f1a88e5 100644 --- a/node/node.go +++ b/node/node.go @@ -34,15 +34,15 @@ import ( "github.com/dymensionxyz/dymint/store" ) - +// prefixes used in KV store to separate main node data from DALC data var ( mainPrefix = []byte{0} dalcPrefix = []byte{1} indexerPrefix = []byte{2} ) - - +// Node represents a client node in Dymint network. +// It connects all the components and orchestrates their work. 
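The MempoolIDs comments restored in node/mempool/mempool.go above describe a reserve, lookup, reclaim lifecycle for per-peer mempool IDs, with ID 0 kept for the unknown peer. A minimal sketch of that lifecycle follows; the peer ID literal and import paths are assumed for illustration.

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"

	nodemempool "github.com/dymensionxyz/dymint/node/mempool"
)

func main() {
	ids := nodemempool.NewMempoolIDs() // ID 0 stays reserved for the unknown peer

	p := peer.ID("QmExamplePeer") // placeholder peer identifier
	ids.ReserveForPeer(p)         // assigns the next unused uint16 ID to this peer

	fmt.Println(ids.GetForPeer(p)) // returns the reserved ID (would reserve one if missing)

	ids.Reclaim(p) // hands the ID back to the unused pool, e.g. on disconnect
}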
type Node struct { service.BaseService eventBus *tmtypes.EventBus @@ -54,7 +54,7 @@ type Node struct { conf config.NodeConfig P2P *p2p.Client - + // TODO(tzdybal): consider extracting "mempool reactor" Mempool mempool.Mempool MempoolIDs *nodemempool.MempoolIDs incomingTxCh chan *p2p.GossipMessage @@ -68,12 +68,12 @@ type Node struct { BlockIndexer indexer.BlockIndexer IndexerService *txindex.IndexerService - + // shared context for all dymint components ctx context.Context cancel context.CancelFunc } - +// NewNode creates new Dymint node. func NewNode( ctx context.Context, conf config.NodeConfig, @@ -102,12 +102,12 @@ func NewNode( var baseKV store.KV var dstore datastore.Datastore - if conf.DBConfig.InMemory || (conf.RootDir == "" && conf.DBPath == "") { + if conf.DBConfig.InMemory || (conf.RootDir == "" && conf.DBPath == "") { // this is used for testing logger.Info("WARNING: working in in-memory mode") baseKV = store.NewDefaultInMemoryKVStore() dstore = datastore.NewMapDatastore() } else { - + // TODO(omritoptx): Move dymint to const baseKV = store.NewKVStore(conf.RootDir, conf.DBPath, "dymint", conf.DBConfig.SyncWrites, logger) path := filepath.Join(store.Rootify(conf.RootDir, conf.DBPath), "blocksync") var err error @@ -120,9 +120,9 @@ func NewNode( s := store.New(store.NewPrefixKV(baseKV, mainPrefix)) indexerKV := store.NewPrefixKV(baseKV, indexerPrefix) - + // TODO: dalcKV is needed for mock only. Initialize only if mock used dalcKV := store.NewPrefixKV(baseKV, dalcPrefix) - + // Init the settlement layer client settlementlc := slregistry.GetClient(slregistry.Client(conf.SettlementLayer)) if settlementlc == nil { return nil, fmt.Errorf("get settlement client: named: %s", conf.SettlementLayer) @@ -161,7 +161,7 @@ func NewNode( settlementlc, eventBus, pubsubServer, - nil, + nil, // p2p client is set later dalcKV, indexerService, logger, @@ -170,7 +170,7 @@ func NewNode( return nil, fmt.Errorf("BlockManager initialization: %w", err) } - + // Set p2p client and it's validators p2pValidator := p2p.NewValidator(logger.With("module", "p2p_validator"), blockManager) p2pClient, err := p2p.NewClient(conf.P2PConfig, p2pKey, genesis.ChainID, s, pubsubServer, dstore, logger.With("module", "p2p")) if err != nil { @@ -179,7 +179,7 @@ func NewNode( p2pClient.SetTxValidator(p2pValidator.TxValidator(mp, mpIDs)) p2pClient.SetBlockValidator(p2pValidator.BlockValidator()) - + // Set p2p client in block manager blockManager.P2PClient = p2pClient ctx, cancel := context.WithCancel(ctx) @@ -209,7 +209,7 @@ func NewNode( return node, nil } - +// OnStart is a part of Service interface. func (n *Node) OnStart() error { n.Logger.Info("starting P2P client") err := n.P2P.Start(n.ctx) @@ -234,7 +234,7 @@ func (n *Node) OnStart() error { } }() - + // start the block manager err = n.BlockManager.Start(n.ctx) if err != nil { return fmt.Errorf("while starting block manager: %w", err) @@ -243,12 +243,12 @@ func (n *Node) OnStart() error { return nil } - +// GetGenesis returns entire genesis doc. func (n *Node) GetGenesis() *tmtypes.GenesisDoc { return n.genesis } - +// OnStop is a part of Service interface. func (n *Node) OnStop() { err := n.BlockManager.DAClient.Stop() if err != nil { @@ -273,32 +273,32 @@ func (n *Node) OnStop() { n.cancel() } - +// OnReset is a part of Service interface. func (n *Node) OnReset() error { panic("OnReset - not implemented!") } - +// SetLogger sets the logger used by node. func (n *Node) SetLogger(logger log.Logger) { n.Logger = logger } - +// GetLogger returns logger. 
func (n *Node) GetLogger() log.Logger { return n.Logger } - +// EventBus gives access to Node's event bus. func (n *Node) EventBus() *tmtypes.EventBus { return n.eventBus } - +// PubSubServer gives access to the Node's pubsub server func (n *Node) PubSubServer() *pubsub.Server { return n.PubsubServer } - +// ProxyApp returns ABCI proxy connections to communicate with application. func (n *Node) ProxyApp() proxy.AppConns { return n.proxyApp } diff --git a/p2p/block.go b/p2p/block.go index 754c17973..d6da3da96 100644 --- a/p2p/block.go +++ b/p2p/block.go @@ -6,24 +6,24 @@ import ( tmcrypto "github.com/tendermint/tendermint/crypto" ) +/* -------------------------------------------------------------------------- */ +/* Event Data */ +/* -------------------------------------------------------------------------- */ - - - - +// BlockData defines the struct of the data for each block sent via P2P type BlockData struct { - + // Block is the block that was gossiped Block types.Block - + // Commit is the commit that was gossiped Commit types.Commit } - +// MarshalBinary encodes BlockData into binary form and returns it. func (b *BlockData) MarshalBinary() ([]byte, error) { return b.ToProto().Marshal() } - +// UnmarshalBinary decodes binary form of p2p received block into object. func (b *BlockData) UnmarshalBinary(data []byte) error { var pbBlock pb.BlockData err := pbBlock.Unmarshal(data) @@ -34,7 +34,7 @@ func (b *BlockData) UnmarshalBinary(data []byte) error { return err } - +// ToProto converts Data into protobuf representation and returns it. func (b *BlockData) ToProto() *pb.BlockData { return &pb.BlockData{ Block: b.Block.ToProto(), @@ -42,7 +42,7 @@ func (b *BlockData) ToProto() *pb.BlockData { } } - +// FromProto fills BlockData with data from its protobuf representation. func (b *BlockData) FromProto(other *pb.BlockData) error { if err := b.Block.FromProto(other.Block); err != nil { return err @@ -53,7 +53,7 @@ func (b *BlockData) FromProto(other *pb.BlockData) error { return nil } - +// Validate run basic validation on the p2p block received func (b *BlockData) Validate(proposerPubKey tmcrypto.PubKey) error { if err := b.Block.ValidateBasic(); err != nil { return err diff --git a/p2p/block_sync.go b/p2p/block_sync.go index 49cfefb93..f8be1e2c0 100644 --- a/p2p/block_sync.go +++ b/p2p/block_sync.go @@ -20,48 +20,48 @@ import ( "github.com/libp2p/go-libp2p/core/host" ) - - - - - +// Blocksync is a protocol used to retrieve blocks on demand from the P2P network. +// Nodes store received blocks from gossip in an IPFS blockstore and nodes are able to request them on demand using bitswap protocol. +// In order to discover the identifier (CID) of each block a DHT request needs to be made for the specific block height. +// Nodes need to advertise CIDs/height map to the DHT periodically. 
+// https://www.notion.so/dymension/ADR-x-Rollapp-block-sync-protocol-6ee48b232a6a45e09989d67f1a6c0297?pvs=4 type BlockSync struct { - + // service that reads/writes blocks either from local datastore or the P2P network bsrv blockservice.BlockService - + // local datastore for IPFS blocks bstore blockstore.Blockstore - + // protocol used to obtain blocks from the P2P network net network.BitSwapNetwork - + // used to find all data chunks that are part of the same block dsrv BlockSyncDagService - + // used to define the content identifiers of each data chunk cidBuilder cid.Builder logger types.Logger } type BlockSyncMessageHandler func(block *BlockData) - +// SetupBlockSync initializes all services required to provide and retrieve block data in the P2P network. func SetupBlockSync(ctx context.Context, h host.Host, store datastore.Datastore, logger types.Logger) *BlockSync { - + // construct a datastore ds := dsync.MutexWrap(store) - + // set a blockstore (to store IPFS data chunks) with the previous datastore bs := blockstore.NewBlockstore(ds) - + // initialize bitswap network used to retrieve data chunks from other peers in the P2P network bsnet := network.NewFromIpfsHost(h, &routinghelpers.Null{}, network.Prefix("/dymension/block-sync/")) - + // Bitswap server that provides data to the network. bsserver := server.New( ctx, bsnet, bs, - server.ProvideEnabled(false), + server.ProvideEnabled(false), // we don't provide blocks over DHT server.SetSendDontHaves(false), ) - + // Bitswap client that retrieves data from the network. bsclient := client.New( ctx, bsnet, @@ -71,7 +71,7 @@ func SetupBlockSync(ctx context.Context, h host.Host, store datastore.Datastore, client.WithoutDuplicatedBlockStats(), ) - + // start the network bsnet.Start(bsserver, bsclient) bsrv := blockservice.New(bs, bsclient) @@ -93,12 +93,12 @@ func SetupBlockSync(ctx context.Context, h host.Host, store datastore.Datastore, return blockSync } - +// SaveBlock stores the blocks produced in the DAG services to be retrievable from the P2P network. func (blocksync *BlockSync) SaveBlock(ctx context.Context, block []byte) (cid.Cid, error) { return blocksync.dsrv.SaveBlock(ctx, block) } - +// LoadBlock retrieves the blocks (from the local blockstore or the network) using the DAGService to discover all data chunks that are part of the same block. func (blocksync *BlockSync) LoadBlock(ctx context.Context, cid cid.Cid) (BlockData, error) { blockBytes, err := blocksync.dsrv.LoadBlock(ctx, cid) if err != nil { @@ -111,7 +111,7 @@ func (blocksync *BlockSync) LoadBlock(ctx context.Context, cid cid.Cid) (BlockDa return block, nil } - +// RemoveBlock removes the block from the DAGservice. func (blocksync *BlockSync) DeleteBlock(ctx context.Context, cid cid.Cid) error { return blocksync.dsrv.DeleteBlock(ctx, cid) } diff --git a/p2p/block_sync_dag.go b/p2p/block_sync_dag.go index 2502d9cd5..d9df4d440 100644 --- a/p2p/block_sync_dag.go +++ b/p2p/block_sync_dag.go @@ -21,8 +21,8 @@ type BlockSyncDagService struct { cidBuilder cid.Builder } - - +// NewDAGService inits the DAGservice used to retrieve/send blocks data in the P2P. 
+// Block data is organized in a merkle DAG using IPLD (https://ipld.io/docs/) func NewDAGService(bsrv blockservice.BlockService) BlockSyncDagService { bsDagService := &BlockSyncDagService{ cidBuilder: &cid.Prefix{ @@ -37,15 +37,15 @@ func NewDAGService(bsrv blockservice.BlockService) BlockSyncDagService { return *bsDagService } - - +// SaveBlock splits the block in chunks of 256KB and it creates a new merkle DAG with them. it returns the content identifier (cid) of the root node of the DAG. +// Using the root CID the whole block can be retrieved using the DAG service func (bsDagService *BlockSyncDagService) SaveBlock(ctx context.Context, block []byte) (cid.Cid, error) { blockReader := bytes.NewReader(block) splitter := chunker.NewSizeSplitter(blockReader, chunker.DefaultBlockSize) nodes := []*dag.ProtoNode{} - + // the loop creates nodes for each block chunk and sets each cid for { nextData, err := splitter.NextBytes() if err == io.EOF { @@ -63,14 +63,14 @@ func (bsDagService *BlockSyncDagService) SaveBlock(ctx context.Context, block [] } - + // an empty root node is created root := dag.NodeWithData(nil) err := root.SetCidBuilder(bsDagService.cidBuilder) if err != nil { return cid.Undef, err } - + // and linked to all chunks that are added to the DAGservice for _, n := range nodes { err := root.AddNodeLink(n.Cid().String(), n) @@ -90,21 +90,21 @@ func (bsDagService *BlockSyncDagService) SaveBlock(ctx context.Context, block [] return root.Cid(), nil } - +// LoadBlock returns the block data obtained from the DAGService, using the root cid, either from the network or the local blockstore func (bsDagService *BlockSyncDagService) LoadBlock(ctx context.Context, cid cid.Cid) ([]byte, error) { - + // first it gets the root node nd, err := bsDagService.Get(ctx, cid) if err != nil { return nil, err } - + // then it gets all the data from the root node read, err := dagReader(nd, bsDagService) if err != nil { return nil, err } - + // the data is read to bytes array data, err := io.ReadAll(read) if err != nil { return nil, err @@ -113,13 +113,13 @@ func (bsDagService *BlockSyncDagService) LoadBlock(ctx context.Context, cid cid. } func (bsDagService *BlockSyncDagService) DeleteBlock(ctx context.Context, cid cid.Cid) error { - + // first it gets the root node root, err := bsDagService.Get(ctx, cid) if err != nil { return err } - + // then it iterates all the cids to remove them from the block store for _, l := range root.Links() { err := bsDagService.Remove(ctx, l.Cid) if err != nil { @@ -129,12 +129,12 @@ func (bsDagService *BlockSyncDagService) DeleteBlock(ctx context.Context, cid ci return nil } - +// dagReader is used to read the DAG (all the block chunks) from the root (IPLD) node func dagReader(root ipld.Node, ds ipld.DAGService) (io.Reader, error) { ctx := context.Background() buf := new(bytes.Buffer) - + // the loop retrieves all the nodes (block chunks) either from the local store or the network, in case it is not there. 
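SaveBlock above splits a block into 256KB chunks, links each chunk under an empty root node, and LoadBlock walks the root's links to reassemble the bytes. A dependency-free sketch of that split-and-reassemble round trip (plain byte slices instead of IPLD nodes; the helper names are illustrative):

package main

import (
    "bytes"
    "fmt"
)

const chunkSize = 256 * 1024 // mirrors the 256KB chunk size described above

// split cuts a block into fixed-size chunks, as SaveBlock does before linking them to the root.
func split(block []byte) [][]byte {
    var chunks [][]byte
    for len(block) > 0 {
        n := chunkSize
        if len(block) < n {
            n = len(block)
        }
        chunks = append(chunks, block[:n])
        block = block[n:]
    }
    return chunks
}

// join re-reads the chunks in link order, as dagReader does when LoadBlock walks the root's links.
func join(chunks [][]byte) []byte {
    buf := new(bytes.Buffer)
    for _, c := range chunks {
        buf.Write(c)
    }
    return buf.Bytes()
}

func main() {
    block := bytes.Repeat([]byte{0xAB}, 600*1024) // ~600KB, so three chunks
    chunks := split(block)
    fmt.Println(len(chunks), bytes.Equal(block, join(chunks))) // 3 true
}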
for _, l := range root.Links() { n, err := ds.Get(ctx, l.Cid) if err != nil { diff --git a/p2p/blocks_received.go b/p2p/blocks_received.go index 0541f9599..ceaf0bf67 100644 --- a/p2p/blocks_received.go +++ b/p2p/blocks_received.go @@ -2,15 +2,15 @@ package p2p import "sync" - +// BlocksReceived tracks blocks received from P2P to know what are the missing blocks that need to be requested on demand type BlocksReceived struct { blocksReceived map[uint64]struct{} latestSeenHeight uint64 - + // mutex to protect blocksReceived map access blockReceivedMu sync.Mutex } - +// addBlockReceived adds the block height to a map func (br *BlocksReceived) AddBlockReceived(height uint64) { br.latestSeenHeight = max(height, br.latestSeenHeight) br.blockReceivedMu.Lock() @@ -18,7 +18,7 @@ func (br *BlocksReceived) AddBlockReceived(height uint64) { br.blocksReceived[height] = struct{}{} } - +// isBlockReceived checks if a block height is already received func (br *BlocksReceived) IsBlockReceived(height uint64) bool { br.blockReceivedMu.Lock() defer br.blockReceivedMu.Unlock() @@ -26,7 +26,7 @@ func (br *BlocksReceived) IsBlockReceived(height uint64) bool { return ok } - +// removeBlocksReceivedUpToHeight clears previous received block heights func (br *BlocksReceived) RemoveBlocksReceivedUpToHeight(appliedHeight uint64) { br.blockReceivedMu.Lock() defer br.blockReceivedMu.Unlock() @@ -37,7 +37,7 @@ func (br *BlocksReceived) RemoveBlocksReceivedUpToHeight(appliedHeight uint64) { } } - +// GetLatestSeenHeight returns the latest height stored func (br *BlocksReceived) GetLatestSeenHeight() uint64 { return br.latestSeenHeight } diff --git a/p2p/client.go b/p2p/client.go index d21979efe..596669f99 100644 --- a/p2p/client.go +++ b/p2p/client.go @@ -33,29 +33,29 @@ import ( "github.com/dymensionxyz/dymint/types" ) - +// TODO(tzdybal): refactor to configuration parameters const ( - + // reAdvertisePeriod defines a period after which P2P client re-attempt advertising namespace in DHT. reAdvertisePeriod = 1 * time.Hour - + // peerLimit defines limit of number of peers returned during active peer discovery. peerLimit = 60 - + // txTopicSuffix is added after namespace to create pubsub topic for TX gossiping. txTopicSuffix = "-tx" - + // blockTopicSuffix is added after namespace to create pubsub topic for block gossiping. blockTopicSuffix = "-block" - + // blockSyncProtocolSuffix is added after namespace to create blocksync protocol prefix. blockSyncProtocolPrefix = "block-sync" ) - - - - - +// Client is a P2P client, implemented with libp2p. +// +// Initially, client connects to predefined seed nodes (aka bootnodes, bootstrap nodes). +// Those seed nodes serve Kademlia DHT protocol, and are agnostic to ORU chain. Using DHT +// peer routing and discovery clients find other peers within ORU network. type Client struct { conf config.P2PConfig chainID string @@ -71,18 +71,18 @@ type Client struct { blockGossiper *Gossiper blockValidator GossipValidator - - + // cancel is used to cancel context passed to libp2p functions + // it's required because of discovery.Advertise call cancel context.CancelFunc localPubsubServer *tmpubsub.Server logger types.Logger - + // blocksync instance used to save and retrieve blocks from the P2P network on demand blocksync *BlockSync - + // store used to store retrievable blocks using blocksync blockSyncStore datastore.Datastore store store.Store @@ -90,10 +90,10 @@ type Client struct { blocksReceived *BlocksReceived } - - - - +// NewClient creates new Client object. 
+// +// Basic checks on parameters are done, and default parameters are provided for unset-configuration +// TODO(tzdybal): consider passing entire config, not just P2P config, to reduce number of arguments func NewClient(conf config.P2PConfig, privKey crypto.PrivKey, chainID string, store store.Store, localPubsubServer *tmpubsub.Server, blockSyncStore datastore.Datastore, logger types.Logger) (*Client, error) { if privKey == nil { return nil, fmt.Errorf("private key: %w", gerrc.ErrNotFound) @@ -116,15 +116,15 @@ func NewClient(conf config.P2PConfig, privKey crypto.PrivKey, chainID string, st }, nil } - - - - - - - +// Start establish Client's P2P connectivity. +// +// Following steps are taken: +// 1. Setup libp2p host, start listening for incoming connections. +// 2. Setup gossibsub. +// 3. Setup DHT, establish connection to seed nodes and initialize peer discovery. +// 4. Use active peer discovery to look for peers from same ORU network. func (c *Client) Start(ctx context.Context) error { - + // create new, cancelable context ctx, c.cancel = context.WithCancel(ctx) host, err := c.listen() if err != nil { @@ -171,7 +171,7 @@ func (c *Client) StartWithHost(ctx context.Context, h host.Host) error { return nil } - +// Close gently stops Client. func (c *Client) Close() error { c.cancel() @@ -183,24 +183,24 @@ func (c *Client) Close() error { ) } - +// GossipTx sends the transaction to the P2P network. func (c *Client) GossipTx(ctx context.Context, tx []byte) error { c.logger.Debug("Gossiping transaction.", "len", len(tx)) return c.txGossiper.Publish(ctx, tx) } - +// SetTxValidator sets the callback function, that will be invoked during message gossiping. func (c *Client) SetTxValidator(val GossipValidator) { c.txValidator = val } - +// GossipBlock sends the block, and it's commit to the P2P network. func (c *Client) GossipBlock(ctx context.Context, blockBytes []byte) error { c.logger.Debug("Gossiping block.", "len", len(blockBytes)) return c.blockGossiper.Publish(ctx, blockBytes) } - +// SaveBlock stores the block in the blocksync datastore, stores locally the returned identifier and advertises the identifier to the DHT, so other nodes can know the identifier for the block height. func (c *Client) SaveBlock(ctx context.Context, height uint64, revision uint64, blockBytes []byte) error { if !c.conf.BlockSyncEnabled { return nil @@ -228,7 +228,7 @@ func (c *Client) SaveBlock(ctx context.Context, height uint64, revision uint64, return nil } - +// RemoveBlocks is used to prune blocks from the block sync datastore. 
func (c *Client) RemoveBlocks(ctx context.Context, to uint64) (uint64, error) { prunedBlocks := uint64(0) @@ -269,13 +269,13 @@ func (c *Client) RemoveBlocks(ctx context.Context, to uint64) (uint64, error) { return prunedBlocks, nil } - +// AdvertiseBlockIdToDHT is used to advertise the identifier (cid) for a specific block height and revision to the DHT, using a PutValue operation func (c *Client) AdvertiseBlockIdToDHT(ctx context.Context, height uint64, revision uint64, cid cid.Cid) error { err := c.DHT.PutValue(ctx, getBlockSyncKeyByHeight(height, revision), []byte(cid.String())) return err } - +// GetBlockIdFromDHT is used to retrieve the identifier (cid) for a specific block height and revision from the DHT, using a GetValue operation func (c *Client) GetBlockIdFromDHT(ctx context.Context, height uint64, revision uint64) (cid.Cid, error) { cidBytes, err := c.DHT.GetValue(ctx, getBlockSyncKeyByHeight(height, revision)) if err != nil { @@ -288,23 +288,23 @@ func (c *Client) UpdateLatestSeenHeight(height uint64) { c.blocksReceived.latestSeenHeight = max(height, c.blocksReceived.latestSeenHeight) } - +// SetBlockValidator sets the callback function, that will be invoked after block is received from P2P network. func (c *Client) SetBlockValidator(validator GossipValidator) { c.blockValidator = validator } - +// Addrs returns listen addresses of Client. func (c *Client) Addrs() []multiaddr.Multiaddr { return c.Host.Addrs() } - +// Info returns p2p info func (c *Client) Info() (p2p.ID, string, string) { return p2p.ID(hex.EncodeToString([]byte(c.Host.ID()))), c.conf.ListenAddress, c.chainID } - - +// PeerConnection describe basic information about P2P connection. +// TODO(tzdybal): move it somewhere type PeerConnection struct { NodeInfo p2p.DefaultNodeInfo `json:"node_info"` IsOutbound bool `json:"is_outbound"` @@ -312,7 +312,7 @@ type PeerConnection struct { RemoteIP string `json:"remote_ip"` } - +// Peers returns list of peers connected to Client. 
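AdvertiseBlockIdToDHT and GetBlockIdFromDHT above key each block's CID under a per-revision, per-height DHT record (see getBlockSyncKeyByHeight later in this patch). A tiny sketch of building and parsing that key shape, assuming the "/block-sync/<revision>/<height>" layout shown there; parseBlockSyncKey is a hypothetical inverse added only for illustration:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// blockSyncKey reproduces the key layout used by getBlockSyncKeyByHeight.
func blockSyncKey(height, revision uint64) string {
    return "/block-sync/" + strconv.FormatUint(revision, 10) + "/" + strconv.FormatUint(height, 10)
}

// parseBlockSyncKey recovers the revision and height from such a key.
func parseBlockSyncKey(key string) (height, revision uint64, err error) {
    parts := strings.Split(strings.TrimPrefix(key, "/"), "/")
    if len(parts) != 3 || parts[0] != "block-sync" {
        return 0, 0, fmt.Errorf("unexpected key: %q", key)
    }
    if revision, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
        return 0, 0, err
    }
    height, err = strconv.ParseUint(parts[2], 10, 64)
    return height, revision, err
}

func main() {
    k := blockSyncKey(42, 0)
    h, r, _ := parseBlockSyncKey(k)
    fmt.Println(k, h, r) // /block-sync/0/42 42 0
}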
func (c *Client) Peers() []PeerConnection { conns := c.Host.Network().Conns() res := make([]PeerConnection, 0, len(conns)) @@ -322,12 +322,12 @@ func (c *Client) Peers() []PeerConnection { ListenAddr: c.conf.ListenAddress, Network: c.chainID, DefaultNodeID: p2p.ID(conn.RemotePeer().String()), - + // TODO(tzdybal): fill more fields }, IsOutbound: conn.Stat().Direction == network.DirOutbound, ConnectionStatus: p2p.ConnectionStatus{ Duration: time.Since(conn.Stat().Opened), - + // TODO(tzdybal): fill more fields }, RemoteIP: conn.RemoteMultiaddr().String(), } @@ -407,7 +407,7 @@ func (c *Client) peerDiscovery(ctx context.Context) error { } func (c *Client) setupPeerDiscovery(ctx context.Context) error { - + // wait for DHT select { case <-ctx.Done(): return nil @@ -443,7 +443,7 @@ func (c *Client) findPeers(ctx context.Context) error { return nil } - +// tryConnect attempts to connect to a peer and logs error if necessary func (c *Client) tryConnect(ctx context.Context, peer peer.AddrInfo) { c.logger.Debug("Trying to connect to peer.", "peer", peer) err := c.Host.Connect(ctx, peer) @@ -463,7 +463,7 @@ func (c *Client) setupGossiping(ctx context.Context) error { return err } - + // tx gossiper receives the tx to add to the mempool through validation process, since it is a joint process c.txGossiper, err = NewGossiper(c.Host, ps, c.getTxTopic(), nil, c.logger, WithValidator(c.txValidator)) if err != nil { return err @@ -502,43 +502,43 @@ func (c *Client) GetSeedAddrInfo(seedStr string) []peer.AddrInfo { return addrs } - - - - +// getNamespace returns unique string identifying ORU network. +// +// It is used to advertise/find peers in libp2p DHT. +// For now, chainID is used. func (c *Client) getNamespace() string { return c.chainID } - +// topic used to transmit transactions in gossipsub func (c *Client) getTxTopic() string { return c.getNamespace() + txTopicSuffix } - +// topic used to transmit blocks in gossipsub func (c *Client) getBlockTopic() string { return c.getNamespace() + blockTopicSuffix } - - +// NewTxValidator creates a pubsub validator that uses the node's mempool to check the +// transaction. 
If the transaction is valid, then it is added to the mempool func (c *Client) NewTxValidator() GossipValidator { return func(g *GossipMessage) bool { return true } } - +// blockSyncReceived is called on reception of a new block via the blocksync protocol func (c *Client) blockSyncReceived(block *BlockData) { err := c.localPubsubServer.PublishWithEvents(context.Background(), *block, map[string][]string{EventTypeKey: {EventNewBlockSyncBlock}}) if err != nil { c.logger.Error("Publishing event.", "err", err) } - + // Received block is cached and no longer needed to request using blocksync c.blocksReceived.AddBlockReceived(block.Block.Header.Height) } - +// blockGossipReceived is called on reception of a new block via the gossip protocol func (c *Client) blockGossipReceived(ctx context.Context, block []byte) { var gossipedBlock BlockData if err := gossipedBlock.UnmarshalBinary(block); err != nil { @@ -550,7 +550,7 @@ func (c *Client) blockGossipReceived(ctx context.Context, block []byte) { } if c.conf.BlockSyncEnabled { _, err := c.store.LoadBlockCid(gossipedBlock.Block.Header.Height) - + // skip block already added to blocksync if err == nil { return } @@ -558,13 +558,13 @@ func (c *Client) blockGossipReceived(ctx context.Context, block []byte) { if err != nil { c.logger.Error("Adding block to blocksync store.", "err", err, "height", gossipedBlock.Block.Header.Height) } - + // Received block is cached and no longer needed to request using blocksync c.blocksReceived.AddBlockReceived(gossipedBlock.Block.Header.Height) } } - - +// bootstrapLoop is used to periodically check if the node is connected to other nodes in the P2P network, re-bootstrapping the DHT in case it is necessary, +// or to try to connect to the persistent peers func (c *Client) bootstrapLoop(ctx context.Context) { ticker := time.NewTicker(c.conf.BootstrapRetryTime) defer ticker.Stop() @@ -590,7 +590,7 @@ func (c *Client) bootstrapLoop(ctx context.Context) { } } - +// retrieveBlockSyncLoop checks for any block below the latest block height received that has not been received yet, and requests it on demand func (c *Client) retrieveBlockSyncLoop(ctx context.Context, msgHandler BlockSyncMessageHandler) { ticker := time.NewTicker(c.conf.BlockSyncRequestIntervalTime) defer ticker.Stop() @@ -600,7 +600,7 @@ func (c *Client) retrieveBlockSyncLoop(ctx context.Context, msgHandler BlockSync case <-ctx.Done(): return case <-ticker.C: - + // if not connected at the p2p level, don't try if len(c.Peers()) == 0 { continue } @@ -609,8 +609,8 @@ func (c *Client) retrieveBlockSyncLoop(ctx context.Context, msgHandler BlockSync continue } - - + // this loop iterates and retrieves all the blocks between the last block applied and the greatest height received, + // skipping any cached block, since it has already been received.
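The retrieval loop that continues just below walks every height between the next height to apply and the latest height seen on P2P, skipping heights already received from gossip. A compact, self-contained sketch of just that selection logic (names are illustrative, not dymint's types):

package main

import "fmt"

// missingHeights lists the heights that still have to be fetched via blocksync:
// everything from the next height to apply up to the latest height seen on P2P,
// minus the heights already received from gossip.
func missingHeights(nextHeight, latestSeen uint64, received map[uint64]struct{}) []uint64 {
    var missing []uint64
    for h := nextHeight; h <= latestSeen; h++ {
        if _, ok := received[h]; ok {
            continue // already cached from gossip, no need to request it
        }
        missing = append(missing, h)
    }
    return missing
}

func main() {
    received := map[uint64]struct{}{11: {}, 13: {}}
    fmt.Println(missingHeights(10, 14, received)) // [10 12 14]
}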
for h := state.NextHeight(); h <= c.blocksReceived.latestSeenHeight; h++ { if ctx.Err() != nil { return @@ -653,7 +653,7 @@ func (c *Client) retrieveBlockSyncLoop(ctx context.Context, msgHandler BlockSync } } - +// advertiseBlockSyncCids is used to advertise all the block identifiers (cids) stored in the local store to the DHT on startup func (c *Client) advertiseBlockSyncCids(ctx context.Context) { ticker := time.NewTicker(c.conf.BlockSyncRequestIntervalTime) defer ticker.Stop() @@ -663,7 +663,7 @@ func (c *Client) advertiseBlockSyncCids(ctx context.Context) { case <-ctx.Done(): return case <-ticker.C: - + // if no connected at p2p level, it will try again after ticker time if len(c.Peers()) == 0 { continue } @@ -693,13 +693,13 @@ func (c *Client) advertiseBlockSyncCids(ctx context.Context) { } } - + // just try once and then quit when finished return } } } - +// findConnection returns true in case the node is already connected to the peer specified. func (c *Client) findConnection(peer peer.AddrInfo) bool { for _, con := range c.Host.Network().Conns() { if peer.ID == con.RemotePeer() { @@ -713,7 +713,7 @@ func getBlockSyncKeyByHeight(height uint64, revision uint64) string { return "/" + blockSyncProtocolPrefix + "/" + strconv.FormatUint(revision, 10) + "/" + strconv.FormatUint(height, 10) } - +// validates that the content identifiers advertised in the DHT are valid. type blockIdValidator struct{} func (blockIdValidator) Validate(_ string, id []byte) error { diff --git a/p2p/events.go b/p2p/events.go index f88ca45e6..45a0064a5 100644 --- a/p2p/events.go +++ b/p2p/events.go @@ -4,12 +4,12 @@ import ( uevent "github.com/dymensionxyz/dymint/utils/event" ) - - - +/* -------------------------------------------------------------------------- */ +/* Event types */ +/* -------------------------------------------------------------------------- */ const ( - + // EventTypeKey is a reserved composite key for event name. EventTypeKey = "p2p.event" ) @@ -18,12 +18,12 @@ const ( EventNewBlockSyncBlock = "NewBlockSyncBlock" ) +/* -------------------------------------------------------------------------- */ +/* Queries */ +/* -------------------------------------------------------------------------- */ - - - - +// EventQueryNewGossipedBlock is the query used for getting EventNewGossipedBlock var EventQueryNewGossipedBlock = uevent.QueryFor(EventTypeKey, EventNewGossipedBlock) - +// EventQueryNewBlockSyncBlock is the query used for getting EventNewBlockSyncBlock var EventQueryNewBlockSyncBlock = uevent.QueryFor(EventTypeKey, EventNewBlockSyncBlock) diff --git a/p2p/gossip.go b/p2p/gossip.go index 2cb7c3f65..6d4236e4c 100644 --- a/p2p/gossip.go +++ b/p2p/gossip.go @@ -13,28 +13,28 @@ import ( "github.com/dymensionxyz/dymint/types" ) - +// buffer size used by gossipSub router to consume received packets (blocks or txs). packets are dropped in case buffer overflows. in case of blocks, it can buffer up to 5 minutes (assuming 200ms block rate) const pubsubBufferSize = 3000 - +// GossipMessage represents message gossiped via P2P network (e.g. transaction, Block etc). type GossipMessage struct { Data []byte From peer.ID } - +// GossiperOption sets optional parameters of Gossiper. type GossiperOption func(*Gossiper) error type GossipMessageHandler func(ctx context.Context, gossipedBlock []byte) - +// WithValidator options registers topic validator for Gossiper. 
func WithValidator(validator GossipValidator) GossiperOption { return func(g *Gossiper) error { return g.ps.RegisterTopicValidator(g.topic.String(), wrapValidator(g, validator)) } } - +// Gossiper is an abstraction of P2P publish subscribe mechanism. type Gossiper struct { ownID peer.ID @@ -45,9 +45,9 @@ type Gossiper struct { logger types.Logger } - - - +// NewGossiper creates new, ready to use instance of Gossiper. +// +// Returned Gossiper object can be used for sending (Publishing) and receiving messages in topic identified by topicStr. func NewGossiper(host host.Host, ps *pubsub.PubSub, topicStr string, msgHandler GossipMessageHandler, logger types.Logger, options ...GossiperOption) (*Gossiper, error) { topic, err := ps.Join(topicStr) if err != nil { @@ -76,7 +76,7 @@ func NewGossiper(host host.Host, ps *pubsub.PubSub, topicStr string, msgHandler return g, nil } - +// Close is used to disconnect from topic and free resources used by Gossiper. func (g *Gossiper) Close() error { err := g.ps.UnregisterTopicValidator(g.topic.String()) g.sub.Cancel() @@ -86,12 +86,12 @@ func (g *Gossiper) Close() error { ) } - +// Publish publishes data to gossip topic. func (g *Gossiper) Publish(ctx context.Context, data []byte) error { return g.topic.Publish(ctx, data) } - +// ProcessMessages waits for messages published in the topic and execute handler. func (g *Gossiper) ProcessMessages(ctx context.Context) { for { msg, err := g.sub.Next(ctx) @@ -110,8 +110,8 @@ func (g *Gossiper) ProcessMessages(ctx context.Context) { func wrapValidator(gossiper *Gossiper, validator GossipValidator) pubsub.Validator { return func(_ context.Context, _ peer.ID, msg *pubsub.Message) bool { - - + // Make sure we don't process our own messages. + // In this case we'll want to return true but not to actually handle the message. if msg.GetFrom() == gossiper.ownID { return true } diff --git a/p2p/validator.go b/p2p/validator.go index 4c3b26c27..513714e4f 100644 --- a/p2p/validator.go +++ b/p2p/validator.go @@ -16,17 +16,17 @@ type StateGetter interface { GetRevision() uint64 } - +// GossipValidator is a callback function type. type GossipValidator func(*GossipMessage) bool - +// IValidator is an interface for implementing validators of messages gossiped in the p2p network. type IValidator interface { - - + // TxValidator creates a pubsub validator that uses the node's mempool to check the + // transaction. If the transaction is valid, then it is added to the mempool TxValidator(mp mempool.Mempool, mpoolIDS *nodemempool.MempoolIDs) GossipValidator } - +// Validator is a validator for messages gossiped in the p2p network. type Validator struct { logger types.Logger stateGetter StateGetter @@ -34,7 +34,7 @@ type Validator struct { var _ IValidator = (*Validator)(nil) - +// NewValidator creates a new Validator. func NewValidator(logger types.Logger, blockmanager StateGetter) *Validator { return &Validator{ logger: logger, @@ -42,9 +42,9 @@ func NewValidator(logger types.Logger, blockmanager StateGetter) *Validator { } } - - - +// TxValidator creates a pubsub validator that uses the node's mempool to check the +// transaction. +// False means the TX is considered invalid and should not be gossiped. 
func (v *Validator) TxValidator(mp mempool.Mempool, mpoolIDS *nodemempool.MempoolIDs) GossipValidator { return func(txMessage *GossipMessage) bool { v.logger.Debug("Transaction received.", "bytes", len(txMessage.Data)) @@ -59,7 +59,7 @@ func (v *Validator) TxValidator(mp mempool.Mempool, mpoolIDS *nodemempool.Mempoo case errors.Is(err, mempool.ErrTxInCache): return true case errors.Is(err, mempool.ErrMempoolIsFull{}): - return true + return true // we have no reason to believe that we should throw away the message case errors.Is(err, mempool.ErrTxTooLarge{}): return false case errors.Is(err, mempool.ErrPreCheck{}): @@ -73,7 +73,7 @@ func (v *Validator) TxValidator(mp mempool.Mempool, mpoolIDS *nodemempool.Mempoo } } - +// BlockValidator runs basic checks on the gossiped block func (v *Validator) BlockValidator() GossipValidator { return func(blockMsg *GossipMessage) bool { var gossipedBlock BlockData diff --git a/rpc/client/client.go b/rpc/client/client.go index e0b6b4a29..d697476fb 100644 --- a/rpc/client/client.go +++ b/rpc/client/client.go @@ -34,7 +34,7 @@ const ( defaultPerPage = 30 maxPerPage = 100 - + // TODO(tzdybal): make this configurable subscribeTimeout = 5 * time.Second ) @@ -46,20 +46,20 @@ const ( SLValidated ) - +// ErrConsensusStateNotAvailable is returned because Dymint doesn't use Tendermint consensus. var ErrConsensusStateNotAvailable = errors.New("consensus state not available in Dymint") var _ rpcclient.Client = &Client{} - - - +// Client implements tendermint RPC client interface. +// +// This is the type that is used in communication between cosmos-sdk app and Dymint. type Client struct { *tmtypes.EventBus config *config.RPCConfig node *node.Node - + // cache of chunked genesis data. genChunks []string } @@ -68,7 +68,7 @@ type ResultBlockValidated struct { Result BlockValidationStatus } - +// NewClient returns Client working with given node. func NewClient(node *node.Node) *Client { return &Client{ EventBus: node.EventBus(), @@ -77,7 +77,7 @@ func NewClient(node *node.Node) *Client { } } - +// ABCIInfo returns basic information about application state. func (c *Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { resInfo, err := c.Query().InfoSync(proxy.RequestInfo) if err != nil { @@ -86,12 +86,12 @@ func (c *Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { return &ctypes.ResultABCIInfo{Response: *resInfo}, nil } - +// ABCIQuery queries for data from application. func (c *Client) ABCIQuery(ctx context.Context, path string, data tmbytes.HexBytes) (*ctypes.ResultABCIQuery, error) { return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions) } - +// ABCIQueryWithOptions queries for data from application. func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmbytes.HexBytes, opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { resQuery, err := c.Query().QuerySync(abci.RequestQuery{ Path: path, @@ -106,19 +106,19 @@ func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmb return &ctypes.ResultABCIQuery{Response: *resQuery}, nil } - - +// BroadcastTxCommit returns with the responses from CheckTx and DeliverTx. +// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_commit func (c *Client) BroadcastTxCommit(ctx context.Context, tx tmtypes.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - - - - subscriber := "" + // This implementation corresponds to Tendermints implementation from rpc/core/mempool.go. 
+ // ctx.RemoteAddr godoc: If neither HTTPReq nor WSConn is set, an empty string is returned. + // This code is a local client, so we can assume that subscriber is "" + subscriber := "" // ctx.RemoteAddr() if err := c.IsSubscriptionAllowed(subscriber); err != nil { return nil, sdkerrors.Wrap(err, "subscription not allowed") } - + // Subscribe to tx being committed in block. subCtx, cancel := context.WithTimeout(ctx, subscribeTimeout) defer cancel() q := tmtypes.EventQueryTxFor(tx) @@ -134,7 +134,7 @@ func (c *Client) BroadcastTxCommit(ctx context.Context, tx tmtypes.Tx) (*ctypes. } }() - + // add to mempool and wait for CheckTx result checkTxResCh := make(chan *abci.Response, 1) err = c.node.Mempool.CheckTx(tx, func(res *abci.Response) { select { @@ -159,15 +159,15 @@ func (c *Client) BroadcastTxCommit(ctx context.Context, tx tmtypes.Tx) (*ctypes. }, nil } - + // broadcast tx err = c.node.P2P.GossipTx(ctx, tx) if err != nil { return nil, fmt.Errorf("tx added to local mempool but failure to broadcast: %w", err) } - + // Wait for the tx to be included in a block or timeout. select { - case msg := <-deliverTxSub.Out(): + case msg := <-deliverTxSub.Out(): // The tx was included in a block. deliverTxRes, _ := msg.Data().(tmtypes.EventDataTx) return &ctypes.ResultBroadcastTxCommit{ CheckTx: *checkTxRes, @@ -201,15 +201,15 @@ func (c *Client) BroadcastTxCommit(ctx context.Context, tx tmtypes.Tx) (*ctypes. } } - - - +// BroadcastTxAsync returns right away, with no response. Does not wait for +// CheckTx nor DeliverTx results. +// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_async func (c *Client) BroadcastTxAsync(ctx context.Context, tx tmtypes.Tx) (*ctypes.ResultBroadcastTx, error) { err := c.node.Mempool.CheckTx(tx, nil, mempool.TxInfo{}) if err != nil { return nil, err } - + // gossipTx optimistically err = c.node.P2P.GossipTx(ctx, tx) if err != nil { return nil, fmt.Errorf("tx added to local mempool but failed to gossip: %w", err) @@ -217,9 +217,9 @@ func (c *Client) BroadcastTxAsync(ctx context.Context, tx tmtypes.Tx) (*ctypes.R return &ctypes.ResultBroadcastTx{Hash: tx.Hash()}, nil } - - - +// BroadcastTxSync returns with the response from CheckTx. Does not wait for +// DeliverTx result. +// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_sync func (c *Client) BroadcastTxSync(ctx context.Context, tx tmtypes.Tx) (*ctypes.ResultBroadcastTx, error) { resCh := make(chan *abci.Response, 1) err := c.node.Mempool.CheckTx(tx, func(res *abci.Response) { @@ -231,16 +231,16 @@ func (c *Client) BroadcastTxSync(ctx context.Context, tx tmtypes.Tx) (*ctypes.Re res := <-resCh r := res.GetCheckTx() - - - + // gossip the transaction if it's in the mempool. + // Note: we have to do this here because, unlike the tendermint mempool reactor, there + // is no routine that gossips transactions after they enter the pool if r.Code == abci.CodeTypeOK { err = c.node.P2P.GossipTx(ctx, tx) if err != nil { - - - - + // the transaction must be removed from the mempool if it cannot be gossiped. + // if this does not occur, then the user will not be able to try again using + // this node, as the CheckTx call above will return an error indicating that + // the tx is already in the mempool _ = c.node.Mempool.RemoveTxByKey(tx.Key()) return nil, fmt.Errorf("gossip tx: %w", err) } @@ -255,7 +255,7 @@ func (c *Client) BroadcastTxSync(ctx context.Context, tx tmtypes.Tx) (*ctypes.Re }, nil } - +// Subscribe subscribe given subscriber to a query. 
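BroadcastTxSync above gossips a transaction only after CheckTx passes, and evicts it from the mempool if gossip fails so that a later retry is not rejected as a duplicate. A stripped-down sketch of that decision with stand-in interfaces (these are not dymint's real mempool or P2P types):

package main

import (
    "context"
    "errors"
    "fmt"
)

// Stand-ins for the mempool and P2P client used by BroadcastTxSync.
type mempool interface {
    CheckTx(tx []byte) error
    Remove(tx []byte)
}

type gossiper interface {
    GossipTx(ctx context.Context, tx []byte) error
}

// broadcastSync mirrors the flow in the hunk above: only gossip txs that passed CheckTx,
// and drop them from the pool if gossip fails so the same tx can be resubmitted later.
func broadcastSync(ctx context.Context, mp mempool, g gossiper, tx []byte) error {
    if err := mp.CheckTx(tx); err != nil {
        return err
    }
    if err := g.GossipTx(ctx, tx); err != nil {
        mp.Remove(tx)
        return fmt.Errorf("gossip tx: %w", err)
    }
    return nil
}

type fakePool struct{ txs map[string]struct{} }

func (p *fakePool) CheckTx(tx []byte) error {
    if _, ok := p.txs[string(tx)]; ok {
        return errors.New("tx already in cache")
    }
    p.txs[string(tx)] = struct{}{}
    return nil
}
func (p *fakePool) Remove(tx []byte) { delete(p.txs, string(tx)) }

type failingGossip struct{}

func (failingGossip) GossipTx(context.Context, []byte) error { return errors.New("no peers") }

func main() {
    mp := &fakePool{txs: map[string]struct{}{}}
    err := broadcastSync(context.Background(), mp, failingGossip{}, []byte("tx1"))
    fmt.Println(err, len(mp.txs)) // gossip tx: no peers 0  (tx evicted, retry possible)
}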
func (c *Client) Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { q, err := tmquery.New(query) if err != nil { @@ -283,7 +283,7 @@ func (c *Client) Subscribe(ctx context.Context, subscriber, query string, outCap return outc, nil } - +// Unsubscribe unsubscribes given subscriber from a query. func (c *Client) Unsubscribe(ctx context.Context, subscriber, query string) error { q, err := tmquery.New(query) if err != nil { @@ -292,12 +292,12 @@ func (c *Client) Unsubscribe(ctx context.Context, subscriber, query string) erro return c.EventBus.Unsubscribe(ctx, subscriber, q) } - +// Genesis returns entire genesis. func (c *Client) Genesis(_ context.Context) (*ctypes.ResultGenesis, error) { return &ctypes.ResultGenesis{Genesis: c.node.GetGenesis()}, nil } - +// GenesisChunked returns given chunk of genesis. func (c *Client) GenesisChunked(_ context.Context, id uint) (*ctypes.ResultGenesisChunk, error) { genChunks, err := c.GetGenesisChunks() if err != nil { @@ -312,19 +312,19 @@ func (c *Client) GenesisChunked(_ context.Context, id uint) (*ctypes.ResultGenes return nil, fmt.Errorf("service configuration error, there are no chunks") } - + // it's safe to do uint(chunkLen)-1 (no overflow) since we always have at least one chunk here if id > uint(chunkLen)-1 { return nil, fmt.Errorf("there are %d chunks, %d is invalid", chunkLen-1, id) } return &ctypes.ResultGenesisChunk{ TotalChunks: chunkLen, - ChunkNumber: int(id), + ChunkNumber: int(id), //nolint:gosec // id is always positive Data: genChunks[id], }, nil } - +// BlockchainInfo returns ABCI block meta information for given height range. func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { const limit int64 = 20 @@ -336,8 +336,8 @@ func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) baseHeight = 1 } minHeight, maxHeight, err = filterMinMax( - int64(baseHeight), - int64(c.node.GetBlockManagerHeight()), + int64(baseHeight), //nolint:gosec // height is non-negative and falls in int64 + int64(c.node.GetBlockManagerHeight()), //nolint:gosec // height is non-negative and falls in int64 minHeight, maxHeight, limit) @@ -348,7 +348,7 @@ func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) blocks := make([]*tmtypes.BlockMeta, 0, maxHeight-minHeight+1) for height := maxHeight; height >= minHeight; height-- { - block, err := c.node.Store.LoadBlock(uint64(height)) + block, err := c.node.Store.LoadBlock(uint64(height)) //nolint:gosec // height is non-negative and falls in int64 if err != nil { return nil, err } @@ -362,12 +362,12 @@ func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) } return &ctypes.ResultBlockchainInfo{ - LastHeight: int64(c.node.GetBlockManagerHeight()), + LastHeight: int64(c.node.GetBlockManagerHeight()), //nolint:gosec // height is non-negative and falls in int64 BlockMetas: blocks, }, nil } - +// NetInfo returns basic information about client P2P connections. func (c *Client) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { res := ctypes.ResultNetInfo{ Listening: true, @@ -389,24 +389,24 @@ func (c *Client) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { return &res, nil } - +// DumpConsensusState always returns error as there is no consensus state in Dymint. 
func (c *Client) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { return nil, ErrConsensusStateNotAvailable } - +// ConsensusState always returns error as there is no consensus state in Dymint. func (c *Client) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { return nil, ErrConsensusStateNotAvailable } - - - +// ConsensusParams returns consensus params at given height. +// +// Currently, consensus params changes are not supported and this method returns params as defined in genesis. func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { - + // TODO(tzdybal): implement consensus params handling: https://github.com/dymensionxyz/dymint/issues/291 params := c.node.GetGenesis().ConsensusParams return &ctypes.ResultConsensusParams{ - BlockHeight: int64(c.normalizeHeight(height)), + BlockHeight: int64(c.normalizeHeight(height)), //nolint:gosec // height is non-negative and falls in int64 ConsensusParams: tmproto.ConsensusParams{ Block: tmproto.BlockParams{ MaxBytes: params.Block.MaxBytes, @@ -428,14 +428,14 @@ func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.Re }, nil } - +// Health endpoint returns empty value. It can be used to monitor service availability. func (c *Client) Health(ctx context.Context) (*ctypes.ResultHealth, error) { return &ctypes.ResultHealth{}, nil } - - - +// Block method returns BlockID and block itself for given height. +// +// If height is nil, it returns information about last known block. func (c *Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { heightValue := c.normalizeHeight(height) block, err := c.node.Store.LoadBlock(heightValue) @@ -459,7 +459,7 @@ func (c *Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, }, nil } - +// BlockByHash returns BlockID and block itself for given hash. func (c *Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { var h [32]byte copy(h[:], hash) @@ -485,13 +485,13 @@ func (c *Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBl }, nil } - +// BlockResults returns information about transactions, events and updates of validator set and consensus params. func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) { var h uint64 if height == nil { h = c.node.GetBlockManagerHeight() } else { - h = uint64(*height) + h = uint64(*height) //nolint:gosec // height is non-negative and falls in int64 } resp, err := c.node.Store.LoadBlockResponses(h) if err != nil { @@ -499,7 +499,7 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul } return &ctypes.ResultBlockResults{ - Height: int64(h), + Height: int64(h), //nolint:gosec // height is non-negative and falls in int64 TxsResults: resp.DeliverTxs, BeginBlockEvents: resp.BeginBlock.Events, EndBlockEvents: resp.EndBlock.Events, @@ -508,7 +508,7 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul }, nil } - +// Commit returns signed header (aka commit) at given height. 
func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { heightValue := c.normalizeHeight(height) com, err := c.node.Store.LoadCommit(heightValue) @@ -528,7 +528,7 @@ func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommi return ctypes.NewResultCommit(&block.Header, commit, true), nil } - +// Validators returns paginated list of validators at given height. func (c *Client) Validators(ctx context.Context, heightPtr *int64, _, _ *int) (*ctypes.ResultValidators, error) { height := c.normalizeHeight(heightPtr) @@ -538,14 +538,14 @@ func (c *Client) Validators(ctx context.Context, heightPtr *int64, _, _ *int) (* } return &ctypes.ResultValidators{ - BlockHeight: int64(height), + BlockHeight: int64(height), //nolint:gosec // height is non-negative and falls in int64 Validators: proposer.TMValidators(), Count: 1, Total: 1, }, nil } - +// Tx returns detailed information about transaction identified by its hash. func (c *Client) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { res, err := c.node.TxIndexer.Get(hash) if err != nil { @@ -561,8 +561,8 @@ func (c *Client) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.Resul var proof tmtypes.TxProof if prove { - block, _ := c.node.Store.LoadBlock(uint64(height)) - blockProof := block.Data.Txs.Proof(int(index)) + block, _ := c.node.Store.LoadBlock(uint64(height)) //nolint:gosec // height is non-negative and falls in int64 + blockProof := block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines proof = tmtypes.TxProof{ RootHash: blockProof.RootHash, Data: tmtypes.Tx(blockProof.Data), @@ -580,7 +580,7 @@ func (c *Client) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.Resul }, nil } - +// TxSearch returns detailed information about transactions matching query. func (c *Client) TxSearch(ctx context.Context, query string, prove bool, pagePtr, perPagePtr *int, orderBy string) (*ctypes.ResultTxSearch, error) { q, err := tmquery.New(query) if err != nil { @@ -592,7 +592,7 @@ func (c *Client) TxSearch(ctx context.Context, query string, prove bool, pagePtr return nil, err } - + // sort results (must be done before pagination) switch orderBy { case "desc": sort.Slice(results, func(i, j int) bool { @@ -612,7 +612,7 @@ func (c *Client) TxSearch(ctx context.Context, query string, prove bool, pagePtr return nil, errors.New("expected order_by to be either `asc` or `desc` or empty") } - + // paginate results totalCount := len(results) perPage := validatePerPage(perPagePtr) @@ -629,7 +629,10 @@ func (c *Client) TxSearch(ctx context.Context, query string, prove bool, pagePtr r := results[i] var proof tmtypes.TxProof - + /*if prove { + block := nil //env.BlockStore.LoadBlock(r.Height) + proof = block.Data.Txs.Proof(int(r.Index)) // XXX: overflow on 32-bit machines + }*/ apiResults = append(apiResults, &ctypes.ResultTx{ Hash: tmtypes.Tx(r.Tx).Hash(), @@ -644,8 +647,8 @@ func (c *Client) TxSearch(ctx context.Context, query string, prove bool, pagePtr return &ctypes.ResultTxSearch{Txs: apiResults, TotalCount: totalCount}, nil } - - +// BlockSearch defines a method to search for a paginated set of blocks by +// BeginBlock and EndBlock event search criteria. 
func (c *Client) BlockSearch(ctx context.Context, query string, page, perPage *int, orderBy string) (*ctypes.ResultBlockSearch, error) { q, err := tmquery.New(query) if err != nil { @@ -657,7 +660,7 @@ func (c *Client) BlockSearch(ctx context.Context, query string, page, perPage *i return nil, err } - + // Sort the results switch orderBy { case "desc": sort.Slice(results, func(i, j int) bool { @@ -672,7 +675,7 @@ func (c *Client) BlockSearch(ctx context.Context, query string, page, perPage *i return nil, errors.New("expected order_by to be either `asc` or `desc` or empty") } - + // Paginate totalCount := len(results) perPageVal := validatePerPage(perPage) @@ -684,10 +687,10 @@ func (c *Client) BlockSearch(ctx context.Context, query string, page, perPage *i skipCount := validateSkipCount(pageVal, perPageVal) pageSize := tmmath.MinInt(perPageVal, totalCount-skipCount) - + // Fetch the blocks blocks := make([]*ctypes.ResultBlock, 0, pageSize) for i := skipCount; i < skipCount+pageSize; i++ { - b, err := c.node.Store.LoadBlock(uint64(results[i])) + b, err := c.node.Store.LoadBlock(uint64(results[i])) //nolint:gosec // height is non-negative and falls in int64 if err != nil { return nil, err } @@ -706,11 +709,11 @@ func (c *Client) BlockSearch(ctx context.Context, query string, page, perPage *i return &ctypes.ResultBlockSearch{Blocks: blocks, TotalCount: totalCount}, nil } - +// Status returns detailed information about current status of the node. func (c *Client) Status(_ context.Context) (*ctypes.ResultStatus, error) { latest, err := c.node.Store.LoadBlock(c.node.GetBlockManagerHeight()) if err != nil { - + // TODO(tzdybal): extract error return nil, fmt.Errorf("find latest block: %w", err) } @@ -736,7 +739,7 @@ func (c *Client) Status(_ context.Context) (*ctypes.ResultStatus, error) { txIndexerStatus := "on" result := &ctypes.ResultStatus{ - + // TODO(ItzhakBokris): update NodeInfo fields NodeInfo: p2p.DefaultNodeInfo{ ProtocolVersion: defaultProtocolVersion, DefaultNodeID: id, @@ -753,18 +756,18 @@ func (c *Client) Status(_ context.Context) (*ctypes.ResultStatus, error) { SyncInfo: ctypes.SyncInfo{ LatestBlockHash: latestBlockHash[:], LatestAppHash: latestAppHash[:], - LatestBlockHeight: int64(latestHeight), + LatestBlockHeight: int64(latestHeight), //nolint:gosec // height is non-negative and falls in int64 LatestBlockTime: latestBlockTime, - + // CatchingUp is true if the node is not at the latest height received from p2p or da. CatchingUp: c.node.BlockManager.TargetHeight.Load() > latestHeight, - - - - - - + // TODO(tzdybal): add missing fields + // EarliestBlockHash: earliestBlockHash, + // EarliestAppHash: earliestAppHash, + // EarliestBlockHeight: earliestBloc + // kHeight, + // EarliestBlockTime: time.Unix(0, earliestBlockTimeNano), }, - + // TODO(ItzhakBokris): update ValidatorInfo fields ValidatorInfo: ctypes.ValidatorInfo{ Address: tmbytes.HexBytes(proposer.ConsAddress()), PubKey: proposer.PubKey(), @@ -774,14 +777,14 @@ func (c *Client) Status(_ context.Context) (*ctypes.ResultStatus, error) { return result, nil } - +// BroadcastEvidence is not yet implemented. func (c *Client) BroadcastEvidence(ctx context.Context, evidence tmtypes.Evidence) (*ctypes.ResultBroadcastEvidence, error) { return &ctypes.ResultBroadcastEvidence{ Hash: evidence.Hash(), }, nil } - +// NumUnconfirmedTxs returns information about transactions in mempool. 
func (c *Client) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) { return &ctypes.ResultUnconfirmedTxs{ Count: c.node.Mempool.Size(), @@ -790,9 +793,9 @@ func (c *Client) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirm }, nil } - +// UnconfirmedTxs returns transactions in mempool. func (c *Client) UnconfirmedTxs(ctx context.Context, limitPtr *int) (*ctypes.ResultUnconfirmedTxs, error) { - + // reuse per_page validator limit := validatePerPage(limitPtr) txs := c.node.Mempool.ReapMaxTxs(limit) @@ -804,9 +807,9 @@ func (c *Client) UnconfirmedTxs(ctx context.Context, limitPtr *int) (*ctypes.Res }, nil } - - - +// CheckTx executes a new transaction against the application to determine its validity. +// +// If valid, the tx is automatically added to the mempool. func (c *Client) CheckTx(ctx context.Context, tx tmtypes.Tx) (*ctypes.ResultCheckTx, error) { res, err := c.Mempool().CheckTxSync(abci.RequestCheckTx{Tx: tx}) if err != nil { @@ -817,20 +820,20 @@ func (c *Client) CheckTx(ctx context.Context, tx tmtypes.Tx) (*ctypes.ResultChec func (c *Client) BlockValidated(height *int64) (*ResultBlockValidated, error) { _, _, chainID := c.node.P2P.Info() - + // invalid height if height == nil || *height < 0 { return &ResultBlockValidated{Result: -1, ChainID: chainID}, nil } - - if uint64(*height) > c.node.BlockManager.State.Height() { + // node has not reached the height yet + if uint64(*height) > c.node.BlockManager.State.Height() { //nolint:gosec // height is non-negative and falls in int64 return &ResultBlockValidated{Result: NotValidated, ChainID: chainID}, nil } - if uint64(*height) <= c.node.BlockManager.SettlementValidator.GetLastValidatedHeight() { + if uint64(*height) <= c.node.BlockManager.SettlementValidator.GetLastValidatedHeight() { //nolint:gosec // height is non-negative and falls in int64 return &ResultBlockValidated{Result: SLValidated, ChainID: chainID}, nil } - + // block is applied, and therefore it is validated at block level but not at state update level return &ResultBlockValidated{Result: P2PValidated, ChainID: chainID}, nil } @@ -856,7 +859,7 @@ func (c *Client) eventsRoutine(sub tmtypes.Subscription, subscriber string, q tm c.Logger.Error("subscription was cancelled, resubscribing...", "err", sub.Err(), "query", q.String()) sub = c.resubscribe(subscriber, q) - if sub == nil { + if sub == nil { // client was stopped return } case <-c.Quit(): @@ -865,7 +868,7 @@ func (c *Client) eventsRoutine(sub tmtypes.Subscription, subscriber string, q tm } } - +// Try to resubscribe with exponential backoff. 
func (c *Client) resubscribe(subscriber string, q tmpubsub.Query) tmtypes.Subscription { attempts := uint(0) for { @@ -879,7 +882,7 @@ func (c *Client) resubscribe(subscriber string, q tmpubsub.Query) tmtypes.Subscr } attempts++ - time.Sleep((10 << attempts) * time.Millisecond) + time.Sleep((10 << attempts) * time.Millisecond) // 10ms -> 20ms -> 40ms } } @@ -904,7 +907,7 @@ func (c *Client) normalizeHeight(height *int64) uint64 { if height == nil || *height == 0 { heightValue = c.node.GetBlockManagerHeight() } else { - heightValue = uint64(*height) + heightValue = uint64(*height) //nolint:gosec // height is non-negative and falls in int64 } return heightValue @@ -921,7 +924,7 @@ func (c *Client) IsSubscriptionAllowed(subscriber string) error { } func validatePerPage(perPagePtr *int) int { - if perPagePtr == nil { + if perPagePtr == nil { // no per_page parameter return defaultPerPage } @@ -939,13 +942,13 @@ func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { panic(fmt.Sprintf("zero or negative perPage: %d", perPage)) } - if pagePtr == nil || *pagePtr <= 0 { + if pagePtr == nil || *pagePtr <= 0 { // no page parameter return 1, nil } pages := ((totalCount - 1) / perPage) + 1 if pages == 0 { - pages = 1 + pages = 1 // one page (even if it's empty) } page := *pagePtr if page > pages { @@ -965,12 +968,12 @@ func validateSkipCount(page, perPage int) int { } func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { - + // filter negatives if min < 0 || max < 0 { return min, max, errors.New("height must be greater than zero") } - + // adjust for default values if min == 0 { min = 1 } @@ -978,14 +981,14 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { max = height } - + // limit max to the height max = tmmath.MinInt64(height, max) - + // limit min to the base min = tmmath.MaxInt64(base, min) - - + // limit min to within `limit` of max + // so the total number of blocks returned will be `limit` min = tmmath.MaxInt64(min, max-limit+1) if min > max { diff --git a/rpc/client/utils.go b/rpc/client/utils.go index 04ec93e09..894c60547 100644 --- a/rpc/client/utils.go +++ b/rpc/client/utils.go @@ -8,12 +8,12 @@ import ( ) const ( - - - genesisChunkSize = 16 * 1024 * 1024 + // genesisChunkSize is the maximum size, in bytes, of each + // chunk in the genesis structure for the chunked API + genesisChunkSize = 16 * 1024 * 1024 // 16 MiB ) - +// GetGenesisChunks returns chunked version of genesis. func (c *Client) GetGenesisChunks() ([]string, error) { if c.genChunks != nil { return c.genChunks, nil @@ -26,8 +26,8 @@ func (c *Client) GetGenesisChunks() ([]string, error) { return c.genChunks, err } - - +// initGenesisChunks creates a chunked format of the genesis document to make it easier to +// iterate through larger genesis structures. 
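initGenesisChunks, which continues just below, splits the genesis document into 16 MiB pieces for the chunked API. A minimal sketch of that kind of chunking, assuming the chunks are served base64-encoded as in Tendermint's chunked-genesis endpoint; the helper name here is illustrative:

package main

import (
    "encoding/base64"
    "fmt"
)

const genesisChunkSize = 16 * 1024 * 1024 // 16 MiB, matching rpc/client/utils.go above

// chunkGenesis splits marshalled genesis bytes into base64-encoded chunks
// of at most genesisChunkSize bytes each.
func chunkGenesis(data []byte) []string {
    var chunks []string
    for len(data) > 0 {
        n := genesisChunkSize
        if len(data) < n {
            n = len(data)
        }
        chunks = append(chunks, base64.StdEncoding.EncodeToString(data[:n]))
        data = data[n:]
    }
    return chunks
}

func main() {
    doc := []byte(`{"chain_id":"example-rollapp"}`)
    fmt.Println(len(chunkGenesis(doc))) // 1 for a small genesis
}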
func (c *Client) initGenesisChunks(genesis *tmtypes.GenesisDoc) error { if genesis == nil { return nil diff --git a/rpc/json/handler.go b/rpc/json/handler.go index 46d70f126..af33eed97 100644 --- a/rpc/json/handler.go +++ b/rpc/json/handler.go @@ -49,21 +49,21 @@ func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.mux.ServeHTTP(w, r) } - +// serveJSONRPC serves HTTP request func (h *handler) serveJSONRPC(w http.ResponseWriter, r *http.Request) { h.serveJSONRPCforWS(w, r, nil) } - - +// serveJSONRPC serves HTTP request +// implementation is highly inspired by Gorilla RPC v2 (but simplified a lot) func (h *handler) serveJSONRPCforWS(w http.ResponseWriter, r *http.Request, wsConn *wsConn) { - + // Create a new codec request. codecReq := h.codec.NewRequest(r) - + // Get service method to be called. method, err := codecReq.Method() if err != nil { if e, ok := err.(*json2.Error); method == "" && ok && e.Message == "EOF" { - + // just serve empty page if request is empty return } codecReq.WriteError(w, http.StatusBadRequest, err) @@ -76,7 +76,7 @@ func (h *handler) serveJSONRPCforWS(w http.ResponseWriter, r *http.Request, wsCo return } - + // Decode the args. args := reflect.New(methodSpec.argsType) if errRead := codecReq.ReadRequest(args.Interface()); errRead != nil { codecReq.WriteError(w, http.StatusBadRequest, errRead) @@ -98,7 +98,7 @@ func (h *handler) serveJSONRPCforWS(w http.ResponseWriter, r *http.Request, wsCo } rets := methodSpec.m.Call(callArgs) - + // Extract the result to error if needed. var errResult error statusCode := http.StatusOK errInter := rets[1].Interface() @@ -107,11 +107,11 @@ func (h *handler) serveJSONRPCforWS(w http.ResponseWriter, r *http.Request, wsCo errResult, _ = errInter.(error) } - - + // Prevents Internet Explorer from MIME-sniffing a response away + // from the declared content-type w.Header().Set("x-content-type-options", "nosniff") - + // Encode the response. if errResult == nil { var raw json.RawMessage raw, err = tmjson.Marshal(rets[0].Interface()) @@ -153,7 +153,7 @@ func (h *handler) newHandler(methodSpec *method) func(http.ResponseWriter, *http case reflect.String: args.Elem().Field(i).SetString(rawVal) case reflect.Slice: - + // []byte is a reflect.Slice of reflect.Uint8's if field.Type.Elem().Kind() == reflect.Uint8 { err = setByteSliceParam(rawVal, &args, i) } @@ -172,7 +172,7 @@ func (h *handler) newHandler(methodSpec *method) func(http.ResponseWriter, *http args, }) - + // Extract the result to error if needed. statusCode := http.StatusOK errInter := rets[1].Interface() if errInter != nil { @@ -185,8 +185,8 @@ func (h *handler) newHandler(methodSpec *method) func(http.ResponseWriter, *http } func (h *handler) encodeAndWriteResponse(w http.ResponseWriter, result interface{}, errResult error, statusCode int) { - - + // Prevents Internet Explorer from MIME-sniffing a response away + // from the declared content-type w.Header().Set("x-content-type-options", "nosniff") w.Header().Set("Content-Type", "application/json; charset=utf-8") diff --git a/rpc/json/service.go b/rpc/json/service.go index e1952f770..e9c1c8e08 100644 --- a/rpc/json/service.go +++ b/rpc/json/service.go @@ -20,13 +20,13 @@ import ( ) const ( - + // defaultSubscribeTimeout is the default timeout for a subscription. defaultSubscribeTimeout = 5 * time.Second - + // defaultSubscribeBufferSize is the default buffer size for a subscription. defaultSubscribeBufferSize = 100 ) - +// GetHTTPHandler returns handler configured to serve Tendermint-compatible RPC. 
func GetHTTPHandler(l *client.Client, logger types.Logger, opts ...option) (http.Handler, error) { return newHandler(newService(l, logger, opts...), json2.NewCodec(), logger), nil } @@ -137,9 +137,9 @@ func (s *service) Subscribe(req *http.Request, args *subscribeArgs, wsConn *wsCo } go func(subscriptionID []byte) { for msg := range out { - + // build the base response var resp rpctypes.RPCResponse - + // Check if subscriptionID is string or int and generate the rest of the response accordingly subscriptionIDInt, err := strconv.Atoi(string(subscriptionID)) if err != nil { s.logger.Info("Failed to convert subscriptionID to int") @@ -147,7 +147,7 @@ func (s *service) Subscribe(req *http.Request, args *subscribeArgs, wsConn *wsCo } else { resp = rpctypes.NewRPCSuccessResponse(rpctypes.JSONRPCIntID(subscriptionIDInt), msg) } - + // Marshal response to JSON and send it to the websocket queue jsonBytes, err := json.MarshalIndent(resp, "", " ") if err != nil { s.logger.Error("marshal RPCResponse to JSON", "err", err) @@ -180,7 +180,7 @@ func (s *service) UnsubscribeAll(req *http.Request, args *unsubscribeAllArgs) (* return &emptyResult{}, nil } - +// info API func (s *service) Health(req *http.Request, args *healthArgs) (*ctypes.ResultHealth, error) { return s.client.Health(req.Context()) } @@ -202,7 +202,7 @@ func (s *service) Genesis(req *http.Request, args *genesisArgs) (*ctypes.ResultG } func (s *service) GenesisChunked(req *http.Request, args *genesisChunkedArgs) (*ctypes.ResultGenesisChunk, error) { - return s.client.GenesisChunked(req.Context(), uint(args.ID)) + return s.client.GenesisChunked(req.Context(), uint(args.ID)) //nolint:gosec // id is always positive } func (s *service) Block(req *http.Request, args *blockArgs) (*ctypes.ResultBlock, error) { @@ -261,7 +261,7 @@ func (s *service) NumUnconfirmedTxs(req *http.Request, args *numUnconfirmedTxsAr return s.client.NumUnconfirmedTxs(req.Context()) } - +// tx broadcast API func (s *service) BroadcastTxCommit(req *http.Request, args *broadcastTxCommitArgs) (*ctypes.ResultBroadcastTxCommit, error) { return s.client.BroadcastTxCommit(req.Context(), args.Tx) } @@ -274,7 +274,7 @@ func (s *service) BroadcastTxAsync(req *http.Request, args *broadcastTxAsyncArgs return s.client.BroadcastTxAsync(req.Context(), args.Tx) } - +// abci API func (s *service) ABCIQuery(req *http.Request, args *ABCIQueryArgs) (*ctypes.ResultABCIQuery, error) { return s.client.ABCIQueryWithOptions(req.Context(), args.Path, args.Data, rpcclient.ABCIQueryOptions{ Height: int64(args.Height), @@ -286,7 +286,7 @@ func (s *service) ABCIInfo(req *http.Request, args *ABCIInfoArgs) (*ctypes.Resul return s.client.ABCIInfo(req.Context()) } - +// evidence API func (s *service) BroadcastEvidence(req *http.Request, args *broadcastEvidenceArgs) (*ctypes.ResultBroadcastEvidence, error) { return s.client.BroadcastEvidence(req.Context(), args.Evidence) } diff --git a/rpc/json/types.go b/rpc/json/types.go index 23e84dff6..19f1f8513 100644 --- a/rpc/json/types.go +++ b/rpc/json/types.go @@ -18,7 +18,7 @@ type unsubscribeArgs struct { } type unsubscribeAllArgs struct{} - +// info API type ( healthArgs struct{} statusArgs struct{} @@ -86,7 +86,7 @@ type unconfirmedTxsArgs struct { } type numUnconfirmedTxsArgs struct{} - +// tx broadcast API type broadcastTxCommitArgs struct { Tx types.Tx `json:"tx"` } @@ -97,9 +97,9 @@ type broadcastTxAsyncArgs struct { Tx types.Tx `json:"tx"` } +// abci API - - +// ABCIQueryArgs defines args for ABCI Query method. 
type ABCIQueryArgs struct { Path string `json:"path"` Data bytes.HexBytes `json:"data"` @@ -107,10 +107,10 @@ type ABCIQueryArgs struct { Prove bool `json:"prove"` } - +// ABCIInfoArgs defines args for ABCI Info method. type ABCIInfoArgs struct{} - +// evidence API type broadcastEvidenceArgs struct { Evidence types.Evidence `json:"evidence"` @@ -118,20 +118,20 @@ type emptyResult struct{} +// JSON-deserialization specific types - - +// StrInt is a proper int or quoted "int" type StrInt int - +// StrInt64 is a proper int64 or quoted "int64" type StrInt64 int64 - +// UnmarshalJSON parses JSON (int or int quoted as string) into StrInt64 func (s *StrInt64) UnmarshalJSON(b []byte) error { return unmarshalStrInt64(b, s) } - +// UnmarshalJSON parses JSON (int or int quoted as string) into StrInt func (s *StrInt) UnmarshalJSON(b []byte) error { var val StrInt64 err := unmarshalStrInt64(b, &val) diff --git a/rpc/json/ws.go b/rpc/json/ws.go index a9728e5a9..a086ba980 100644 --- a/rpc/json/ws.go +++ b/rpc/json/ws.go @@ -40,7 +40,7 @@ func (wsc *wsConn) sendLoop() { } func (h *handler) wsHandler(w http.ResponseWriter, r *http.Request) { - + // TODO(tzdybal): configuration options upgrader := websocket.Upgrader{ ReadBufferSize: 1024, WriteBufferSize: 1024, @@ -89,7 +89,7 @@ func (h *handler) wsHandler(w http.ResponseWriter, r *http.Request) { } if mt != websocket.TextMessage { - + // TODO(tzdybal): https://github.com/dymensionxyz/dymint/issues/465 h.logger.Debug("expected text message") continue } @@ -111,14 +111,14 @@ func newResponseWriter(w io.Writer) http.ResponseWriter { return &wsResponse{w} } - +// wsResponse is a simple implementation of http.ResponseWriter type wsResponse struct { w io.Writer } var _ http.ResponseWriter = wsResponse{} - +// Write uses the underlying writer to write the response to the WebSocket func (w wsResponse) Write(bytes []byte) (int, error) { return w.w.Write(bytes) } diff --git a/rpc/middleware/client.go b/rpc/middleware/client.go index 6d175fb2b..32c232564 100644 --- a/rpc/middleware/client.go +++ b/rpc/middleware/client.go @@ -6,14 +6,14 @@ import ( "github.com/tendermint/tendermint/libs/log" ) - - +// Client is a struct that holds registered middlewares and provides methods +// to run these middlewares on an HTTP handler. type Client struct { registry *Registry logger log.Logger } - +// NewClient creates and returns a new Client instance. func NewClient(reg Registry, logger log.Logger) *Client { return &Client{ registry: &reg, @@ -21,7 +21,7 @@ func NewClient(reg Registry, logger log.Logger) *Client { } } - +// Handle wraps the provided http.Handler with the registered middlewares and returns the final http.Handler. func (mc *Client) Handle(h http.Handler) http.Handler { registeredMiddlewares := mc.registry.GetRegistered() finalHandler := h diff --git a/rpc/middleware/registry.go b/rpc/middleware/registry.go index 70a1b2222..9cbf9a795 100644 --- a/rpc/middleware/registry.go +++ b/rpc/middleware/registry.go @@ -12,20 +12,20 @@ var ( instance *Registry ) - +// HandlerFunc is a type alias for a function that takes an http.Handler and returns a new http.Handler. type HandlerFunc func(http.Handler) http.Handler - +// Middleware is an interface representing a middleware with a Handler method. type Middleware interface { Handler(logger log.Logger) HandlerFunc } - +// Registry is a struct that holds a list of registered middlewares. type Registry struct { middlewareList []Middleware } - +// GetRegistry returns a singleton instance of the Registry. 
func GetRegistry() *Registry { once.Do(func() { instance = &Registry{} @@ -33,12 +33,12 @@ func GetRegistry() *Registry { return instance } - +// Register adds a Middleware to the list of registered middlewares in the Registry. func (r *Registry) Register(m Middleware) { r.middlewareList = append(r.middlewareList, m) } - +// GetRegistered returns a list of registered middlewares. func (r *Registry) GetRegistered() []Middleware { return r.middlewareList } diff --git a/rpc/middleware/status.go b/rpc/middleware/status.go index 16172aa48..01e16e559 100644 --- a/rpc/middleware/status.go +++ b/rpc/middleware/status.go @@ -16,7 +16,7 @@ func (s Status) Handler(logger log.Logger) HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { err := s.Err() isHealthy := err == nil - + // in case the endpoint is health we return health response if r.URL.Path == "/health" { w.WriteHeader(http.StatusOK) diff --git a/rpc/server.go b/rpc/server.go index 9eafb9f91..6368d4ff1 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -26,7 +26,7 @@ import ( "github.com/dymensionxyz/dymint/rpc/middleware" ) - +// Server handles HTTP and JSON-RPC requests, exposing Tendermint-compatible API. type Server struct { *service.BaseService @@ -43,21 +43,21 @@ type Server struct { const ( onStopTimeout = 5 * time.Second - + // readHeaderTimeout is the timeout for reading the request headers. readHeaderTimeout = 5 * time.Second ) - +// Option is a function that configures the Server. type Option func(*Server) - +// WithListener is an option that sets the listener. func WithListener(listener net.Listener) Option { return func(d *Server) { d.listener = listener } } - +// NewServer creates new instance of Server with given configuration. func NewServer(node *node.Node, config *config.RPCConfig, logger log.Logger, options ...Option) *Server { srv := &Server{ config: config, @@ -66,16 +66,16 @@ func NewServer(node *node.Node, config *config.RPCConfig, logger log.Logger, opt } srv.BaseService = service.NewBaseService(logger, "RPC", srv) - + // Apply options for _, option := range options { option(srv) } return srv } - - - +// Client returns a Tendermint-compatible rpc Client instance. +// +// This method is called in cosmos-sdk. func (s *Server) Client() rpcclient.Client { return s.client } @@ -84,13 +84,13 @@ func (s *Server) PubSubServer() *pubsub.Server { return s.node.PubSubServer() } - +// OnStart is called when Server is started (see service.BaseService for details). func (s *Server) OnStart() error { s.startEventListener() return s.startRPC() } - +// OnStop is called when Server is stopped (see service.BaseService for details). func (s *Server) OnStop() { ctx, cancel := context.WithTimeout(context.Background(), onStopTimeout) defer cancel() @@ -99,12 +99,12 @@ func (s *Server) OnStop() { } } - +// startEventListener registers events to callbacks. func (s *Server) startEventListener() { go uevent.MustSubscribe(context.Background(), s.PubSubServer(), "RPCNodeHealthStatusHandler", events.QueryHealthStatus, s.onNodeHealthUpdate, s.Logger) } - +// onNodeHealthUpdate is a callback function that handles health status events from the node. 
func (s *Server) onNodeHealthUpdate(event pubsub.Message) { eventData, _ := event.Data().(*events.DataHealthStatus) if eventData.Error != nil { @@ -169,13 +169,13 @@ func (s *Server) startRPC() error { handler = c.Handler(handler) } - + // Apply Middleware reg := middleware.GetRegistry() reg.Register(middleware.Status{Err: s.getHealthStatus}) middlewareClient := middleware.NewClient(*reg, s.Logger.With("module", "rpc/middleware")) handler = middlewareClient.Handle(handler) - + // Start HTTP server go func() { err := s.serve(listener, handler) if !errors.Is(err, http.ErrServerClosed) { diff --git a/settlement/config.go b/settlement/config.go index 4895849fd..3f01909e0 100644 --- a/settlement/config.go +++ b/settlement/config.go @@ -5,7 +5,7 @@ import ( "time" ) - +// Config for the DymensionLayerClient type Config struct { KeyringBackend string `mapstructure:"keyring_backend"` NodeAddress string `mapstructure:"settlement_node_address"` @@ -19,9 +19,9 @@ type Config struct { RetryMinDelay time.Duration `mapstructure:"retry_min_delay"` BatchAcceptanceTimeout time.Duration `mapstructure:"batch_acceptance_timeout"` BatchAcceptanceAttempts uint `mapstructure:"batch_acceptance_attempts"` - + // For testing only. probably should be refactored ProposerPubKey string `json:"proposer_pub_key"` - + // Config used for sl shared grpc mock SLGrpc GrpcConfig `mapstructure:",squash"` } diff --git a/settlement/dymension/cosmosclient.go b/settlement/dymension/cosmosclient.go index 7feaab2f3..3e90eb499 100644 --- a/settlement/dymension/cosmosclient.go +++ b/settlement/dymension/cosmosclient.go @@ -17,10 +17,10 @@ import ( sequencertypes "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer" ) - - - - +// CosmosClient is an interface for interacting with cosmos client chains. +// It is a wrapper around the cosmos client in order to provide with an interface which can be implemented by +// other clients and can easily be mocked for testing purposes. +// Currently it contains only the methods that are used by the dymension hub client. type CosmosClient interface { Context() sdkclient.Context StartEventListener() error @@ -41,7 +41,7 @@ type cosmosClient struct { var _ CosmosClient = &cosmosClient{} - +// NewCosmosClient creates a new cosmos client func NewCosmosClient(client cosmosclient.Client) CosmosClient { return &cosmosClient{client} } diff --git a/settlement/dymension/dymension.go b/settlement/dymension/dymension.go index 101aab439..6a995ef69 100644 --- a/settlement/dymension/dymension.go +++ b/settlement/dymension/dymension.go @@ -38,7 +38,7 @@ const ( postBatchSubscriberPrefix = "postBatchSubscriber" ) - +// Client is the client for the Dymension Hub. type Client struct { config *settlement.Config rollappId string @@ -58,7 +58,7 @@ type Client struct { var _ settlement.ClientI = &Client{} - +// Init is called once. it initializes the struct members. func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub.Server, logger types.Logger, options ...settlement.Option) error { interfaceRegistry := cdctypes.NewInterfaceRegistry() cryptocodec.RegisterInterfaces(interfaceRegistry) @@ -76,7 +76,7 @@ func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub c.retryMinDelay = config.RetryMinDelay c.retryMaxDelay = config.RetryMaxDelay - + // Apply options for _, apply := range options { apply(c) } @@ -96,7 +96,7 @@ func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub return nil } - +// Start starts the HubClient. 
func (c *Client) Start() error { err := c.cosmosClient.StartEventListener() if err != nil { @@ -106,31 +106,31 @@ func (c *Client) Start() error { return nil } - +// Stop stops the HubClient. func (c *Client) Stop() error { return c.cosmosClient.StopEventListener() } - - +// SubmitBatch posts a batch to the Dymension Hub. it tries to post the batch until it is accepted by the settlement layer. +// it emits success and failure events to the event bus accordingly. func (c *Client) SubmitBatch(batch *types.Batch, _ da.Client, daResult *da.ResultSubmitBatch) error { msgUpdateState, err := c.convertBatchToMsgUpdateState(batch, daResult) if err != nil { return fmt.Errorf("convert batch to msg update state: %w", err) } - + // TODO: probably should be changed to be a channel, as the eventHandler is also in the HubClient in he produces the event postBatchSubscriberClient := fmt.Sprintf("%s-%d-%s", postBatchSubscriberPrefix, batch.StartHeight(), uuid.New().String()) subscription, err := c.pubsub.Subscribe(c.ctx, postBatchSubscriberClient, settlement.EventQueryNewSettlementBatchAccepted, 1000) if err != nil { return fmt.Errorf("pub sub subscribe to settlement state updates: %w", err) } - + //nolint:errcheck defer c.pubsub.UnsubscribeAll(c.ctx, postBatchSubscriberClient) for { - + // broadcast loop: broadcast the transaction to the blockchain (with infinite retries). err := c.RunWithRetryInfinitely(func() error { err := c.broadcastBatch(msgUpdateState) if err != nil { @@ -154,7 +154,7 @@ func (c *Client) SubmitBatch(batch *types.Batch, _ da.Client, daResult *da.Resul return fmt.Errorf("broadcast batch: %w", err) } - + // Batch was submitted successfully. Wait for it to be accepted by the settlement layer. timer := time.NewTimer(c.batchAcceptanceTimeout) defer timer.Stop() attempt := uint64(1) @@ -171,20 +171,20 @@ func (c *Client) SubmitBatch(batch *types.Batch, _ da.Client, daResult *da.Resul eventData, _ := event.Data().(*settlement.EventDataNewBatch) if eventData.EndHeight != batch.EndHeight() { c.logger.Debug("Received event for a different batch, ignoring.", "event", eventData) - continue + continue // continue waiting for acceptance of the current batch } c.logger.Info("Batch accepted.", "startHeight", batch.StartHeight(), "endHeight", batch.EndHeight(), "stateIndex", eventData.StateIndex, "dapath", msgUpdateState.DAPath) return nil case <-timer.C: - + // Check if the batch was accepted by the settlement layer, and we've just missed the event. includedBatch, err := c.pollForBatchInclusion(batch.EndHeight()) timer.Reset(c.batchAcceptanceTimeout) - + // no error, but still not included if err == nil && !includedBatch { attempt++ if attempt <= uint64(c.batchAcceptanceAttempts) { - continue + continue // continue waiting for acceptance of the current batch } c.logger.Error( "Timed out waiting for batch inclusion on settlement layer", @@ -193,7 +193,7 @@ func (c *Client) SubmitBatch(batch *types.Batch, _ da.Client, daResult *da.Resul "endHeight", batch.EndHeight(), ) - break + break // breaks the switch case, and goes back to the broadcast loop } if err != nil { c.logger.Error( @@ -205,13 +205,13 @@ func (c *Client) SubmitBatch(batch *types.Batch, _ da.Client, daResult *da.Resul "error", err, ) - continue + continue // continue waiting for acceptance of the current batch } - + // all good c.logger.Info("Batch accepted", "startHeight", batch.StartHeight(), "endHeight", batch.EndHeight()) return nil } - break + break // failed waiting for acceptance. 
broadcast the batch again } } } @@ -237,7 +237,7 @@ func (c *Client) getStateInfo(index, height *uint64) (res *rollapptypes.QueryGet if err != nil { return nil, fmt.Errorf("query state info: %w", err) } - if res == nil { + if res == nil { // not supposed to happen return nil, fmt.Errorf("empty response with nil err: %w", gerrc.ErrUnknown) } return @@ -259,13 +259,13 @@ func (c *Client) getLatestHeight(finalized bool) (res *rollapptypes.QueryGetLate if err != nil { return nil, fmt.Errorf("query state info: %w", err) } - if res == nil { + if res == nil { // not supposed to happen return nil, fmt.Errorf("empty response with nil err: %w", gerrc.ErrUnknown) } return } - +// GetLatestBatch returns the latest batch from the Dymension Hub. func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { res, err := c.getStateInfo(nil, nil) if err != nil { @@ -274,7 +274,7 @@ func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { return convertStateInfoToResultRetrieveBatch(&res.StateInfo) } - +// GetBatchAtIndex returns the batch at the given index from the Dymension Hub. func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, error) { res, err := c.getStateInfo(&index, nil) if err != nil { @@ -283,7 +283,7 @@ func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, return convertStateInfoToResultRetrieveBatch(&res.StateInfo) } - +// GetBatchAtHeight returns the batch at the given height from the Dymension Hub. func (c *Client) GetBatchAtHeight(height uint64) (*settlement.ResultRetrieveBatch, error) { res, err := c.getStateInfo(nil, &height) if err != nil { @@ -292,7 +292,7 @@ func (c *Client) GetBatchAtHeight(height uint64) (*settlement.ResultRetrieveBatc return convertStateInfoToResultRetrieveBatch(&res.StateInfo) } - +// GetLatestHeight returns the latest state update height from the settlement layer. func (c *Client) GetLatestHeight() (uint64, error) { res, err := c.getLatestHeight(false) if err != nil { @@ -301,7 +301,7 @@ func (c *Client) GetLatestHeight() (uint64, error) { return res.Height, nil } - +// GetLatestFinalizedHeight returns the latest finalized height from the settlement layer. func (c *Client) GetLatestFinalizedHeight() (uint64, error) { res, err := c.getLatestHeight(true) if err != nil { @@ -310,16 +310,16 @@ func (c *Client) GetLatestFinalizedHeight() (uint64, error) { return res.Height, nil } - - +// GetProposerAtHeight return the proposer at height. +// In case of negative height, it will return the latest proposer. func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { - + // Get all sequencers to find the proposer address seqs, err := c.GetAllSequencers() if err != nil { return nil, fmt.Errorf("get bonded sequencers: %w", err) } - + // Get either latest proposer or proposer at height var proposerAddr string if height < 0 { proposerAddr, err = c.getLatestProposer() @@ -327,12 +327,12 @@ func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { return nil, fmt.Errorf("get latest proposer: %w", err) } } else { - + // Get the state info for the relevant height and get address from there res, err := c.GetBatchAtHeight(uint64(height)) - - + // if case of height not found, it may be because it didn't arrive to the hub yet. + // In that case we want to return the current proposer. 
if err != nil { - + // If batch not found, fallback to latest proposer if errors.Is(err, gerrc.ErrNotFound) { proposerAddr, err = c.getLatestProposer() if err != nil { @@ -350,7 +350,7 @@ func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { return nil, fmt.Errorf("proposer is sentinel") } - + // Find and return the matching sequencer for _, seq := range seqs { if seq.SettlementAddress == proposerAddr { return &seq, nil @@ -359,7 +359,7 @@ func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { return nil, fmt.Errorf("proposer not found") } - +// GetSequencerByAddress returns a sequencer by its address. func (c *Client) GetSequencerByAddress(address string) (types.Sequencer, error) { var res *sequencertypes.QueryGetSequencerResponse req := &sequencertypes.QueryGetSequencerRequest{ @@ -402,7 +402,7 @@ func (c *Client) GetSequencerByAddress(address string) (types.Sequencer, error) ), nil } - +// GetAllSequencers returns all sequencers of the given rollapp. func (c *Client) GetAllSequencers() ([]types.Sequencer, error) { var res *sequencertypes.QueryGetSequencersByRollappResponse req := &sequencertypes.QueryGetSequencersByRollappRequest{ @@ -425,7 +425,7 @@ func (c *Client) GetAllSequencers() ([]types.Sequencer, error) { return nil, err } - + // not supposed to happen, but just in case if res == nil { return nil, fmt.Errorf("empty response: %w", gerrc.ErrUnknown) } @@ -455,7 +455,7 @@ func (c *Client) GetAllSequencers() ([]types.Sequencer, error) { return sequencerList, nil } - +// GetBondedSequencers returns the bonded sequencers of the given rollapp. func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { var res *sequencertypes.QueryGetSequencersByRollappByStatusResponse req := &sequencertypes.QueryGetSequencersByRollappByStatusRequest{ @@ -479,7 +479,7 @@ func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { return nil, err } - + // not supposed to happen, but just in case if res == nil { return nil, fmt.Errorf("empty response: %w", gerrc.ErrUnknown) } @@ -508,10 +508,10 @@ func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { return sequencerList, nil } - - - - +// GetNextProposer returns the next proposer on the hub. +// In case the current proposer is the next proposer, it returns nil. +// in case there is no next proposer, it returns an empty sequencer struct. +// in case there is a next proposer, it returns the next proposer. func (c *Client) GetNextProposer() (*types.Sequencer, error) { var ( nextAddr string @@ -577,7 +577,7 @@ func (c *Client) GetRollapp() (*types.Rollapp, error) { return nil, fmt.Errorf("get rollapp: %w", err) } - + // not supposed to happen, but just in case if res == nil { return nil, fmt.Errorf("empty response: %w", gerrc.ErrUnknown) } @@ -586,7 +586,7 @@ func (c *Client) GetRollapp() (*types.Rollapp, error) { return &rollapp, nil } - +// GetObsoleteDrs returns the list of deprecated DRS. 
func (c *Client) GetObsoleteDrs() ([]uint32, error) { var res *rollapptypes.QueryObsoleteDRSVersionsResponse req := &rollapptypes.QueryObsoleteDRSVersionsRequest{} @@ -606,7 +606,7 @@ func (c *Client) GetObsoleteDrs() ([]uint32, error) { return nil, fmt.Errorf("get rollapp: %w", err) } - + // not supposed to happen, but just in case if res == nil { return nil, fmt.Errorf("empty response: %w", gerrc.ErrUnknown) } @@ -694,7 +694,7 @@ func getCosmosClientOptions(config *settlement.Config) []cosmosclient.Option { return options } - +// pollForBatchInclusion polls the hub for the inclusion of a batch with the given end height. func (c *Client) pollForBatchInclusion(batchEndHeight uint64) (bool, error) { latestBatch, err := c.GetLatestBatch() if err != nil { @@ -768,7 +768,7 @@ func (c *Client) ValidateGenesisBridgeData(data rollapptypes.GenesisBridgeData) return fmt.Errorf("rollapp client: validate genesis bridge: %w", err) } - + // not supposed to happen, but just in case if res == nil { return fmt.Errorf("empty response: %w", gerrc.ErrUnknown) } diff --git a/settlement/dymension/events.go b/settlement/dymension/events.go index 29280911a..ba0a2849e 100644 --- a/settlement/dymension/events.go +++ b/settlement/dymension/events.go @@ -12,7 +12,7 @@ import ( ctypes "github.com/tendermint/tendermint/rpc/core/types" ) - +// TODO: use types and attributes from dymension proto const ( eventStateUpdateFmt = "state_update.rollapp_id='%s' AND state_update.status='PENDING'" eventStateUpdateFinalizedFmt = "state_update.rollapp_id='%s' AND state_update.status='FINALIZED'" @@ -42,7 +42,7 @@ func (c *Client) eventHandler() { eventRotationStartedQ := fmt.Sprintf(eventRotationStartedFmt, c.rollappId) eventStateUpdateFinalizedQ := fmt.Sprintf(eventStateUpdateFinalizedFmt, c.rollappId) - + // TODO: add validation callback for the event data eventMap := map[string]string{ eventStateUpdateQ: settlement.EventNewBatchAccepted, eventSequencersListQ: settlement.EventNewBondedSequencer, @@ -66,7 +66,7 @@ func (c *Client) eventHandler() { if err != nil { panic(fmt.Errorf("subscribe to events (%s): %w", eventStateUpdateFinalizedQ, err)) } - defer c.cosmosClient.UnsubscribeAll(c.ctx, subscriber) + defer c.cosmosClient.UnsubscribeAll(c.ctx, subscriber) //nolint:errcheck for { var e ctypes.ResultEvent @@ -74,7 +74,7 @@ func (c *Client) eventHandler() { case <-c.ctx.Done(): return case <-c.cosmosClient.EventListenerQuit(): - + // TODO(omritoptix): Fallback to polling return case e = <-stateUpdatesC: case e = <-sequencersListC: @@ -86,7 +86,7 @@ func (c *Client) eventHandler() { } func (c *Client) handleReceivedEvent(event ctypes.ResultEvent, eventMap map[string]string) { - + // Assert value is in map and publish it to the event bus internalType, ok := eventMap[event.Query] if !ok { c.logger.Error("Ignoring event. 
Type not supported.", "event", event) @@ -105,7 +105,7 @@ func (c *Client) handleReceivedEvent(event ctypes.ResultEvent, eventMap map[stri func convertToNewBatchEvent(rawEventData ctypes.ResultEvent) (*settlement.EventDataNewBatch, error) { var errs []error - + // check all expected attributes exist events := rawEventData.Events if events["state_update.num_blocks"] == nil || events["state_update.start_height"] == nil || events["state_update.state_info_index"] == nil { return nil, fmt.Errorf("missing expected attributes in event") @@ -137,12 +137,12 @@ func convertToNewBatchEvent(rawEventData ctypes.ResultEvent) (*settlement.EventD } func convertToNewSequencerEvent(rawEventData ctypes.ResultEvent) (*settlement.EventDataNewBondedSequencer, error) { - + // check all expected attributes exist events := rawEventData.Events if events["create_sequencer.rollapp_id"] == nil { return nil, fmt.Errorf("missing expected attributes in event") } - + // TODO: validate rollappID if events["create_sequencer.sequencer"] == nil { return nil, fmt.Errorf("missing expected attributes in event") @@ -154,13 +154,13 @@ func convertToNewSequencerEvent(rawEventData ctypes.ResultEvent) (*settlement.Ev } func convertToRotationStartedEvent(rawEventData ctypes.ResultEvent) (*settlement.EventDataRotationStarted, error) { - + // check all expected attributes exist events := rawEventData.Events if events["proposer_rotation_started.rollapp_id"] == nil { return nil, fmt.Errorf("missing expected attributes in event") } - + // TODO: validate rollappID if events["proposer_rotation_started.next_proposer"] == nil { return nil, fmt.Errorf("missing expected attributes in event") diff --git a/settlement/dymension/options.go b/settlement/dymension/options.go index 00cc5be2d..94ffa07c3 100644 --- a/settlement/dymension/options.go +++ b/settlement/dymension/options.go @@ -6,7 +6,7 @@ import ( "github.com/dymensionxyz/dymint/settlement" ) - +// WithCosmosClient is an option that sets the CosmosClient. func WithCosmosClient(cosmosClient CosmosClient) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) @@ -14,7 +14,7 @@ func WithCosmosClient(cosmosClient CosmosClient) settlement.Option { } } - +// WithRetryAttempts is an option that sets the number of attempts to retry when interacting with the settlement layer. func WithRetryAttempts(batchRetryAttempts uint) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) @@ -22,7 +22,7 @@ func WithRetryAttempts(batchRetryAttempts uint) settlement.Option { } } - +// WithBatchAcceptanceTimeout is an option that sets the timeout for waiting for a batch to be accepted by the settlement layer. func WithBatchAcceptanceTimeout(batchAcceptanceTimeout time.Duration) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) @@ -30,7 +30,7 @@ func WithBatchAcceptanceTimeout(batchAcceptanceTimeout time.Duration) settlement } } - +// WithBatchAcceptanceAttempts is an option that sets the number of attempts to check if a batch has been accepted by the settlement layer. func WithBatchAcceptanceAttempts(batchAcceptanceAttempts uint) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) @@ -38,7 +38,7 @@ func WithBatchAcceptanceAttempts(batchAcceptanceAttempts uint) settlement.Option } } - +// WithRetryMinDelay is an option that sets the retry function min delay between hub retry attempts. 
func WithRetryMinDelay(retryMinDelay time.Duration) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) @@ -46,7 +46,7 @@ func WithRetryMinDelay(retryMinDelay time.Duration) settlement.Option { } } - +// WithRetryMaxDelay is an option that sets the retry function max delay between hub retry attempts. func WithRetryMaxDelay(retryMaxDelay time.Duration) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) diff --git a/settlement/dymension/utils.go b/settlement/dymension/utils.go index 6dbbae0a7..def62fb91 100644 --- a/settlement/dymension/utils.go +++ b/settlement/dymension/utils.go @@ -8,8 +8,8 @@ import ( rollapptypes "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp" ) - - +// RunWithRetry runs the given operation with retry, doing a number of attempts, and taking the last +// error only. It uses the context of the HubClient. func (c *Client) RunWithRetry(operation func() error) error { return retry.Do(operation, retry.Context(c.ctx), @@ -20,8 +20,8 @@ func (c *Client) RunWithRetry(operation func() error) error { ) } - - +// RunWithRetryInfinitely runs the given operation with retry, doing a number of attempts, and taking the last +// error only. It uses the context of the HubClient. func (c *Client) RunWithRetryInfinitely(operation func() error) error { return retry.Do(operation, retry.Context(c.ctx), diff --git a/settlement/errors.go b/settlement/errors.go index 55496c242..b2b4073b7 100644 --- a/settlement/errors.go +++ b/settlement/errors.go @@ -6,7 +6,7 @@ import ( "github.com/dymensionxyz/gerr-cosmos/gerrc" ) - +// ErrBatchNotAccepted is returned when a batch is not accepted by the settlement layer. var ErrBatchNotAccepted = fmt.Errorf("batch not accepted: %w", gerrc.ErrUnknown) type ErrNextSequencerAddressFraud struct { diff --git a/settlement/events.go b/settlement/events.go index 931df574f..2ff811410 100644 --- a/settlement/events.go +++ b/settlement/events.go @@ -7,17 +7,17 @@ import ( ) const ( - + // EventTypeKey is a reserved composite key for event name. EventTypeKey = "settlement.event" - + // Event types EventNewBatchAccepted = "NewBatchAccepted" EventNewBondedSequencer = "NewBondedSequencer" EventRotationStarted = "RotationStarted" EventNewBatchFinalized = "NewBatchFinalized" ) - +// Convenience objects var ( EventNewBatchAcceptedList = map[string][]string{EventTypeKey: {EventNewBatchAccepted}} EventNewBondedSequencerList = map[string][]string{EventTypeKey: {EventNewBondedSequencer}} @@ -25,7 +25,7 @@ var ( EventNewBatchFinalizedList = map[string][]string{EventTypeKey: {EventNewBatchFinalized}} ) - +// Queries var ( EventQueryNewSettlementBatchAccepted = uevent.QueryFor(EventTypeKey, EventNewBatchAccepted) EventQueryNewSettlementBatchFinalized = uevent.QueryFor(EventTypeKey, EventNewBatchFinalized) @@ -33,13 +33,13 @@ var ( EventQueryRotationStarted = uevent.QueryFor(EventTypeKey, EventRotationStarted) ) - +// Data type EventDataNewBatch struct { StartHeight uint64 - + // EndHeight is the height of the last accepted batch EndHeight uint64 - + // StateIndex is the rollapp-specific index the batch was saved in the SL StateIndex uint64 } diff --git a/settlement/grpc/grpc.go b/settlement/grpc/grpc.go index 45c5deef0..c09c72798 100644 --- a/settlement/grpc/grpc.go +++ b/settlement/grpc/grpc.go @@ -36,8 +36,8 @@ const ( addressPrefix = "dym" ) - - +// Client is an extension of the base settlement layer client +// for usage in tests and local development. 
type Client struct { ctx context.Context rollappID string @@ -59,14 +59,14 @@ func (c *Client) GetRollapp() (*types.Rollapp, error) { }, nil } - +// GetObsoleteDrs returns the list of deprecated DRS. func (c *Client) GetObsoleteDrs() ([]uint32, error) { return []uint32{}, nil } var _ settlement.ClientI = (*Client)(nil) - +// Init initializes the mock layer client. func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub.Server, logger types.Logger, options ...settlement.Option) error { ctx := context.Background() @@ -149,7 +149,7 @@ func initConfig(conf settlement.Config) (proposer string, err error) { return } - +// Start starts the mock client func (c *Client) Start() error { c.logger.Info("Starting grpc mock settlement") @@ -159,7 +159,7 @@ func (c *Client) Start() error { for { select { case <-c.stopchan: - + // stop return case <-tick.C: index, err := c.sl.GetIndex(c.ctx, &slmock.SLGetIndexRequest{}) @@ -185,14 +185,14 @@ func (c *Client) Start() error { return nil } - +// Stop stops the mock client func (c *Client) Stop() error { c.logger.Info("Stopping grpc mock settlement") close(c.stopchan) return nil } - +// SubmitBatch saves the batch to the kv store func (c *Client) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *da.ResultSubmitBatch) error { settlementBatch := c.convertBatchtoSettlementBatch(batch, daResult) err := c.saveBatch(settlementBatch) @@ -200,7 +200,7 @@ func (c *Client) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *d return err } - time.Sleep(10 * time.Millisecond) + time.Sleep(10 * time.Millisecond) // mimic a delay in batch acceptance err = c.pubsub.PublishWithEvents(context.Background(), &settlement.EventDataNewBatch{EndHeight: settlementBatch.EndHeight}, settlement.EventNewBatchAcceptedList) if err != nil { return err @@ -208,7 +208,7 @@ func (c *Client) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *d return nil } - +// GetLatestBatch returns the latest batch from the kv store func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { c.logger.Info("GetLatestBatch grpc", "index", c.slStateIndex) batchResult, err := c.GetBatchAtIndex(atomic.LoadUint64(&c.slStateIndex)) @@ -218,7 +218,7 @@ func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { return batchResult, nil } - +// GetBatchAtIndex returns the batch at the given index func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, error) { batchResult, err := c.retrieveBatchAtStateIndex(index) if err != nil { @@ -230,7 +230,7 @@ func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, } func (c *Client) GetBatchAtHeight(h uint64) (*settlement.ResultRetrieveBatch, error) { - + // Binary search implementation left, right := uint64(1), c.slStateIndex for left <= right { @@ -256,7 +256,7 @@ func (c *Client) GetBatchAtHeight(h uint64) (*settlement.ResultRetrieveBatch, er return nil, gerrc.ErrNotFound } - +// GetProposerAtHeight implements settlement.ClientI. func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { pubKeyBytes, err := hex.DecodeString(c.ProposerPubKey) if err != nil { @@ -279,17 +279,17 @@ func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { ), nil } - +// GetSequencerByAddress returns all sequencer information by its address. 
Not implemented since it will not be used in grpc SL func (c *Client) GetSequencerByAddress(address string) (types.Sequencer, error) { panic("GetSequencerByAddress not implemented in grpc SL") } - +// GetAllSequencers implements settlement.ClientI. func (c *Client) GetAllSequencers() ([]types.Sequencer, error) { return c.GetBondedSequencers() } - +// GetBondedSequencers implements settlement.ClientI. func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { proposer, err := c.GetProposerAtHeight(-1) if err != nil { @@ -298,17 +298,17 @@ func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { return []types.Sequencer{*proposer}, nil } - +// GetNextProposer implements settlement.ClientI. func (c *Client) GetNextProposer() (*types.Sequencer, error) { return nil, nil } - +// GetLatestHeight returns the latest state update height from the settlement layer. func (c *Client) GetLatestHeight() (uint64, error) { return c.latestHeight.Load(), nil } - +// GetLatestFinalizedHeight returns the latest finalized height from the settlement layer. func (c *Client) GetLatestFinalizedHeight() (uint64, error) { return uint64(0), gerrc.ErrNotFound } @@ -320,7 +320,7 @@ func (c *Client) saveBatch(batch *settlement.Batch) error { if err != nil { return err } - + // Save the batch to the next state index c.logger.Debug("Saving batch to grpc settlement layer", "index", c.slStateIndex+1) setBatchReply, err := c.sl.SetBatch(c.ctx, &slmock.SLSetBatchRequest{Index: c.slStateIndex + 1, Batch: b}) if err != nil { @@ -337,7 +337,7 @@ func (c *Client) saveBatch(batch *settlement.Batch) error { return err } c.logger.Debug("Setting grpc SL Index to ", "index", setIndexReply.GetIndex()) - + // Save latest height in memory and in store c.latestHeight.Store(batch.EndHeight) return nil } diff --git a/settlement/local/local.go b/settlement/local/local.go index 20d3ec8ee..4d8a64664 100644 --- a/settlement/local/local.go +++ b/settlement/local/local.go @@ -38,18 +38,18 @@ const ( var ( settlementKVPrefix = []byte{0} - slStateIndexKey = []byte("slStateIndex") + slStateIndexKey = []byte("slStateIndex") // used to recover after reboot ) - - +// Client is an extension of the base settlement layer client +// for usage in tests and local development. type Client struct { rollappID string ProposerPubKey string logger types.Logger pubsub *pubsub.Server - mu sync.Mutex + mu sync.Mutex // keep the following in sync with *each other* slStateIndex uint64 latestHeight uint64 settlementKV store.KV @@ -64,7 +64,7 @@ func (c *Client) GetRollapp() (*types.Rollapp, error) { var _ settlement.ClientI = (*Client)(nil) - +// Init initializes the mock layer client. 
func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub.Server, logger types.Logger, options ...settlement.Option) error { slstore, proposer, err := initConfig(config) if err != nil { @@ -77,7 +77,7 @@ func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub b, err := settlementKV.Get(slStateIndexKey) if err == nil { slStateIndex = binary.BigEndian.Uint64(b) - + // Get the latest height from the stateIndex var settlementBatch rollapptypes.MsgUpdateState b, err := settlementKV.Get(keyFromIndex(slStateIndex)) if err != nil { @@ -101,9 +101,9 @@ func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub func initConfig(conf settlement.Config) (slstore store.KV, proposer string, err error) { if conf.KeyringHomeDir == "" { - + // init store slstore = store.NewDefaultInMemoryKVStore() - + // init proposer pub key if conf.ProposerPubKey != "" { proposer = conf.ProposerPubKey } else { @@ -135,17 +135,17 @@ func initConfig(conf settlement.Config) (slstore store.KV, proposer string, err return } - +// Start starts the mock client func (c *Client) Start() error { return nil } - +// Stop stops the mock client func (c *Client) Stop() error { return c.settlementKV.Close() } - +// SubmitBatch saves the batch to the kv store func (c *Client) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *da.ResultSubmitBatch) error { settlementBatch := c.convertBatchToSettlementBatch(batch, daResult) err := c.saveBatch(settlementBatch) @@ -153,14 +153,14 @@ func (c *Client) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *d return err } - time.Sleep(100 * time.Millisecond) + time.Sleep(100 * time.Millisecond) // mimic a delay in batch acceptance ctx := context.Background() uevent.MustPublish(ctx, c.pubsub, settlement.EventDataNewBatch{EndHeight: settlementBatch.EndHeight}, settlement.EventNewBatchAcceptedList) return nil } - +// GetLatestBatch returns the latest batch from the kv store func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { c.mu.Lock() ix := c.slStateIndex @@ -172,17 +172,17 @@ func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { return batchResult, nil } - +// GetLatestHeight returns the latest state update height from the settlement layer. func (c *Client) GetLatestHeight() (uint64, error) { return c.latestHeight, nil } - +// GetLatestFinalizedHeight returns the latest finalized height from the settlement layer. func (c *Client) GetLatestFinalizedHeight() (uint64, error) { return uint64(0), gerrc.ErrNotFound } - +// GetBatchAtIndex returns the batch at the given index func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, error) { batchResult, err := c.retrieveBatchAtStateIndex(index) if err != nil { @@ -196,7 +196,7 @@ func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, func (c *Client) GetBatchAtHeight(h uint64) (*settlement.ResultRetrieveBatch, error) { c.mu.Lock() defer c.mu.Unlock() - + // TODO: optimize (binary search, or just make another index) for i := c.slStateIndex; i > 0; i-- { b, err := c.GetBatchAtIndex(i) if err != nil { @@ -208,10 +208,10 @@ func (c *Client) GetBatchAtHeight(h uint64) (*settlement.ResultRetrieveBatch, er return b, nil } } - return nil, gerrc.ErrNotFound + return nil, gerrc.ErrNotFound // TODO: need to return a cosmos specific error? } - +// GetProposerAtHeight implements settlement.ClientI. 
func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { pubKeyBytes, err := hex.DecodeString(c.ProposerPubKey) if err != nil { @@ -234,22 +234,22 @@ func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { ), nil } - +// GetSequencerByAddress returns all sequencer information by its address. Not implemented since it will not be used in mock SL func (c *Client) GetSequencerByAddress(address string) (types.Sequencer, error) { panic("GetSequencerByAddress not implemented in local SL") } - +// GetAllSequencers implements settlement.ClientI. func (c *Client) GetAllSequencers() ([]types.Sequencer, error) { return c.GetBondedSequencers() } - +// GetObsoleteDrs returns the list of deprecated DRS. func (c *Client) GetObsoleteDrs() ([]uint32, error) { return []uint32{}, nil } - +// GetBondedSequencers implements settlement.ClientI. func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { proposer, err := c.GetProposerAtHeight(-1) if err != nil { @@ -258,7 +258,7 @@ func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { return []types.Sequencer{*proposer}, nil } - +// GetNextProposer implements settlement.ClientI. func (c *Client) GetNextProposer() (*types.Sequencer, error) { return nil, nil } @@ -274,7 +274,7 @@ func (c *Client) saveBatch(batch *settlement.Batch) error { c.mu.Lock() defer c.mu.Unlock() - + // Save the batch to the next state index c.slStateIndex++ err = c.settlementKV.Set(keyFromIndex(c.slStateIndex), b) if err != nil { diff --git a/settlement/registry/registry.go b/settlement/registry/registry.go index c8bdbe5e5..9649f5c5b 100644 --- a/settlement/registry/registry.go +++ b/settlement/registry/registry.go @@ -7,26 +7,26 @@ import ( "github.com/dymensionxyz/dymint/settlement/local" ) - +// Client represents a settlement layer client type Client string const ( - + // Local is a mock client for the settlement layer Local Client = "mock" - + // Dymension is a client for interacting with dymension settlement layer Dymension Client = "dymension" - + // Mock client using grpc for a shared use Grpc Client = "grpc" ) - +// A central registry for all Settlement Layer Clients var clients = map[Client]func() settlement.ClientI{ Local: func() settlement.ClientI { return &local.Client{} }, Dymension: func() settlement.ClientI { return &dymension.Client{} }, Grpc: func() settlement.ClientI { return &grpc.Client{} }, } - +// GetClient returns client identified by name. func GetClient(client Client) settlement.ClientI { f, ok := clients[client] if !ok { @@ -35,7 +35,7 @@ func GetClient(client Client) settlement.ClientI { return f() } - +// RegisteredClients returns names of all settlement clients in registry. func RegisteredClients() []Client { registered := make([]Client, 0, len(clients)) for client := range clients { diff --git a/settlement/settlement.go b/settlement/settlement.go index fbbbf9a63..4b03327a2 100644 --- a/settlement/settlement.go +++ b/settlement/settlement.go @@ -8,10 +8,10 @@ import ( "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp" ) - +// StatusCode is a type for settlement layer return status. type StatusCode uint64 - +// settlement layer return codes. const ( StatusUnknown StatusCode = iota StatusSuccess @@ -20,12 +20,12 @@ const ( ) type ResultBase struct { - + // Code is to determine if the action succeeded. 
Code StatusCode - + // Message may contain settlement layer specific information (like detailed error message, etc) Message string - - + // TODO(omritoptix): Move StateIndex to be part of the batch struct + // StateIndex is the rollapp-specific index the batch was saved in the SL StateIndex uint64 } @@ -34,16 +34,16 @@ type BatchMetaData struct { } type Batch struct { - + // sequencer is the bech32-encoded address of the sequencer sent the update Sequencer string StartHeight uint64 EndHeight uint64 BlockDescriptors []rollapp.BlockDescriptor NextSequencer string - + // MetaData about the batch in the DA layer MetaData *BatchMetaData - NumBlocks uint64 + NumBlocks uint64 // FIXME: can be removed. not used and will be deprecated } type ResultRetrieveBatch struct { @@ -56,51 +56,51 @@ type State struct { } type ResultGetHeightState struct { - ResultBase + ResultBase // NOTE: the state index of this will not be populated State } - +// Option is a function that sets a parameter on the settlement layer. type Option func(ClientI) - +// ClientI defines generic interface for Settlement layer interaction. type ClientI interface { - + // Init is called once for the client initialization Init(config Config, rollappId string, pubsub *pubsub.Server, logger types.Logger, options ...Option) error - + // Start is called once, after Init. It's implementation should start the client service. Start() error - + // Stop is called once, after Start. It should stop the client service. Stop() error - - + // SubmitBatch tries submitting the batch in an async way to the settlement layer. This should create a transaction which (potentially) + // triggers a state transition in the settlement layer. Events are emitted on success or failure. SubmitBatch(batch *types.Batch, daClient da.Client, daResult *da.ResultSubmitBatch) error - + // GetLatestBatch returns the latest batch from the settlement layer. GetLatestBatch() (*ResultRetrieveBatch, error) - + // GetBatchAtIndex returns the batch at the given index. GetBatchAtIndex(index uint64) (*ResultRetrieveBatch, error) - + // GetSequencerByAddress returns all sequencer information by its address. GetSequencerByAddress(address string) (types.Sequencer, error) - + // GetBatchAtHeight returns the batch at the given height. GetBatchAtHeight(index uint64) (*ResultRetrieveBatch, error) - + // GetLatestHeight returns the latest state update height from the settlement layer. GetLatestHeight() (uint64, error) - + // GetLatestFinalizedHeight returns the latest finalized height from the settlement layer. GetLatestFinalizedHeight() (uint64, error) - + // GetAllSequencers returns all sequencers for this rollapp (bonded and not bonded). GetAllSequencers() ([]types.Sequencer, error) - + // GetBondedSequencers returns the list of the bonded sequencers for this rollapp. GetBondedSequencers() ([]types.Sequencer, error) - + // GetProposerAtHeight returns the current proposer for this chain. GetProposerAtHeight(height int64) (*types.Sequencer, error) - - + // GetNextProposer returns the next proposer for this chain in case of a rotation. + // If no rotation is in progress, it should return nil. GetNextProposer() (*types.Sequencer, error) - + // GetRollapp returns the rollapp information. GetRollapp() (*types.Rollapp, error) - + // GetObsoleteDrs returns the list of deprecated DRS. GetObsoleteDrs() ([]uint32, error) - + // GetSignerBalance returns the balance of the signer. GetSignerBalance() (types.Balance, error) - + // ValidateGenesisBridgeData validates the genesis bridge data. 
ValidateGenesisBridgeData(data rollapp.GenesisBridgeData) error } diff --git a/store/badger.go b/store/badger.go index 6a67526f2..5fbb244f5 100644 --- a/store/badger.go +++ b/store/badger.go @@ -16,7 +16,7 @@ import ( const ( gcTimeout = 1 * time.Minute - discardRatio = 0.5 + discardRatio = 0.5 // Recommended by badger. Indicates that a file will be rewritten if half the space can be discarded. ) var ( @@ -24,14 +24,14 @@ var ( _ KVBatch = &BadgerBatch{} ) - +// BadgerKV is a implementation of KVStore using Badger v3. type BadgerKV struct { db *badger.DB closing chan struct{} closeOnce sync.Once } - +// NewDefaultInMemoryKVStore builds KVStore that works in-memory (without accessing disk). func NewDefaultInMemoryKVStore() KV { db, err := badger.Open(badger.DefaultOptions("").WithInMemory(true)) if err != nil { @@ -58,12 +58,12 @@ func NewKVStore(rootDir, dbPath, dbName string, syncWrites bool, logger types.Lo return b } - +// NewDefaultKVStore creates instance of default key-value store. func NewDefaultKVStore(rootDir, dbPath, dbName string) KV { return NewKVStore(rootDir, dbPath, dbName, true, log.NewNopLogger()) } - +// Rootify is helper function to make config creation independent of root dir func Rootify(rootDir, dbPath string) string { if filepath.IsAbs(dbPath) { return dbPath @@ -71,7 +71,7 @@ func Rootify(rootDir, dbPath string) string { return filepath.Join(rootDir, dbPath) } - +// Close implements KVStore. func (b *BadgerKV) Close() error { b.closeOnce.Do(func() { close(b.closing) @@ -85,7 +85,7 @@ func (b *BadgerKV) gc(period time.Duration, discardRatio float64, logger types.L for { select { case <-b.closing: - + // Exit the periodic garbage collector function when store is closed return case <-ticker.C: err := b.db.RunValueLogGC(discardRatio) @@ -97,7 +97,7 @@ func (b *BadgerKV) gc(period time.Duration, discardRatio float64, logger types.L } } - +// Get returns value for given key, or error. func (b *BadgerKV) Get(key []byte) ([]byte, error) { txn := b.db.NewTransaction(false) defer txn.Discard() @@ -111,7 +111,7 @@ func (b *BadgerKV) Get(key []byte) ([]byte, error) { return item.ValueCopy(nil) } - +// Set saves key-value mapping in store. func (b *BadgerKV) Set(key []byte, value []byte) error { txn := b.db.NewTransaction(true) defer txn.Discard() @@ -122,7 +122,7 @@ func (b *BadgerKV) Set(key []byte, value []byte) error { return txn.Commit() } - +// Delete removes key and corresponding value from store. func (b *BadgerKV) Delete(key []byte) error { txn := b.db.NewTransaction(true) defer txn.Discard() @@ -133,20 +133,20 @@ func (b *BadgerKV) Delete(key []byte) error { return txn.Commit() } - - +// NewBatch creates new batch. +// Note: badger batches should be short lived as they use extra resources. 
func (b *BadgerKV) NewBatch() KVBatch { return &BadgerBatch{ txn: b.db.NewTransaction(true), } } - +// BadgerBatch encapsulates badger transaction type BadgerBatch struct { txn *badger.Txn } - +// Set accumulates key-value entries in a transaction func (bb *BadgerBatch) Set(key, value []byte) error { if err := bb.txn.Set(key, value); err != nil { return err @@ -155,24 +155,24 @@ func (bb *BadgerBatch) Set(key, value []byte) error { return nil } - +// Delete removes the key and associated value from store func (bb *BadgerBatch) Delete(key []byte) error { return bb.txn.Delete(key) } - +// Commit commits a transaction func (bb *BadgerBatch) Commit() error { return bb.txn.Commit() } - +// Discard cancels a transaction func (bb *BadgerBatch) Discard() { bb.txn.Discard() } var _ KVIterator = &BadgerIterator{} - +// PrefixIterator returns an instance of prefix Iterator for BadgerKV. func (b *BadgerKV) PrefixIterator(prefix []byte) KVIterator { txn := b.db.NewTransaction(false) iter := txn.NewIterator(badger.DefaultIteratorOptions) @@ -185,7 +185,7 @@ func (b *BadgerKV) PrefixIterator(prefix []byte) KVIterator { } } - +// BadgerIterator encapsulates prefix iterator for badger kv store. type BadgerIterator struct { txn *badger.Txn iter *badger.Iterator @@ -193,22 +193,22 @@ type BadgerIterator struct { lastError error } - +// Valid returns true if iterator is inside its prefix, false otherwise. func (i *BadgerIterator) Valid() bool { return i.iter.ValidForPrefix(i.prefix) } - +// Next progresses iterator to the next key-value pair. func (i *BadgerIterator) Next() { i.iter.Next() } - +// Key returns the key pointed to by the iterator. func (i *BadgerIterator) Key() []byte { return i.iter.Item().KeyCopy(nil) } - +// Value returns the value pointed to by the iterator. func (i *BadgerIterator) Value() []byte { val, err := i.iter.Item().ValueCopy(nil) if err != nil { @@ -217,45 +217,45 @@ func (i *BadgerIterator) Value() []byte { return val } - +// Error returns last error that occurred during iteration. func (i *BadgerIterator) Error() error { return i.lastError } - +// Discard has to be called to free iterator resources. func (i *BadgerIterator) Discard() { i.iter.Close() i.txn.Discard() } - - +// memoryEfficientBadgerConfig sets badger configuration parameters to reduce memory usage, especially during compactions, to avoid memory spikes that cause OOM. +// based on https://github.com/celestiaorg/celestia-node/issues/2905 func memoryEfficientBadgerConfig(path string, syncWrites bool) *badger.Options { - opts := badger.DefaultOptions(path) - - + opts := badger.DefaultOptions(path) // this must be copied + // SyncWrites is a configuration option in Badger that determines whether writes are immediately synced to disk or not. + // If set to true, writes to the write-ahead log (value log) are synced to disk before being applied to the LSM tree. opts.SyncWrites = syncWrites - - - - + // default 64mib => 0 - disable block cache + // BlockCacheSize specifies how much data cache should hold in memory. + // It improves lookup performance but increases memory consumption. 
+ // Not really necessary if disabling compression opts.BlockCacheSize = 0 - + // compressions reduces storage usage but increases memory consumption, specially during compaction opts.Compression = options.None - - + // MemTables: maximum size of in-memory data structures before they are flushed to disk + // default 64mib => 16mib - decreases memory usage and makes compaction more often opts.MemTableSize = 16 << 20 - - + // NumMemtables is a configuration option in Badger that sets the maximum number of memtables to keep in memory before stalling + // default 5 => 3 opts.NumMemtables = 3 - - + // NumLevelZeroTables sets the maximum number of Level 0 tables before compaction starts + // default 5 => 3 opts.NumLevelZeroTables = 3 - + // default 15 => 5 - this prevents memory growth on CPU constraint systems by blocking all writers opts.NumLevelZeroTablesStall = 5 - + // reducing number compactors, makes it slower but reduces memory usage during compaction opts.NumCompactors = 2 - + // makes sure badger is always compacted on shutdown opts.CompactL0OnClose = true return &opts diff --git a/store/prefix.go b/store/prefix.go index e0f4f77d6..23842dff3 100644 --- a/store/prefix.go +++ b/store/prefix.go @@ -5,18 +5,18 @@ var ( _ KVBatch = &PrefixKVBatch{} ) - +// PrefixKV is a key-value store that prepends all keys with given prefix. type PrefixKV struct { kv KV prefix []byte } - +// Close implements KVStore. func (p *PrefixKV) Close() error { return p.kv.Close() } - +// NewPrefixKV creates new PrefixKV on top of other KVStore. func NewPrefixKV(kv KV, prefix []byte) *PrefixKV { return &PrefixKV{ kv: kv, @@ -24,22 +24,22 @@ func NewPrefixKV(kv KV, prefix []byte) *PrefixKV { } } - +// Get returns value for given key. func (p *PrefixKV) Get(key []byte) ([]byte, error) { return p.kv.Get(append(p.prefix, key...)) } - +// Set updates the value for given key. func (p *PrefixKV) Set(key []byte, value []byte) error { return p.kv.Set(append(p.prefix, key...), value) } - +// Delete deletes key-value pair for given key. func (p *PrefixKV) Delete(key []byte) error { return p.kv.Delete(append(p.prefix, key...)) } - +// NewBatch creates a new batch. func (p *PrefixKV) NewBatch() KVBatch { return &PrefixKVBatch{ b: p.kv.NewBatch(), @@ -47,33 +47,33 @@ func (p *PrefixKV) NewBatch() KVBatch { } } - +// PrefixIterator creates iterator to traverse given prefix. func (p *PrefixKV) PrefixIterator(prefix []byte) KVIterator { return p.kv.PrefixIterator(append(p.prefix, prefix...)) } - +// PrefixKVBatch enables batching of operations on PrefixKV. type PrefixKVBatch struct { b KVBatch prefix []byte } - +// Set adds key-value pair to batch. func (pb *PrefixKVBatch) Set(key, value []byte) error { return pb.b.Set(append(pb.prefix, key...), value) } - +// Delete adds delete operation to batch. func (pb *PrefixKVBatch) Delete(key []byte) error { return pb.b.Delete(append(pb.prefix, key...)) } - +// Commit applies all operations in the batch atomically. func (pb *PrefixKVBatch) Commit() error { return pb.b.Commit() } - +// Discard discards all operations in the batch. func (pb *PrefixKVBatch) Discard() { pb.b.Discard() } diff --git a/store/pruning.go b/store/pruning.go index 5d3ee3ed3..5940f8ae9 100644 --- a/store/pruning.go +++ b/store/pruning.go @@ -8,7 +8,7 @@ import ( "github.com/dymensionxyz/gerr-cosmos/gerrc" ) - +// PruneStore removes blocks up to (but not including) a height. It returns number of blocks pruned. 
func (s *DefaultStore) PruneStore(to uint64, logger types.Logger) (uint64, error) { pruned := uint64(0) from, err := s.LoadBaseHeight() @@ -29,7 +29,7 @@ func (s *DefaultStore) PruneStore(to uint64, logger types.Logger) (uint64, error return pruned, nil } - +// pruneHeights prunes all store entries that are stored along blocks (blocks,commit,proposer, etc) func (s *DefaultStore) pruneHeights(from, to uint64, logger types.Logger) (uint64, error) { pruneBlocks := func(batch KVBatch, height uint64) error { hash, err := s.loadHashFromIndex(height) @@ -64,7 +64,7 @@ func (s *DefaultStore) pruneHeights(from, to uint64, logger types.Logger) (uint6 return pruned, err } - +// prune is the function that iterates through all heights and prunes according to the pruning function set func (s *DefaultStore) prune(from, to uint64, prune func(batch KVBatch, height uint64) error, logger types.Logger) (uint64, error) { pruned := uint64(0) batch := s.db.NewBatch() @@ -86,7 +86,7 @@ func (s *DefaultStore) prune(from, to uint64, prune func(batch KVBatch, height u } pruned++ - + // flush every 1000 blocks to avoid batches becoming too large if pruned%1000 == 0 && pruned > 0 { err := flush(batch, h) if err != nil { diff --git a/store/store.go b/store/store.go index a0ee6dbd8..f0be24df9 100644 --- a/store/store.go +++ b/store/store.go @@ -30,33 +30,33 @@ var ( lastBlockSequencerSetPrefix = [1]byte{14} ) - +// DefaultStore is a default store implementation. type DefaultStore struct { db KV } var _ Store = &DefaultStore{} - +// New returns new, default store. func New(kv KV) Store { return &DefaultStore{ db: kv, } } - +// Close implements Store. func (s *DefaultStore) Close() error { return s.db.Close() } - +// NewBatch creates a new db batch. func (s *DefaultStore) NewBatch() KVBatch { return s.db.NewBatch() } - - - +// SaveBlock adds block to the store along with corresponding commit. +// Stored height is updated if block height is greater than stored value. +// In case a batch is provided, the block and commit are added to the batch and not saved. func (s *DefaultStore) SaveBlock(block *types.Block, commit *types.Commit, batch KVBatch) (KVBatch, error) { hash := block.Header.Hash() blockBlob, err := block.MarshalBinary() @@ -69,7 +69,7 @@ func (s *DefaultStore) SaveBlock(block *types.Block, commit *types.Commit, batch return batch, fmt.Errorf("marshal Commit to binary: %w", err) } - + // Not sure it's neeeded, as it's not used anywhere if batch != nil { err = multierr.Append(err, batch.Set(getBlockKey(hash), blockBlob)) err = multierr.Append(err, batch.Set(getCommitKey(hash), commitBlob)) @@ -94,10 +94,10 @@ func (s *DefaultStore) SaveBlock(block *types.Block, commit *types.Commit, batch return nil, nil } - - - - +// LoadBlock returns block at given height, or error if it's not found in Store. +// TODO(tzdybal): what is more common access pattern? by height or by hash? +// currently, we're indexing height->hash, and store blocks by hash, but we might as well store by height +// and index hash->height func (s *DefaultStore) LoadBlock(height uint64) (*types.Block, error) { h, err := s.loadHashFromIndex(height) if err != nil { @@ -106,7 +106,7 @@ func (s *DefaultStore) LoadBlock(height uint64) (*types.Block, error) { return s.LoadBlockByHash(h) } - +// LoadBlockByHash returns block with given block header hash, or error if it's not found in Store. 
func (s *DefaultStore) LoadBlockByHash(hash [32]byte) (*types.Block, error) { blockData, err := s.db.Get(getBlockKey(hash)) if err != nil { @@ -121,7 +121,7 @@ func (s *DefaultStore) LoadBlockByHash(hash [32]byte) (*types.Block, error) { return block, nil } - +// SaveBlockSource saves block validation in Store. func (s *DefaultStore) SaveBlockSource(height uint64, source types.BlockSource, batch KVBatch) (KVBatch, error) { b := make([]byte, 8) binary.LittleEndian.PutUint64(b, uint64(source)) @@ -132,7 +132,7 @@ func (s *DefaultStore) SaveBlockSource(height uint64, source types.BlockSource, return batch, err } - +// LoadBlockSource returns block validation in Store. func (s *DefaultStore) LoadBlockSource(height uint64) (types.BlockSource, error) { source, err := s.db.Get(getSourceKey(height)) if err != nil { @@ -141,7 +141,7 @@ func (s *DefaultStore) LoadBlockSource(height uint64) (types.BlockSource, error) return types.BlockSource(binary.LittleEndian.Uint64(source)), nil } - +// SaveBlockResponses saves block responses (events, tx responses, etc) in Store. func (s *DefaultStore) SaveBlockResponses(height uint64, responses *tmstate.ABCIResponses, batch KVBatch) (KVBatch, error) { data, err := responses.Marshal() if err != nil { @@ -154,7 +154,7 @@ func (s *DefaultStore) SaveBlockResponses(height uint64, responses *tmstate.ABCI return batch, err } - +// LoadBlockResponses returns block results at given height, or error if it's not found in Store. func (s *DefaultStore) LoadBlockResponses(height uint64) (*tmstate.ABCIResponses, error) { data, err := s.db.Get(getResponsesKey(height)) if err != nil { @@ -168,7 +168,7 @@ func (s *DefaultStore) LoadBlockResponses(height uint64) (*tmstate.ABCIResponses return &responses, nil } - +// LoadCommit returns commit for a block at given height, or error if it's not found in Store. func (s *DefaultStore) LoadCommit(height uint64) (*types.Commit, error) { hash, err := s.loadHashFromIndex(height) if err != nil { @@ -177,7 +177,7 @@ func (s *DefaultStore) LoadCommit(height uint64) (*types.Commit, error) { return s.LoadCommitByHash(hash) } - +// LoadCommitByHash returns commit for a block with given block header hash, or error if it's not found in Store. func (s *DefaultStore) LoadCommitByHash(hash [32]byte) (*types.Commit, error) { commitData, err := s.db.Get(getCommitKey(hash)) if err != nil { @@ -191,8 +191,8 @@ func (s *DefaultStore) LoadCommitByHash(hash [32]byte) (*types.Commit, error) { return commit, nil } - - +// SaveState updates state saved in Store. Only one State is stored. +// If there is no State in Store, state will be saved. func (s *DefaultStore) SaveState(state *types.State, batch KVBatch) (KVBatch, error) { pbState, err := state.ToProto() if err != nil { @@ -210,7 +210,7 @@ func (s *DefaultStore) SaveState(state *types.State, batch KVBatch) (KVBatch, er return batch, err } - +// LoadState returns last state saved with UpdateState. func (s *DefaultStore) LoadState() (*types.State, error) { blob, err := s.db.Get(getStateKey()) if err != nil { @@ -231,7 +231,7 @@ func (s *DefaultStore) LoadState() (*types.State, error) { return &state, nil } - +// SaveProposer stores the proposer for given block height in store. 
func (s *DefaultStore) SaveProposer(height uint64, proposer types.Sequencer, batch KVBatch) (KVBatch, error) { pbProposer, err := proposer.ToProto() if err != nil { @@ -249,7 +249,7 @@ func (s *DefaultStore) SaveProposer(height uint64, proposer types.Sequencer, bat return batch, err } - +// LoadProposer loads proposer at given block height from store. func (s *DefaultStore) LoadProposer(height uint64) (types.Sequencer, error) { blob, err := s.db.Get(getProposerKey(height)) if err != nil { diff --git a/store/storeIface.go b/store/storeIface.go index 4cdd2265b..8220b25ad 100644 --- a/store/storeIface.go +++ b/store/storeIface.go @@ -7,27 +7,27 @@ import ( "github.com/dymensionxyz/dymint/types" ) - - - +// KV encapsulates key-value store abstraction, in minimalistic interface. +// +// KV MUST be thread safe. type KV interface { - Get(key []byte) ([]byte, error) - Set(key []byte, value []byte) error - Delete(key []byte) error - NewBatch() KVBatch - PrefixIterator(prefix []byte) KVIterator - Close() error + Get(key []byte) ([]byte, error) // Get gets the value for a key. + Set(key []byte, value []byte) error // Set updates the value for a key. + Delete(key []byte) error // Delete deletes a key. + NewBatch() KVBatch // NewBatch creates a new batch. + PrefixIterator(prefix []byte) KVIterator // PrefixIterator creates iterator to traverse given prefix. + Close() error // Close closes the store. } - +// KVBatch enables batching of transactions. type KVBatch interface { - Set(key, value []byte) error - Delete(key []byte) error - Commit() error - Discard() + Set(key, value []byte) error // Accumulates KV entries in a transaction. + Delete(key []byte) error // Deletes the given key. + Commit() error // Commits the transaction. + Discard() // Discards the transaction. } - +// KVIterator enables traversal over a given prefix. type KVIterator interface { Valid() bool Next() @@ -37,37 +37,37 @@ type KVIterator interface { Discard() } - +// Store is minimal interface for storing and retrieving blocks, commits and state. type Store interface { - + // NewBatch creates a new db batch. NewBatch() KVBatch - + // SaveBlock saves block along with its seen commit (which will be included in the next block). SaveBlock(block *types.Block, commit *types.Commit, batch KVBatch) (KVBatch, error) - + // LoadBlock returns block at given height, or error if it's not found in Store. LoadBlock(height uint64) (*types.Block, error) - + // LoadBlockByHash returns block with given block header hash, or error if it's not found in Store. LoadBlockByHash(hash [32]byte) (*types.Block, error) - + // SaveBlockResponses saves block responses (events, tx responses, validator set updates, etc) in Store. SaveBlockResponses(height uint64, responses *tmstate.ABCIResponses, batch KVBatch) (KVBatch, error) - + // LoadBlockResponses returns block results at given height, or error if it's not found in Store. LoadBlockResponses(height uint64) (*tmstate.ABCIResponses, error) - + // LoadCommit returns commit for a block at given height, or error if it's not found in Store. LoadCommit(height uint64) (*types.Commit, error) - + // LoadCommitByHash returns commit for a block with given block header hash, or error if it's not found in Store. LoadCommitByHash(hash [32]byte) (*types.Commit, error) - - + // SaveState updates state saved in Store. Only one State is stored. + // If there is no State in Store, state will be saved. SaveState(state *types.State, batch KVBatch) (KVBatch, error) - + // LoadState returns last state saved with UpdateState. 
LoadState() (*types.State, error) SaveProposer(height uint64, proposer types.Sequencer, batch KVBatch) (KVBatch, error) diff --git a/test/loadtime/cmd/load/main.go b/test/loadtime/cmd/load/main.go index ef45d2c3b..456f78b1d 100644 --- a/test/loadtime/cmd/load/main.go +++ b/test/loadtime/cmd/load/main.go @@ -10,20 +10,20 @@ import ( "github.com/dymensionxyz/dymint/test/pb/loadtime" ) - +// Ensure all of the interfaces are correctly satisfied. var ( _ loadtest.ClientFactory = (*ClientFactory)(nil) _ loadtest.Client = (*TxGenerator)(nil) ) - +// ClientFactory implements the loadtest.ClientFactory interface. type ClientFactory struct { ID []byte } - - - +// TxGenerator is responsible for generating transactions. +// TxGenerator holds the set of information that will be used to generate +// each transaction. type TxGenerator struct { id []byte conns uint64 @@ -32,7 +32,7 @@ type TxGenerator struct { } func main() { - u := [16]byte(uuid.New()) + u := [16]byte(uuid.New()) // generate run ID on startup if err := loadtest.RegisterClientFactory("loadtime-client", &ClientFactory{ID: u[:]}); err != nil { panic(err) } @@ -44,7 +44,7 @@ func main() { }) } - +// ValidateConfig validates the configuration for the load test. func (f *ClientFactory) ValidateConfig(cfg loadtest.Config) error { psb, err := payload.MaxUnpaddedSize() if err != nil { @@ -56,9 +56,9 @@ func (f *ClientFactory) ValidateConfig(cfg loadtest.Config) error { return nil } - - - +// NewClient creates a new client for the load test. +// +//nolint:gosec // params are always positive and fall in uint64 func (f *ClientFactory) NewClient(cfg loadtest.Config) (loadtest.Client, error) { return &TxGenerator{ id: f.ID, @@ -68,7 +68,7 @@ func (f *ClientFactory) NewClient(cfg loadtest.Config) (loadtest.Client, error) }, nil } - +// GenerateTx generates a new transactions for the load test. func (c *TxGenerator) GenerateTx() ([]byte, error) { return payload.NewBytes(&loadtime.Payload{ Connections: c.conns, diff --git a/test/loadtime/cmd/report/main.go b/test/loadtime/cmd/report/main.go index 1f17e6f17..4fd90ebe3 100644 --- a/test/loadtime/cmd/report/main.go +++ b/test/loadtime/cmd/report/main.go @@ -19,19 +19,19 @@ const ( var mainPrefix = [1]byte{0} - +// BlockStore is a thin wrapper around the DefaultStore which will be used for inspecting the blocks type BlockStore struct { *store.DefaultStore base uint64 height uint64 } - +// Height implements report.BlockStore. func (b *BlockStore) Height() uint64 { return b.height } - +// Base will be used to get the block height of the first block we want to generate the report for func (b *BlockStore) Base() uint64 { return b.base } diff --git a/test/loadtime/payload/payload.go b/test/loadtime/payload/payload.go index 06f8d30b9..ba538a1df 100644 --- a/test/loadtime/payload/payload.go +++ b/test/loadtime/payload/payload.go @@ -16,9 +16,9 @@ const ( maxPayloadSize = 4 * 1024 * 1024 ) - - - +// NewBytes generates a new payload and returns the encoded representation of +// the payload as a slice of bytes. NewBytes uses the fields on the Options +// to create the payload. 
func NewBytes(p *loadtime.Payload) ([]byte, error) { p.Padding = make([]byte, 1) nullTime := time.Time{} @@ -32,12 +32,12 @@ func NewBytes(p *loadtime.Payload) ([]byte, error) { if p.Size() > maxPayloadSize { return nil, fmt.Errorf("configured size %d is too large (>%d)", p.Size(), maxPayloadSize) } - pSize := int(p.GetSize_()) + pSize := int(p.GetSize_()) // #nosec -- The "if" above makes this cast safe if pSize < us { return nil, fmt.Errorf("configured size %d not large enough to fit unpadded transaction of size %d", pSize, us) } - + // We halve the padding size because we transform the TX to hex p.Padding = make([]byte, (pSize-us)/2) _, err = rand.Read(p.Padding) if err != nil { @@ -49,14 +49,14 @@ func NewBytes(p *loadtime.Payload) ([]byte, error) { } h := []byte(hex.EncodeToString(b)) - - + // prepend a single key so that the kv store only ever stores a single + // transaction instead of storing all tx and ballooning in size. return append([]byte(keyPrefix), h...), nil } - - - +// FromBytes extracts a paylod from the byte representation of the payload. +// FromBytes leaves the padding untouched, returning it to the caller to handle +// or discard per their preference. func FromBytes(b []byte) (*loadtime.Payload, error) { trH := bytes.TrimPrefix(b, []byte(keyPrefix)) if bytes.Equal(b, trH) { @@ -75,8 +75,8 @@ func FromBytes(b []byte) (*loadtime.Payload, error) { return p, nil } - - +// MaxUnpaddedSize returns the maximum size that a payload may be if no padding +// is included. func MaxUnpaddedSize() (int, error) { p := &loadtime.Payload{ Time: time.Now(), @@ -88,9 +88,9 @@ func MaxUnpaddedSize() (int, error) { return CalculateUnpaddedSize(p) } - - - +// CalculateUnpaddedSize calculates the size of the passed in payload for the +// purpose of determining how much padding to add to reach the target size. +// CalculateUnpaddedSize returns an error if the payload Padding field is longer than 1. func CalculateUnpaddedSize(p *loadtime.Payload) (int, error) { if len(p.Padding) != 1 { return 0, fmt.Errorf("expected length of padding to be 1, received %d", len(p.Padding)) diff --git a/test/loadtime/report/report.go b/test/loadtime/report/report.go index f38865ecf..0a8746d7d 100644 --- a/test/loadtime/report/report.go +++ b/test/loadtime/report/report.go @@ -13,66 +13,66 @@ import ( "github.com/dymensionxyz/dymint/types" ) - - - - +// BlockStore defines the set of methods needed by the report generator from +// Tendermint's store.Blockstore type. Using an interface allows for tests to +// more easily simulate the required behavior without having to use the more +// complex real API. type BlockStore interface { Height() uint64 Base() uint64 LoadBlock(uint64) (*types.Block, error) } - +// DataPoint contains the set of data collected for each transaction. type DataPoint struct { Duration time.Duration BlockTime time.Time Hash []byte } - - +// Report contains the data calculated from reading the timestamped transactions +// of each block found in the blockstore. type Report struct { ID uuid.UUID Rate, Connections, Size uint64 Max, Min, Avg, StdDev time.Duration - - - - - + // NegativeCount is the number of negative durations encountered while + // reading the transaction data. A negative duration means that + // a transaction timestamp was greater than the timestamp of the block it + // was included in and likely indicates an issue with the experimental + // setup. 
NegativeCount int - + // TPS is calculated by taking the highest averaged TPS over all consecutive blocks TPS uint64 - - - + // All contains all data points gathered from all valid transactions. + // The order of the contents of All is not guaranteed to match the order of transactions + // in the chain. All []DataPoint - + // used for calculating average during report creation. sum int64 } - +// Reports is a collection of Report objects. type Reports struct { s map[uuid.UUID]Report l []Report - - - + // errorCount is the number of parsing errors encountered while reading the + // transaction data. Parsing errors may occur if a transaction not generated + // by the payload package is submitted to the chain. errorCount int } - +// List returns a slice of all reports. func (rs *Reports) List() []Report { return rs.l } - +// ErrorCount returns the number of erroneous transactions encountered while creating the report func (rs *Reports) ErrorCount() int { return rs.errorCount } @@ -100,9 +100,9 @@ func (rs *Reports) addDataPoint(id uuid.UUID, l time.Duration, bt time.Time, has if int64(l) < 0 { r.NegativeCount++ } - - - + // Using an int64 here makes an assumption about the scale and quantity of the data we are processing. + // If all latencies were 2 seconds, we would need around 4 billion records to overflow this. + // We are therefore assuming that the data does not exceed these bounds. r.sum += int64(l) rs.s[id] = r } @@ -122,14 +122,14 @@ func (rs *Reports) calculateAll() { } } - +// calculateTPS calculates the TPS by calculating an average moving window with a minimum size of 1 second over all consecutive blocks func calculateTPS(in []DataPoint) uint64 { - + // create a map of block times to the number of transactions in that block blocks := make(map[time.Time]int) for _, v := range in { blocks[v.BlockTime]++ } - + // sort the blocks by time var blockTimes []time.Time for k := range blocks { blockTimes = append(blockTimes, k) @@ -137,7 +137,7 @@ func calculateTPS(in []DataPoint) uint64 { sort.Slice(blockTimes, func(i, j int) bool { return blockTimes[i].Before(blockTimes[j]) }) - + // Iterate over the blocks and calculate the tps starting from each block TPS := uint64(0) for index, blockTime := range blockTimes { currentTx := blocks[blockTime] @@ -160,8 +160,8 @@ func (rs *Reports) addError() { rs.errorCount++ } - - +// GenerateFromBlockStore creates a Report using the data in the provided +// BlockStore. func GenerateFromBlockStore(s BlockStore) (*Reports, error) { type payloadData struct { id uuid.UUID @@ -179,11 +179,11 @@ func GenerateFromBlockStore(s BlockStore) (*Reports, error) { s: make(map[uuid.UUID]Report), } - - - - - + // Deserializing to proto can be slow but does not depend on other data + // and can therefore be done in parallel. + // Deserializing in parallel does mean that the resulting data is + // not guaranteed to be delivered in the same order it was given to the + // worker pool.
const poolSize = 16 txc := make(chan txData) diff --git a/testutil/block.go b/testutil/block.go index a07944d9e..f60257055 100644 --- a/testutil/block.go +++ b/testutil/block.go @@ -37,14 +37,14 @@ const ( DefaultTestBatchSize = 5 ) - - - +/* -------------------------------------------------------------------------- */ +/* utils */ +/* -------------------------------------------------------------------------- */ func GetManagerWithProposerKey(conf config.BlockManagerConfig, proposerKey crypto.PrivKey, settlementlc settlement.ClientI, genesisHeight, storeInitialHeight, storeLastBlockHeight int64, proxyAppConns proxy.AppConns, mockStore store.Store) (*block.Manager, error) { genesis := GenerateGenesis(genesisHeight) - - + // Change the LastBlockHeight to avoid calling InitChainSync within the manager + // And updating the state according to the genesis. raw, _ := proposerKey.GetPublic().Raw() pubkey := ed25519.PubKey(raw) @@ -67,7 +67,7 @@ func GetManagerWithProposerKey(conf config.BlockManagerConfig, proposerKey crypt return nil, err } - + // Init the settlement layer mock if settlementlc == nil { settlementlc = slregistry.GetClient(slregistry.Local) } @@ -96,7 +96,7 @@ func GetManagerWithProposerKey(conf config.BlockManagerConfig, proposerKey crypt mp := mempoolv1.NewTxMempool(logger, tmcfg.DefaultMempoolConfig(), proxyApp.Mempool(), 0) mpIDs := nodemempool.NewMempoolIDs() - + // Init p2p client and validator p2pKey, _, _ := crypto.GenerateEd25519Key(rand.Reader) p2pClient, err := p2p.NewClient(config.P2PConfig{ GossipSubCacheSize: 50, diff --git a/testutil/logger.go b/testutil/logger.go index c71789897..3ef7a902d 100644 --- a/testutil/logger.go +++ b/testutil/logger.go @@ -6,15 +6,15 @@ import ( "testing" ) +// TODO(tzdybal): move to some common place - - +// Logger is a simple, yet thread-safe, logger intended for use in unit tests. type Logger struct { mtx *sync.Mutex T *testing.T } - +// NewLogger create a Logger that outputs data using given testing.T instance. func NewLogger(t *testing.T) *Logger { return &Logger{ mtx: new(sync.Mutex), @@ -22,7 +22,7 @@ func NewLogger(t *testing.T) *Logger { } } - +// Debug prints a debug message. func (t *Logger) Debug(msg string, keyvals ...interface{}) { t.T.Helper() t.mtx.Lock() @@ -30,7 +30,7 @@ func (t *Logger) Debug(msg string, keyvals ...interface{}) { t.T.Log(append([]interface{}{"DEBUG: " + msg}, keyvals...)...) } - +// Info prints an info message. func (t *Logger) Info(msg string, keyvals ...interface{}) { t.T.Helper() t.mtx.Lock() @@ -38,7 +38,7 @@ func (t *Logger) Info(msg string, keyvals ...interface{}) { t.T.Log(append([]interface{}{"INFO: " + msg}, keyvals...)...) } - +// Error prints an error message. func (t *Logger) Error(msg string, keyvals ...interface{}) { t.T.Helper() t.mtx.Lock() @@ -46,24 +46,24 @@ func (t *Logger) Error(msg string, keyvals ...interface{}) { t.T.Log(append([]interface{}{"ERROR: " + msg}, keyvals...)...) } - - - +// MockLogger is a fake logger that accumulates all the inputs. +// +// It can be used in tests to ensure that certain messages was logged with correct severity. type MockLogger struct { DebugLines, InfoLines, ErrLines []string } - +// Debug saves a debug message. func (t *MockLogger) Debug(msg string, keyvals ...interface{}) { t.DebugLines = append(t.DebugLines, fmt.Sprint(append([]interface{}{msg}, keyvals...)...)) } - +// Info saves an info message. 
func (t *MockLogger) Info(msg string, keyvals ...interface{}) { t.InfoLines = append(t.InfoLines, fmt.Sprint(append([]interface{}{msg}, keyvals...)...)) } - +// Error saves an error message. func (t *MockLogger) Error(msg string, keyvals ...interface{}) { t.ErrLines = append(t.ErrLines, fmt.Sprint(append([]interface{}{msg}, keyvals...)...)) } diff --git a/testutil/mocks.go b/testutil/mocks.go index e750c081b..176f8d6d7 100644 --- a/testutil/mocks.go +++ b/testutil/mocks.go @@ -29,27 +29,27 @@ import ( rollapptypes "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp" ) - +// ABCIMethod is a string representing an ABCI method type ABCIMethod string const ( - + // InitChain is the string representation of the InitChain ABCI method InitChain ABCIMethod = "InitChain" - + // CheckTx is the string representation of the CheckTx ABCI method CheckTx ABCIMethod = "CheckTx" - + // BeginBlock is the string representation of the BeginBlockMethod ABCI method BeginBlock ABCIMethod = "BeginBlock" - + // DeliverTx is the string representation of the DeliverTx ABCI method DeliverTx ABCIMethod = "DeliverTx" - + // EndBlock is the string representation of the EndBlock ABCI method EndBlock ABCIMethod = "EndBlock" - + // Commit is the string representation of the Commit ABCI method Commit ABCIMethod = "Commit" - + // Info is the string representation of the Info ABCI method Info ABCIMethod = "Info" ) - +// GetABCIProxyAppMock returns a dummy abci proxy app mock for testing func GetABCIProxyAppMock(logger log.Logger) proxy.AppConns { app := GetAppMock() @@ -60,7 +60,7 @@ func GetABCIProxyAppMock(logger log.Logger) proxy.AppConns { return proxyApp } - +// GetAppMock returns a dummy abci app mock for testing func GetAppMock(excludeMethods ...ABCIMethod) *tmmocks.MockApplication { app := &tmmocks.MockApplication{} gbdBz, _ := tmjson.Marshal(rollapptypes.GenesisBridgeData{}) @@ -72,7 +72,7 @@ func GetAppMock(excludeMethods ...ABCIMethod) *tmmocks.MockApplication { app.On("Commit", mock.Anything).Return(abci.ResponseCommit{}) app.On("Info", mock.Anything).Return(abci.ResponseInfo{LastBlockHeight: 0, LastBlockAppHash: []byte{0}}) - + // iterate exclude methods and unset the mock for _, method := range excludeMethods { UnsetMockFn(app.On(string(method))) } @@ -92,7 +92,7 @@ var UnsetMockFn = func(call *mock.Call) { } } - +// CountMockCalls returns the number of times a mock specific function was called func CountMockCalls(totalCalls []mock.Call, methodName string) int { var count int for _, call := range totalCalls { @@ -103,7 +103,7 @@ func CountMockCalls(totalCalls []mock.Call, methodName string) int { return count } - +// MockStore is a mock store for testing type MockStore struct { ShoudFailSaveState bool ShouldFailUpdateStateWithBatch bool @@ -111,8 +111,8 @@ type MockStore struct { height uint64 } - - +// SetHeight sets the height of the mock store +// Don't set the height to mock failure in setting the height func (m *MockStore) SetHeight(height uint64) { m.height = height } @@ -125,7 +125,7 @@ func (m *MockStore) NextHeight() uint64 { return m.height + 1 } - +// UpdateState updates the state of the mock store func (m *MockStore) SaveState(state *types.State, batch store.KVBatch) (store.KVBatch, error) { if batch != nil && m.ShouldFailUpdateStateWithBatch || m.ShoudFailSaveState && batch == nil { return nil, errors.New("failed to update state") @@ -133,7 +133,7 @@ func (m *MockStore) SaveState(state *types.State, batch store.KVBatch) (store.KV return m.DefaultStore.SaveState(state, batch) } - 
+// NewMockStore returns a new mock store func NewMockStore() *MockStore { defaultStore := store.New(store.NewDefaultInMemoryKVStore()) return &MockStore{ @@ -148,27 +148,27 @@ const ( connectionRefusedErrorMessage = "connection refused" ) - +// DALayerClientSubmitBatchError is a mock data availability layer client that can be used to test error handling type DALayerClientSubmitBatchError struct { localda.DataAvailabilityLayerClient } - +// SubmitBatch submits a batch to the data availability layer func (s *DALayerClientSubmitBatchError) SubmitBatch(_ *types.Batch) da.ResultSubmitBatch { return da.ResultSubmitBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: connectionRefusedErrorMessage, Error: errors.New(connectionRefusedErrorMessage)}} } - +// DALayerClientRetrieveBatchesError is a mock data availability layer client that can be used to test error handling type DALayerClientRetrieveBatchesError struct { localda.DataAvailabilityLayerClient } - +// RetrieveBatches retrieves batches from the data availability layer func (m *DALayerClientRetrieveBatchesError) RetrieveBatches(_ *da.DASubmitMetaData) da.ResultRetrieveBatch { return da.ResultRetrieveBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: batchNotFoundErrorMessage, Error: da.ErrBlobNotFound}} } - +// SubscribeMock is a mock to provide a subscription like behavior for testing type SubscribeMock struct { messageCh chan interface{} } @@ -195,8 +195,8 @@ type MockDA struct { func NewMockDA(t *testing.T) (*MockDA, error) { mockDA := &MockDA{} - - + // Create DA + // init celestia DA with mock RPC client mockDA.DaClient = registry.GetClient("celestia") config := celestia.Config{ @@ -233,7 +233,7 @@ func NewMockDA(t *testing.T) (*MockDA, error) { nIDSize := 1 tree := exampleNMT(nIDSize, true, 1, 2, 3, 4) - + // build a proof for an NID that is within the namespace range of the tree proof, _ := tree.ProveNamespace(mockDA.NID) mockDA.BlobProof = blob.Proof([]*nmt.Proof{&proof}) @@ -244,7 +244,7 @@ func NewMockDA(t *testing.T) (*MockDA, error) { return mockDA, nil } - +// exampleNMT creates a new NamespacedMerkleTree with the given namespace ID size and leaf namespace IDs. Each byte in the leavesNIDs parameter corresponds to one leaf's namespace ID. If nidSize is greater than 1, the function repeats each NID in leavesNIDs nidSize times before prepending it to the leaf data. 
func exampleNMT(nidSize int, ignoreMaxNamespace bool, leavesNIDs ...byte) *nmt.NamespacedMerkleTree { tree := nmt.New(sha256.New(), nmt.NamespaceIDSize(nidSize), nmt.IgnoreMaxNamespace(ignoreMaxNamespace)) for i, nid := range leavesNIDs { diff --git a/testutil/node.go b/testutil/node.go index ac7e294b1..1f7f0955f 100644 --- a/testutil/node.go +++ b/testutil/node.go @@ -24,7 +24,7 @@ import ( func CreateNode(isSequencer bool, blockManagerConfig *config.BlockManagerConfig, genesis *types.GenesisDoc) (*node.Node, error) { app := GetAppMock(EndBlock) - + // Create proxy app clientCreator := proxy.NewLocalClientCreator(app) proxyApp := proxy.NewAppConns(clientCreator) err := proxyApp.Start() @@ -48,7 +48,7 @@ func CreateNode(isSequencer bool, blockManagerConfig *config.BlockManagerConfig, signingKey, pubkey, _ := crypto.GenerateEd25519Key(rand.Reader) pubkeyBytes, _ := pubkey.Raw() - + // Node config nodeConfig := config.DefaultNodeConfig if blockManagerConfig == nil { @@ -62,7 +62,7 @@ func CreateNode(isSequencer bool, blockManagerConfig *config.BlockManagerConfig, } nodeConfig.BlockManagerConfig = *blockManagerConfig - + // SL config nodeConfig.SettlementConfig = settlement.Config{ProposerPubKey: hex.EncodeToString(pubkeyBytes)} node, err := node.NewNode( diff --git a/testutil/p2p.go b/testutil/p2p.go index 318abb499..8dd88fbe8 100644 --- a/testutil/p2p.go +++ b/testutil/p2p.go @@ -45,10 +45,10 @@ type HostDescr struct { RealKey bool } - +// copied from libp2p net/mock var blackholeIP6 = net.ParseIP("100::") - +// copied from libp2p net/mock func getAddr(sk crypto.PrivKey) (multiaddr.Multiaddr, error) { id, err := peer.IDFromPrivateKey(sk) if err != nil { @@ -92,7 +92,7 @@ func StartTestNetwork(ctx context.Context, t *testing.T, n int, conf map[int]Hos err := mnet.LinkAll() require.NoError(err) - + // prepare seed node lists seeds := make([]string, n) for src, descr := range conf { require.Less(src, n) diff --git a/testutil/rpc.go b/testutil/rpc.go index bbf17dae0..80b31c1e6 100644 --- a/testutil/rpc.go +++ b/testutil/rpc.go @@ -13,13 +13,13 @@ import ( ) func CreateLocalServer(t *testing.T) (*rpc.Server, net.Listener) { - + // Create a new local listener listener, err := nettest.NewLocalListener("tcp") require.NoError(t, err) serverReadyCh := make(chan bool, 1) var server *rpc.Server - + // Start server with listener go func() { node, err := CreateNode(true, nil, GenerateGenesis(0)) require.NoError(t, err) diff --git a/testutil/types.go b/testutil/types.go index 70ce267db..7f04ddd4d 100644 --- a/testutil/types.go +++ b/testutil/types.go @@ -21,9 +21,9 @@ import ( ) const ( - + // BlockVersion is the default block version for testing BlockVersion = 1 - + // AppVersion is the default app version for testing AppVersion = 0 SettlementAccountPrefix = "dym" @@ -63,7 +63,7 @@ func GenerateSettlementAddress() string { return addr } - +// generateBlock generates random blocks. func generateBlock(height uint64, proposerHash []byte, lastHeaderHash [32]byte) *types.Block { h := createRandomHashes() @@ -135,7 +135,7 @@ func GenerateBlocksWithTxs(startHeight uint64, num uint64, proposerKey crypto.Pr return blocks, nil } - +// GenerateBlocks generates random blocks. 
func GenerateBlocks(startHeight uint64, num uint64, proposerKey crypto.PrivKey, lastBlockHeader [32]byte) ([]*types.Block, error) { r, _ := proposerKey.Raw() seq := types.NewSequencerFromValidator(*tmtypes.NewValidator(ed25519.PrivKey(r).PubKey(), 1)) @@ -163,7 +163,7 @@ func GenerateBlocks(startHeight uint64, num uint64, proposerKey crypto.PrivKey, return blocks, nil } - +// GenerateCommits generates commits based on passed blocks. func GenerateCommits(blocks []*types.Block, proposerKey crypto.PrivKey) ([]*types.Commit, error) { commits := make([]*types.Commit, len(blocks)) @@ -205,7 +205,7 @@ func generateSignature(proposerKey crypto.PrivKey, header *types.Header) ([]byte return sign, nil } - +// GenerateBatch generates a batch out of random blocks func GenerateBatch(startHeight uint64, endHeight uint64, proposerKey crypto.PrivKey, lastBlockHeader [32]byte) (*types.Batch, error) { blocks, err := GenerateBlocks(startHeight, endHeight-startHeight+1, proposerKey, lastBlockHeader) if err != nil { @@ -223,7 +223,7 @@ func GenerateBatch(startHeight uint64, endHeight uint64, proposerKey crypto.Priv return batch, nil } - +// GenerateLastBatch generates a final batch with LastBatch flag set to true and different NextSequencerHash func GenerateLastBatch(startHeight uint64, endHeight uint64, proposerKey crypto.PrivKey, nextSequencerKey crypto.PrivKey, lastHeaderHash [32]byte) (*types.Batch, error) { nextSequencerRaw, _ := nextSequencerKey.Raw() nextSeq := types.NewSequencerFromValidator(*tmtypes.NewValidator(ed25519.PrivKey(nextSequencerRaw).PubKey(), 1)) @@ -248,7 +248,7 @@ func GenerateLastBatch(startHeight uint64, endHeight uint64, proposerKey crypto. return batch, nil } - +// GenerateLastBlocks is similar to GenerateBlocks but includes the NextSequencerHash func GenerateLastBlocks(startHeight uint64, num uint64, proposerKey crypto.PrivKey, lastHeaderHash [32]byte, nextSequencerHash [32]byte) ([]*types.Block, error) { r, _ := proposerKey.Raw() seq := types.NewSequencerFromValidator(*tmtypes.NewValidator(ed25519.PrivKey(r).PubKey(), 1)) @@ -304,7 +304,7 @@ func MustGenerateBatchAndKey(startHeight uint64, endHeight uint64) *types.Batch return MustGenerateBatch(startHeight, endHeight, proposerKey) } - +// GenerateRandomValidatorSet generates random validator sets func GenerateRandomValidatorSet() *tmtypes.ValidatorSet { return tmtypes.NewValidatorSet([]*tmtypes.Validator{ tmtypes.NewValidator(ed25519.GenPrivKey().PubKey(), 1), @@ -320,11 +320,11 @@ func GenerateSequencer() types.Sequencer { ) } - +// GenerateStateWithSequencer generates an initial state for testing. func GenerateStateWithSequencer(initialHeight int64, lastBlockHeight int64, pubkey tmcrypto.PubKey) *types.State { s := &types.State{ ChainID: "test-chain", - InitialHeight: uint64(initialHeight), + InitialHeight: uint64(initialHeight), //nolint:gosec // height is non-negative and falls in int64 AppHash: [32]byte{}, LastResultsHash: GetEmptyLastResultsHash(), Version: tmstate.Version{ @@ -350,11 +350,11 @@ func GenerateStateWithSequencer(initialHeight int64, lastBlockHeight int64, pubk GenerateSettlementAddress(), []string{GenerateSettlementAddress()}, )) - s.SetHeight(uint64(lastBlockHeight)) + s.SetHeight(uint64(lastBlockHeight)) //nolint:gosec // height is non-negative and falls in int64 return s } - +// GenerateGenesis generates a genesis for testing.
func GenerateGenesis(initialHeight int64) *tmtypes.GenesisDoc { return &tmtypes.GenesisDoc{ ChainID: "test-chain", diff --git a/types/batch.go b/types/batch.go index ecfadd20f..14d486539 100644 --- a/types/batch.go +++ b/types/batch.go @@ -1,21 +1,21 @@ package types const ( - MaxBlockSizeAdjustment = 0.9 + MaxBlockSizeAdjustment = 0.9 // have a safety margin of 10% in regard of MaxBlockBatchSizeBytes ) - - +// Batch defines a struct for block aggregation for support of batching. +// TODO: maybe change to BlockBatch type Batch struct { Blocks []*Block Commits []*Commit - + // LastBatch is true if this is the last batch of the sequencer (i.e completes it's rotation flow). LastBatch bool DRSVersion []uint32 Revision uint64 } - +// StartHeight is the height of the first block in the batch. func (b Batch) StartHeight() uint64 { if len(b.Blocks) == 0 { return 0 @@ -23,7 +23,7 @@ func (b Batch) StartHeight() uint64 { return b.Blocks[0].Header.Height } - +// EndHeight is the height of the last block in the batch func (b Batch) EndHeight() uint64 { if len(b.Blocks) == 0 { return 0 @@ -31,14 +31,14 @@ func (b Batch) EndHeight() uint64 { return b.Blocks[len(b.Blocks)-1].Header.Height } - +// NumBlocks is the number of blocks in the batch func (b Batch) NumBlocks() uint64 { return uint64(len(b.Blocks)) } - - - +// SizeBlockAndCommitBytes returns the sum of the size of bytes of the blocks and commits +// The actual size of the batch may be different due to additional metadata and protobuf +// optimizations. func (b Batch) SizeBlockAndCommitBytes() int { cnt := 0 for _, block := range b.Blocks { diff --git a/types/block.go b/types/block.go index 153eb3333..e6d2c1673 100644 --- a/types/block.go +++ b/types/block.go @@ -8,40 +8,40 @@ import ( tmtypes "github.com/tendermint/tendermint/types" ) - +// Header defines the structure of Dymint block header. type Header struct { - + // Block and App version Version Version Height uint64 - Time int64 + Time int64 // UNIX time in nanoseconds. Use int64 as Golang stores UNIX nanoseconds in int64. - + // prev block info LastHeaderHash [32]byte - - LastCommitHash [32]byte - DataHash [32]byte - ConsensusHash [32]byte - AppHash [32]byte + // hashes of block data + LastCommitHash [32]byte // commit from sequencer(s) from the last block + DataHash [32]byte // Block.Data root aka Transactions + ConsensusHash [32]byte // consensus params for current block + AppHash [32]byte // state after applying txs from height-1 - - - + // Root hash of all results from the txs from the previous block. + // This is ABCI specific but smart-contract chains require some way of committing + // to transaction receipts/results. LastResultsHash [32]byte - - - - - ProposerAddress []byte + // Note that the address can be derived from the pubkey which can be derived + // from the signature when using secp256k. + // We keep this in case users choose another signature format where the + // pubkey can't be recovered by the signature (e.g. ed25519). + ProposerAddress []byte // original proposer of the block - + // Hash of proposer validatorSet (compatible with tendermint) SequencerHash [32]byte - + // Hash of the next proposer validatorSet (compatible with tendermint) NextSequencersHash [32]byte - + // The Chain ID ChainID string } @@ -54,16 +54,16 @@ var ( _ encoding.BinaryUnmarshaler = &Header{} ) - - - - +// Version captures the consensus rules for processing a block in the blockchain, +// including all blockchain data structures and the rules of the application's +// state transition machine. 
+// This is equivalent to the tmversion.Consensus type in Tendermint. type Version struct { Block uint64 App uint64 } - +// Block defines the structure of Dymint block. type Block struct { Header Header Data Data @@ -83,7 +83,7 @@ var ( _ encoding.BinaryUnmarshaler = &Block{} ) - +// Data defines Dymint block data. type Data struct { Txs Txs IntermediateStateRoots IntermediateStateRoots @@ -91,16 +91,16 @@ type Data struct { ConsensusMessages []*proto.Any } - +// EvidenceData defines how evidence is stored in block. type EvidenceData struct { Evidence []Evidence } - +// Commit contains evidence of block creation. type Commit struct { Height uint64 HeaderHash [32]byte - + // TODO(omritoptix): Change from []Signature to Signature as it should be one signature per block Signatures []Signature TMSignature tmtypes.CommitSig } @@ -109,11 +109,11 @@ func (c Commit) SizeBytes() int { return c.ToProto().Size() } - +// Signature represents signature of block creator. type Signature []byte - - +// IntermediateStateRoots describes the state between transactions. +// They are required for fraud proofs. type IntermediateStateRoots struct { RawRootsList [][]byte } @@ -123,7 +123,7 @@ func GetLastCommitHash(lastCommit *Commit, header *Header) []byte { return lastABCICommit.Hash() } - +// GetDataHash returns the hash of the block data to be set in the block header. func GetDataHash(block *Block) []byte { abciData := tmtypes.Data{ Txs: ToABCIBlockDataTxs(&block.Data), diff --git a/types/block_source.go b/types/block_source.go index 43a2a0be5..e6304c524 100644 --- a/types/block_source.go +++ b/types/block_source.go @@ -24,7 +24,7 @@ var AllSources = []string{"none", "produced", "gossip", "blocksync", "da", "loca type BlockMetaData struct { Source BlockSource DAHeight uint64 - SequencerSet Sequencers + SequencerSet Sequencers // The set of Rollapp sequencers that were present in the Hub while producing this block } type CachedBlock struct { diff --git a/types/conv.go b/types/conv.go index 37f66eceb..afbfc94a6 100644 --- a/types/conv.go +++ b/types/conv.go @@ -6,22 +6,22 @@ import ( tmtypes "github.com/tendermint/tendermint/types" ) - - +// ToABCIHeaderPB converts Dymint header to Header format defined in ABCI. +// Caller should fill all the fields that are not available in Dymint header (like ChainID). func ToABCIHeaderPB(header *Header) types.Header { tmheader := ToABCIHeader(header) return *tmheader.ToProto() } - - +// ToABCIHeader converts Dymint header to Header format defined in ABCI. +// Caller should fill all the fields that are not available in Dymint header (like ChainID). func ToABCIHeader(header *Header) tmtypes.Header { return tmtypes.Header{ Version: version.Consensus{ Block: header.Version.Block, App: header.Version.App, }, - Height: int64(header.Height), + Height: int64(header.Height), //nolint:gosec // height is non-negative and falls in int64 Time: header.GetTimestamp(), LastBlockID: tmtypes.BlockID{ Hash: header.LastHeaderHash[:], @@ -43,12 +43,12 @@ func ToABCIHeader(header *Header) tmtypes.Header { } } - - +// ToABCIBlock converts Dymint block into block format defined by ABCI. +// Returned block should pass `ValidateBasic`. 
func ToABCIBlock(block *Block) (*tmtypes.Block, error) { abciHeader := ToABCIHeader(&block.Header) abciCommit := ToABCICommit(&block.LastCommit, &block.Header) - + // This assumes that we have only one signature if len(abciCommit.Signatures) == 1 { abciCommit.Signatures[0].ValidatorAddress = block.Header.ProposerAddress } @@ -65,7 +65,7 @@ func ToABCIBlock(block *Block) (*tmtypes.Block, error) { return &abciBlock, nil } - +// ToABCIBlockDataTxs converts Dymint block-data into block-data format defined by ABCI. func ToABCIBlockDataTxs(data *Data) []tmtypes.Tx { txs := make([]tmtypes.Tx, len(data.Txs)) for i := range data.Txs { @@ -74,7 +74,7 @@ func ToABCIBlockDataTxs(data *Data) []tmtypes.Tx { return txs } - +// ToABCIBlockMeta converts Dymint block into BlockMeta format defined by ABCI func ToABCIBlockMeta(block *Block) (*tmtypes.BlockMeta, error) { tmblock, err := ToABCIBlock(block) if err != nil { @@ -90,13 +90,13 @@ func ToABCIBlockMeta(block *Block) (*tmtypes.BlockMeta, error) { }, nil } - - - +// ToABCICommit converts Dymint commit into commit format defined by ABCI. +// This function only converts fields that are available in Dymint commit. +// Other fields (especially ValidatorAddress and Timestamp of Signature) has to be filled by caller. func ToABCICommit(commit *Commit, header *Header) *tmtypes.Commit { headerHash := header.Hash() tmCommit := tmtypes.Commit{ - Height: int64(commit.Height), + Height: int64(commit.Height), //nolint:gosec // height is non-negative and falls in int64 Round: 0, BlockID: tmtypes.BlockID{ Hash: headerHash[:], @@ -106,7 +106,7 @@ func ToABCICommit(commit *Commit, header *Header) *tmtypes.Commit { }, }, } - + // Check if TMSignature exists. if not use the previous dymint signature for backwards compatibility. if len(commit.TMSignature.Signature) == 0 { for _, sig := range commit.Signatures { commitSig := tmtypes.CommitSig{ @@ -115,7 +115,7 @@ func ToABCICommit(commit *Commit, header *Header) *tmtypes.Commit { } tmCommit.Signatures = append(tmCommit.Signatures, commitSig) } - + // This assumes that we have only one signature if len(commit.Signatures) == 1 { tmCommit.Signatures[0].ValidatorAddress = header.ProposerAddress tmCommit.Signatures[0].Timestamp = header.GetTimestamp() diff --git a/types/errors.go b/types/errors.go index 418e5e5a6..033c5bd80 100644 --- a/types/errors.go +++ b/types/errors.go @@ -24,11 +24,11 @@ var ( ErrEmptyProposerAddress = errors.New("no proposer address") ) - +// TimeFraudMaxDrift is the maximum allowed time drift between the block time and the local time. var TimeFraudMaxDrift = 10 * time.Minute - - +// ErrFraudHeightMismatch is the fraud that occurs when the height of the block is different from the expected +// next height of the state. type ErrFraudHeightMismatch struct { Expected uint64 Actual uint64 @@ -37,7 +37,7 @@ type ErrFraudHeightMismatch struct { Proposer []byte } - +// NewErrFraudHeightMismatch creates a new ErrFraudHeightMismatch error. func NewErrFraudHeightMismatch(expected uint64, header *Header) error { return &ErrFraudHeightMismatch{ Expected: expected, @@ -56,7 +56,7 @@ func (e ErrFraudHeightMismatch) Unwrap() error { return gerrc.ErrFault } - +// ErrFraudAppHashMismatch is the fraud that occurs when the AppHash of the block is different from the expected AppHash. type ErrFraudAppHashMismatch struct { Expected [32]byte @@ -66,7 +66,7 @@ type ErrFraudAppHashMismatch struct { Proposer []byte } - +// NewErrFraudAppHashMismatch creates a new ErrFraudAppHashMismatch error. 
func NewErrFraudAppHashMismatch(expected [32]byte, header *Header) error { return &ErrFraudAppHashMismatch{ Expected: expected, @@ -86,7 +86,7 @@ func (e ErrFraudAppHashMismatch) Unwrap() error { return gerrc.ErrFault } - +// ErrLastResultsHashMismatch indicates a potential fraud when the LastResultsHash of a block does not match the expected value. type ErrLastResultsHashMismatch struct { Expected [32]byte @@ -96,7 +96,7 @@ type ErrLastResultsHashMismatch struct { LastResultHash [32]byte } - +// NewErrLastResultsHashMismatch creates a new ErrLastResultsHashMismatch error. func NewErrLastResultsHashMismatch(expected [32]byte, header *Header) error { return &ErrLastResultsHashMismatch{ Expected: expected, @@ -116,7 +116,7 @@ func (e ErrLastResultsHashMismatch) Unwrap() error { return gerrc.ErrFault } - +// ErrTimeFraud represents an error indicating a possible fraud due to time drift. type ErrTimeFraud struct { Drift time.Duration ProposerAddress []byte @@ -153,7 +153,7 @@ func (e ErrTimeFraud) Unwrap() error { return gerrc.ErrFault } - +// ErrLastHeaderHashMismatch is the error that occurs when the last header hash does not match the expected value. type ErrLastHeaderHashMismatch struct { Expected [32]byte LastHeaderHash [32]byte @@ -174,7 +174,7 @@ func (e ErrLastHeaderHashMismatch) Unwrap() error { return gerrc.ErrFault } - +// ErrInvalidChainID is the fraud that occurs when the chain ID of the block is different from the expected chain ID. type ErrInvalidChainID struct { Expected string Block *Block @@ -200,8 +200,8 @@ func (e ErrInvalidChainID) Unwrap() error { return gerrc.ErrFault } - - +// ErrInvalidBlockHeightFraud is the fraud that happens when the height that is on the commit header is +// different from the height of the block. type ErrInvalidBlockHeightFraud struct { Expected uint64 Header *Header @@ -227,7 +227,7 @@ func (e ErrInvalidBlockHeightFraud) Unwrap() error { return gerrc.ErrFault } - +// ErrInvalidHeaderHashFraud indicates a potential fraud when the Header Hash does not match the expected value. type ErrInvalidHeaderHashFraud struct { ExpectedHash [32]byte Header *Header @@ -253,7 +253,7 @@ func (e ErrInvalidHeaderHashFraud) Unwrap() error { return gerrc.ErrFault } - +// ErrInvalidSignatureFraud indicates a potential fraud due to an invalid signature in the block. type ErrInvalidSignatureFraud struct { Err error Header *Header @@ -280,7 +280,7 @@ func (e ErrInvalidSignatureFraud) Unwrap() error { return gerrc.ErrFault } - +// ErrInvalidProposerAddressFraud indicates a potential fraud when the proposer's address is invalid. type ErrInvalidProposerAddressFraud struct { ExpectedAddress []byte ActualAddress tmcrypto.Address @@ -308,7 +308,7 @@ func (e ErrInvalidProposerAddressFraud) Unwrap() error { return gerrc.ErrFault } - +// ErrInvalidSequencerHashFraud indicates a potential fraud when the sequencer's hash is invalid. type ErrInvalidSequencerHashFraud struct { ExpectedHash [32]byte ActualHash []byte @@ -336,7 +336,7 @@ func (e ErrInvalidSequencerHashFraud) Unwrap() error { return gerrc.ErrFault } - +// ErrInvalidNextSequencersHashFraud indicates a potential fraud when the NextSequencersHash does not match the expected value. type ErrInvalidNextSequencersHashFraud struct { ExpectedHash [32]byte Header Header @@ -361,7 +361,7 @@ func (e ErrInvalidNextSequencersHashFraud) Unwrap() error { return gerrc.ErrFault } - +// ErrInvalidHeaderDataHashFraud indicates a potential fraud when the Header Data Hash does not match the expected value. 
type ErrInvalidHeaderDataHashFraud struct { Expected [32]byte Actual [32]byte @@ -390,7 +390,7 @@ func (e ErrInvalidHeaderDataHashFraud) Unwrap() error { return gerrc.ErrFault } - +// ErrStateUpdateNumBlocksNotMatchingFraud represents an error where the number of blocks in the state update does not match the expected number. type ErrStateUpdateNumBlocksNotMatchingFraud struct { StateIndex uint64 SLNumBlocks uint64 @@ -418,8 +418,8 @@ func (e ErrStateUpdateNumBlocksNotMatchingFraud) Unwrap() error { return gerrc.ErrFault } - - +// ErrStateUpdateHeightNotMatchingFraud is the fraud that happens when the height that is on the commit header is +// different from the height of the block. type ErrStateUpdateHeightNotMatchingFraud struct { StateIndex uint64 SLBeginHeight uint64 @@ -449,7 +449,7 @@ func (e ErrStateUpdateHeightNotMatchingFraud) Unwrap() error { return gerrc.ErrFault } - +// ErrStateUpdateStateRootNotMatchingFraud represents an error where the state roots do not match in the state update. type ErrStateUpdateStateRootNotMatchingFraud struct { StateIndex uint64 Height uint64 @@ -478,7 +478,7 @@ func (e ErrStateUpdateStateRootNotMatchingFraud) Unwrap() error { return gerrc.ErrFault } - +// ErrStateUpdateTimestampNotMatchingFraud represents an error where the timestamps do not match in the state update. type ErrStateUpdateTimestampNotMatchingFraud struct { StateIndex uint64 Height uint64 @@ -506,7 +506,7 @@ func (e ErrStateUpdateTimestampNotMatchingFraud) Unwrap() error { return gerrc.ErrFault } - +// ErrStateUpdateDoubleSigningFraud indicates a potential fraud due to double signing detected between DA and P2P blocks. type ErrStateUpdateDoubleSigningFraud struct { DABlock *Block P2PBlock *Block @@ -571,7 +571,7 @@ func getJsonFromBlock(block *Block) ([]byte, error) { return jsonBlock, nil } - +// ErrStateUpdateBlobNotAvailableFraud represents an error where a blob is not available in DA. type ErrStateUpdateBlobNotAvailableFraud struct { StateIndex uint64 DA string @@ -599,7 +599,7 @@ func (e ErrStateUpdateBlobNotAvailableFraud) Unwrap() error { return gerrc.ErrFault } - +// ErrStateUpdateBlobCorruptedFraud represents an error where a blob is corrupted in DA. type ErrStateUpdateBlobCorruptedFraud struct { StateIndex uint64 DA string @@ -627,7 +627,7 @@ func (e ErrStateUpdateBlobCorruptedFraud) Unwrap() error { return gerrc.ErrFault } - +// ErrStateUpdateDRSVersionFraud represents an error where the DRS versions do not match in the state update. type ErrStateUpdateDRSVersionFraud struct { StateIndex uint64 Height uint64 diff --git a/types/evidence.go b/types/evidence.go index 8aff5b04f..ba17e1b0b 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -3,19 +3,19 @@ package types import ( "time" - - + // TODO: either copy the vanilla abci types (or the protos) into this repo + // or, import the vanilla tendermint types instead. abci "github.com/tendermint/tendermint/abci/types" ) - - +// Evidence represents any provable malicious activity by a validator. +// Verification logic for each evidence is part of the evidence module. 
type Evidence interface { - ABCI() []abci.Evidence - Bytes() []byte - Hash() []byte - Height() int64 - String() string - Time() time.Time - ValidateBasic() error + ABCI() []abci.Evidence // forms individual evidence to be sent to the application + Bytes() []byte // bytes which comprise the evidence + Hash() []byte // hash of the evidence + Height() int64 // height of the infraction + String() string // string format of the evidence + Time() time.Time // time of the infraction + ValidateBasic() error // basic consistency check } diff --git a/types/hashing.go b/types/hashing.go index 931df948a..17162ee0e 100644 --- a/types/hashing.go +++ b/types/hashing.go @@ -1,6 +1,6 @@ package types - +// Hash returns ABCI-compatible hash of a header. func (h *Header) Hash() [32]byte { var hash [32]byte abciHeader := ToABCIHeader(h) @@ -8,7 +8,7 @@ func (h *Header) Hash() [32]byte { return hash } - +// Hash returns ABCI-compatible hash of a block. func (b *Block) Hash() [32]byte { return b.Header.Hash() } diff --git a/types/instruction.go b/types/instruction.go index 8f735f6d1..ebae50aa5 100644 --- a/types/instruction.go +++ b/types/instruction.go @@ -33,7 +33,7 @@ func LoadInstructionFromDisk(dir string) (Instruction, error) { var instruction Instruction filePath := filepath.Join(dir, instructionFileName) - data, err := os.ReadFile(filePath) + data, err := os.ReadFile(filePath) // nolint:gosec if err != nil { return Instruction{}, err } diff --git a/types/logger.go b/types/logger.go index dfc89d708..e2c8fcdac 100644 --- a/types/logger.go +++ b/types/logger.go @@ -1,6 +1,6 @@ package types - +// Logger interface is compatible with Tendermint logger type Logger interface { Debug(msg string, keyvals ...interface{}) Info(msg string, keyvals ...interface{}) diff --git a/types/pb/dymensionxyz/dymension/rollapp/errors.go b/types/pb/dymensionxyz/dymension/rollapp/errors.go index 1d9d3c05c..2caa18964 100644 --- a/types/pb/dymensionxyz/dymension/rollapp/errors.go +++ b/types/pb/dymensionxyz/dymension/rollapp/errors.go @@ -1,13 +1,13 @@ package rollapp - +// DONTCOVER import ( errorsmod "cosmossdk.io/errors" "github.com/dymensionxyz/gerr-cosmos/gerrc" ) - +// x/rollapp module sentinel errors var ( ErrRollappExists = errorsmod.Register(ModuleName, 1000, "rollapp already exists") ErrInvalidInitialSequencer = errorsmod.Register(ModuleName, 1001, "empty initial sequencer") @@ -44,7 +44,7 @@ var ( ErrInvalidRequest = errorsmod.Wrap(gerrc.ErrInvalidArgument, "invalid request") ErrInvalidVMType = errorsmod.Wrap(gerrc.ErrInvalidArgument, "invalid vm type") - + /* ------------------------------ fraud related ----------------------------- */ ErrDisputeAlreadyFinalized = errorsmod.Register(ModuleName, 2000, "disputed height already finalized") ErrDisputeAlreadyReverted = errorsmod.Register(ModuleName, 2001, "disputed height already reverted") ErrWrongClientId = errorsmod.Register(ModuleName, 2002, "client id does not match the rollapp") diff --git a/types/pb/dymensionxyz/dymension/rollapp/events.go b/types/pb/dymensionxyz/dymension/rollapp/events.go index 259a12f03..ae0f6e3d1 100644 --- a/types/pb/dymensionxyz/dymension/rollapp/events.go +++ b/types/pb/dymensionxyz/dymension/rollapp/events.go @@ -11,12 +11,12 @@ const ( AttributeKeyDAPath = "da_path" AttributeKeyStatus = "status" - + // EventTypeFraud is emitted when a fraud evidence is submitted EventTypeFraud = "fraud_proposal" AttributeKeyFraudHeight = "fraud_height" AttributeKeyFraudSequencer = "fraud_sequencer" AttributeKeyClientID = "client_id" - + // 
EventTypeTransferGenesisTransfersEnabled is when the bridge is enabled EventTypeTransferGenesisTransfersEnabled = "transfer_genesis_transfers_enabled" ) diff --git a/types/pb/dymensionxyz/dymension/rollapp/keys.go b/types/pb/dymensionxyz/dymension/rollapp/keys.go index 61858ca0d..ca4e7b64c 100644 --- a/types/pb/dymensionxyz/dymension/rollapp/keys.go +++ b/types/pb/dymensionxyz/dymension/rollapp/keys.go @@ -1,19 +1,19 @@ package rollapp const ( - + // ModuleName defines the module name ModuleName = "rollapp" - + // StoreKey defines the primary module store key StoreKey = ModuleName - + // RouterKey is the message route for slashing RouterKey = ModuleName - + // QuerierRoute defines the module's query routing key QuerierRoute = ModuleName - + // MemStoreKey defines the in-memory store key MemStoreKey = "mem_rollapp" ) diff --git a/types/pb/dymensionxyz/dymension/rollapp/message_update_state.go b/types/pb/dymensionxyz/dymension/rollapp/message_update_state.go index 0dec4fc93..11b1c7f3c 100644 --- a/types/pb/dymensionxyz/dymension/rollapp/message_update_state.go +++ b/types/pb/dymensionxyz/dymension/rollapp/message_update_state.go @@ -25,7 +25,7 @@ func (msg *MsgUpdateState) ValidateBasic() error { return errorsmod.Wrapf(ErrInvalidAddress, "invalid creator address (%s)", err) } - + // an update can't have zero BDs if msg.NumBlocks == uint64(0) { return errorsmod.Wrap(ErrInvalidNumBlocks, "number of blocks can not be zero") } @@ -34,22 +34,22 @@ func (msg *MsgUpdateState) ValidateBasic() error { return errorsmod.Wrapf(ErrInvalidNumBlocks, "numBlocks(%d) + startHeight(%d) exceeds max uint64", msg.NumBlocks, msg.StartHeight) } - + // check to see that the update contains all BDs if uint64(len(msg.BDs.BD)) != msg.NumBlocks { return errorsmod.Wrapf(ErrInvalidNumBlocks, "number of blocks (%d) != number of block descriptors(%d)", msg.NumBlocks, len(msg.BDs.BD)) } - + // check to see that startHeight is not zero if msg.StartHeight == 0 { return errorsmod.Wrapf(ErrWrongBlockHeight, "StartHeight must be greater than zero") } - + // check that the blocks are sequential by height for bdIndex := uint64(0); bdIndex < msg.NumBlocks; bdIndex += 1 { if msg.BDs.BD[bdIndex].Height != msg.StartHeight+bdIndex { return ErrInvalidBlockSequence } - + // check to see that stateRoot is a 32 byte array if len(msg.BDs.BD[bdIndex].StateRoot) != 32 { return errorsmod.Wrapf(ErrInvalidStateRoot, "StateRoot of block high (%d) must be 32 byte array. But received (%d) bytes", msg.BDs.BD[bdIndex].Height, len(msg.BDs.BD[bdIndex].StateRoot)) diff --git a/types/pb/dymensionxyz/dymension/rollapp/params.go b/types/pb/dymensionxyz/dymension/rollapp/params.go index 64c9ad818..f12bb0f0b 100644 --- a/types/pb/dymensionxyz/dymension/rollapp/params.go +++ b/types/pb/dymensionxyz/dymension/rollapp/params.go @@ -2,7 +2,7 @@ package rollapp import "gopkg.in/yaml.v2" - +// String implements the Stringer interface. func (p Params) String() string { out, _ := yaml.Marshal(p) return string(out) diff --git a/types/pb/dymensionxyz/dymension/sequencer/events.go b/types/pb/dymensionxyz/dymension/sequencer/events.go index 01fd6ea51..eb93ddc7a 100644 --- a/types/pb/dymensionxyz/dymension/sequencer/events.go +++ b/types/pb/dymensionxyz/dymension/sequencer/events.go @@ -1,27 +1,27 @@ package sequencer - +// Incentive module event types.
const ( - + // EventTypeCreateSequencer is emitted when a sequencer is created EventTypeCreateSequencer = "create_sequencer" AttributeKeyRollappId = "rollapp_id" AttributeKeySequencer = "sequencer" AttributeKeyBond = "bond" AttributeKeyProposer = "proposer" - + // EventTypeUnbonding is emitted when a sequencer is unbonding EventTypeUnbonding = "unbonding" AttributeKeyCompletionTime = "completion_time" - + // EventTypeNoBondedSequencer is emitted when no bonded sequencer is found for a rollapp EventTypeNoBondedSequencer = "no_bonded_sequencer" - + // EventTypeProposerRotated is emitted when a proposer is rotated EventTypeProposerRotated = "proposer_rotated" - + // EventTypeUnbonded is emitted when a sequencer is unbonded EventTypeUnbonded = "unbonded" - + // EventTypeSlashed is emitted when a sequencer is slashed EventTypeSlashed = "slashed" ) diff --git a/types/pb/dymensionxyz/dymension/sequencer/keys.go b/types/pb/dymensionxyz/dymension/sequencer/keys.go index 8bae0a8a5..c4b84447f 100644 --- a/types/pb/dymensionxyz/dymension/sequencer/keys.go +++ b/types/pb/dymensionxyz/dymension/sequencer/keys.go @@ -11,63 +11,63 @@ import ( var _ binary.ByteOrder const ( - + // ModuleName defines the module name ModuleName = "sequencer" - + // StoreKey defines the primary module store key StoreKey = ModuleName - + // RouterKey is the message route for slashing RouterKey = ModuleName - + // QuerierRoute defines the module's query routing key QuerierRoute = ModuleName - + // MemStoreKey defines the in-memory store key MemStoreKey = "mem_sequencer" ) var ( - + // KeySeparator defines the separator for keys KeySeparator = "/" - - SequencersKeyPrefix = []byte{0x00} + // SequencersKeyPrefix is the prefix to retrieve all Sequencers by their address + SequencersKeyPrefix = []byte{0x00} // prefix/seqAddr - - SequencersByRollappKeyPrefix = []byte{0x01} + // SequencersByRollappKeyPrefix is the prefix to retrieve all SequencersByRollapp + SequencersByRollappKeyPrefix = []byte{0x01} // prefix/rollappId BondedSequencersKeyPrefix = []byte{0xa1} UnbondedSequencersKeyPrefix = []byte{0xa2} UnbondingSequencersKeyPrefix = []byte{0xa3} - UnbondingQueueKey = []byte{0x41} + UnbondingQueueKey = []byte{0x41} // prefix for the timestamps in unbonding queue ) - +/* --------------------- specific sequencer address keys -------------------- */ func SequencerKey(sequencerAddress string) []byte { sequencerAddrBytes := []byte(sequencerAddress) return []byte(fmt.Sprintf("%s%s%s", SequencersKeyPrefix, KeySeparator, sequencerAddrBytes)) } - +// SequencerByRollappByStatusKey returns the store key to retrieve a SequencersByRollapp from the index fields func SequencerByRollappByStatusKey(rollappId, seqAddr string, status OperatingStatus) []byte { return append(SequencersByRollappByStatusKey(rollappId, status), []byte(seqAddr)...) 
} - +/* ------------------------- multiple sequencers keys ------------------------ */ func SequencersKey() []byte { return SequencersKeyPrefix } - +// SequencersByRollappKey returns the store key to retrieve a SequencersByRollapp from the index fields func SequencersByRollappKey(rollappId string) []byte { rollappIdBytes := []byte(rollappId) return []byte(fmt.Sprintf("%s%s%s", SequencersByRollappKeyPrefix, KeySeparator, rollappIdBytes)) } - +// SequencersByRollappByStatusKey returns the store key to retrieve a SequencersByRollappByStatus from the index fields func SequencersByRollappByStatusKey(rollappId string, status OperatingStatus) []byte { - + // Get the relevant key prefix based on the packet status var prefix []byte switch status { case Bonded: @@ -81,16 +81,16 @@ func SequencersByRollappByStatusKey(rollappId string, status OperatingStatus) [] return []byte(fmt.Sprintf("%s%s%s", SequencersByRollappKey(rollappId), KeySeparator, prefix)) } - +/* -------------------------- unbonding queue keys -------------------------- */ func UnbondingQueueByTimeKey(endTime time.Time) []byte { timeBz := sdk.FormatTimeBytes(endTime) prefixL := len(UnbondingQueueKey) bz := make([]byte, prefixL+len(timeBz)) - + // copy the prefix copy(bz[:prefixL], UnbondingQueueKey) - + // copy the encoded time bytes copy(bz[prefixL:prefixL+len(timeBz)], timeBz) return bz diff --git a/types/pb/dymensionxyz/dymension/sequencer/params.go b/types/pb/dymensionxyz/dymension/sequencer/params.go index de39b13dc..5bf8971f0 100644 --- a/types/pb/dymensionxyz/dymension/sequencer/params.go +++ b/types/pb/dymensionxyz/dymension/sequencer/params.go @@ -4,7 +4,7 @@ import ( "gopkg.in/yaml.v2" ) - +// String implements the Stringer interface. func (p Params) String() string { out, _ := yaml.Marshal(p) return string(out) diff --git a/types/rollapp.go b/types/rollapp.go index 87951daf5..f6fcd1d14 100644 --- a/types/rollapp.go +++ b/types/rollapp.go @@ -14,7 +14,7 @@ type Revision struct { func (r Rollapp) LatestRevision() Revision { if len(r.Revisions) == 0 { - + // Revision 0 if no revisions exist. return Revision{} } return r.Revisions[len(r.Revisions)-1] diff --git a/types/sequencer_set.go b/types/sequencer_set.go index 1d294b2c7..6d40142e7 100644 --- a/types/sequencer_set.go +++ b/types/sequencer_set.go @@ -13,18 +13,18 @@ import ( "github.com/tendermint/tendermint/types" ) - - - +// Sequencer is a struct that holds the sequencer's information and tendermint validator. +// It is populated from the Hub on start and is periodically updated from the Hub polling. +// Uses tendermint's validator types for compatibility. type Sequencer struct { - + // SettlementAddress is the address of the sequencer in the settlement layer (bech32 string) SettlementAddress string - + // RewardAddr is the bech32-encoded sequencer's reward address RewardAddr string - + // WhitelistedRelayers is a list of the whitelisted relayer addresses. Addresses are bech32-encoded strings. WhitelistedRelayers []string - + // val is a tendermint validator type for compatibility. Holds the public key and cons address. 
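[Editor's illustration, not part of the patch] The sequencer keys.go hunk above documents a composite store-key layout of prefix / rollappId / statusPrefix joined by the "/" separator. A minimal standalone sketch of that layout, using lower-case stand-ins (not the package's identifiers) for the prefixes shown above:

package main

import "fmt"

// Hypothetical stand-ins mirroring the prefixes documented in the keys.go hunk above.
var (
	keySeparator                 = "/"
	sequencersByRollappKeyPrefix = []byte{0x01}
	bondedSequencersKeyPrefix    = []byte{0xa1}
)

// sequencersByRollappByStatusKey sketches how the composite key
// prefix/rollappId/statusPrefix is assembled.
func sequencersByRollappByStatusKey(rollappID string, statusPrefix []byte) []byte {
	byRollapp := fmt.Sprintf("%s%s%s", sequencersByRollappKeyPrefix, keySeparator, rollappID)
	return []byte(fmt.Sprintf("%s%s%s", byRollapp, keySeparator, statusPrefix))
}

func main() {
	key := sequencersByRollappByStatusKey("rollapp_1234-1", bondedSequencersKeyPrefix)
	fmt.Printf("%q\n", key) // "\x01/rollapp_1234-1/\xa1"
}

Keeping the status prefix as the final segment means all bonded sequencers of a rollapp share a common key prefix, so they can be listed with a single prefix scan.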
val types.Validator } @@ -45,8 +45,8 @@ func NewSequencer( } } - - +// IsEmpty returns true if the sequencer is empty +// we check if the pubkey is nil func (s Sequencer) IsEmpty() bool { return s.val.PubKey == nil } @@ -71,7 +71,7 @@ func (s Sequencer) TMValset() (*types.ValidatorSet, error) { return types.ValidatorSetFromExistingValidators(s.TMValidators()) } - +// Hash returns tendermint compatible hash of the sequencer func (s Sequencer) Hash() ([]byte, error) { vs, err := s.TMValset() if err != nil { @@ -80,7 +80,7 @@ func (s Sequencer) Hash() ([]byte, error) { return vs.Hash(), nil } - +// MustHash returns tendermint compatible hash of the sequencer func (s Sequencer) MustHash() []byte { h, err := s.Hash() if err != nil { @@ -89,7 +89,7 @@ func (s Sequencer) MustHash() []byte { return h } - +// AnyConsPubKey returns sequencer's consensus public key represented as Cosmos proto.Any. func (s Sequencer) AnyConsPubKey() (*codectypes.Any, error) { val := s.TMValidator() pubKey, err := cryptocodec.FromTmPubKeyInterface(val.PubKey) @@ -103,7 +103,7 @@ func (s Sequencer) AnyConsPubKey() (*codectypes.Any, error) { return anyPK, nil } - +// MustFullHash returns a "full" hash of the sequencer that includes all fields of the Sequencer type. func (s Sequencer) MustFullHash() []byte { h := sha256.New() h.Write([]byte(s.SettlementAddress)) @@ -115,14 +115,14 @@ func (s Sequencer) MustFullHash() []byte { return h.Sum(nil) } - - - - - - - - +// SequencerListRightOuterJoin returns a set of sequencers that are in B but not in A. +// Sequencer is identified by a hash of all of it's fields. +// +// Example 1: +// +// s1 = {seq1, seq2, seq3} +// s2 = { seq2, seq3, seq4} +// s1 * s2 = { seq4} func SequencerListRightOuterJoin(A, B Sequencers) Sequencers { lhsSet := make(map[string]struct{}) for _, s := range A { @@ -141,13 +141,13 @@ func (s Sequencer) String() string { return fmt.Sprintf("Sequencer{SettlementAddress: %s RewardAddr: %s WhitelistedRelayers: %v Validator: %s}", s.SettlementAddress, s.RewardAddr, s.WhitelistedRelayers, s.val.String()) } - +// Sequencers is a list of sequencers. type Sequencers []Sequencer - - - - +// SequencerSet is a set of rollapp sequencers. It holds the entire set of sequencers +// that were ever associated with the rollapp (including bonded/unbonded/unbonding). +// It is populated from the Hub on start and is periodically updated from the Hub polling. +// This type is thread-safe. type SequencerSet struct { mu sync.RWMutex sequencers Sequencers @@ -160,7 +160,7 @@ func NewSequencerSet(s ...Sequencer) *SequencerSet { } } - +// Set sets the sequencers of the sequencer set. func (s *SequencerSet) Set(sequencers Sequencers) { s.mu.Lock() defer s.mu.Unlock() @@ -173,7 +173,7 @@ func (s *SequencerSet) GetAll() Sequencers { return slices.Clone(s.sequencers) } - +// GetByHash gets the sequencer by hash. It returns an error if the hash is not found in the sequencer set. func (s *SequencerSet) GetByHash(hash []byte) (Sequencer, bool) { s.mu.RLock() defer s.mu.RUnlock() @@ -185,8 +185,8 @@ func (s *SequencerSet) GetByHash(hash []byte) (Sequencer, bool) { return Sequencer{}, false } - - +// GetByAddress returns the sequencer with the given settlement address. 
+// used when handling events from the settlement, where the settlement address is used func (s *SequencerSet) GetByAddress(settlementAddress string) (Sequencer, bool) { s.mu.RLock() defer s.mu.RUnlock() @@ -198,7 +198,7 @@ func (s *SequencerSet) GetByAddress(settlementAddress string) (Sequencer, bool) return Sequencer{}, false } - +// GetByConsAddress returns the sequencer with the given consensus address. func (s *SequencerSet) GetByConsAddress(consAddr []byte) (Sequencer, bool) { s.mu.RLock() defer s.mu.RUnlock() @@ -214,9 +214,9 @@ func (s *SequencerSet) String() string { return fmt.Sprintf("SequencerSet: %v", s.sequencers) } - - - +/* -------------------------- backward compatibility ------------------------- */ +// old dymint version used tendermint.ValidatorSet for sequencers +// these methods are used for backward compatibility func NewSequencerFromValidator(val types.Validator) *Sequencer { return &Sequencer{ diff --git a/types/serialization.go b/types/serialization.go index 14965e6a0..a4e79bb8e 100644 --- a/types/serialization.go +++ b/types/serialization.go @@ -12,17 +12,17 @@ import ( pb "github.com/dymensionxyz/dymint/types/pb/dymint" ) - +// MarshalBinary encodes Block into binary form and returns it. func (b *Block) MarshalBinary() ([]byte, error) { return b.ToProto().Marshal() } - +// MarshalBinary encodes Batch into binary form and returns it. func (b *Batch) MarshalBinary() ([]byte, error) { return b.ToProto().Marshal() } - +// UnmarshalBinary decodes binary form of Block into object. func (b *Block) UnmarshalBinary(data []byte) error { var pBlock pb.Block err := pBlock.Unmarshal(data) @@ -33,7 +33,7 @@ func (b *Block) UnmarshalBinary(data []byte) error { return err } - +// UnmarshalBinary decodes binary form of Batch into object. func (b *Batch) UnmarshalBinary(data []byte) error { var pBatch pb.Batch err := pBatch.Unmarshal(data) @@ -44,12 +44,12 @@ func (b *Batch) UnmarshalBinary(data []byte) error { return err } - +// MarshalBinary encodes Header into binary form and returns it. func (h *Header) MarshalBinary() ([]byte, error) { return h.ToProto().Marshal() } - +// UnmarshalBinary decodes binary form of Header into object. func (h *Header) UnmarshalBinary(data []byte) error { var pHeader pb.Header err := pHeader.Unmarshal(data) @@ -60,17 +60,17 @@ func (h *Header) UnmarshalBinary(data []byte) error { return err } - +// MarshalBinary encodes Data into binary form and returns it. func (d *Data) MarshalBinary() ([]byte, error) { return d.ToProto().Marshal() } - +// MarshalBinary encodes Commit into binary form and returns it. func (c *Commit) MarshalBinary() ([]byte, error) { return c.ToProto().Marshal() } - +// UnmarshalBinary decodes binary form of Commit into object. func (c *Commit) UnmarshalBinary(data []byte) error { var pCommit pb.Commit err := pCommit.Unmarshal(data) @@ -81,7 +81,7 @@ func (c *Commit) UnmarshalBinary(data []byte) error { return err } - +// ToProto converts Header into protobuf representation and returns it. func (h *Header) ToProto() *pb.Header { return &pb.Header{ Version: &pb.Version{Block: h.Version.Block, App: h.Version.App}, @@ -101,7 +101,7 @@ func (h *Header) ToProto() *pb.Header { } } - +// FromProto fills Header with data from its protobuf representation. 
func (h *Header) FromProto(other *pb.Header) error { h.Version.Block = other.Version.Block h.Version.App = other.Version.App @@ -140,8 +140,8 @@ func (h *Header) FromProto(other *pb.Header) error { return nil } - - +// safeCopy copies bytes from src slice into dst slice if both have same size. +// It returns true if sizes of src and dst are the same. func safeCopy(dst, src []byte) bool { if len(src) != len(dst) { return false @@ -150,7 +150,7 @@ func safeCopy(dst, src []byte) bool { return true } - +// ToProto converts Block into protobuf representation and returns it. func (b *Block) ToProto() *pb.Block { return &pb.Block{ Header: b.Header.ToProto(), @@ -159,7 +159,7 @@ func (b *Block) ToProto() *pb.Block { } } - +// ToProto converts Batch into protobuf representation and returns it. func (b *Batch) ToProto() *pb.Batch { return &pb.Batch{ StartHeight: b.StartHeight(), @@ -169,7 +169,7 @@ func (b *Batch) ToProto() *pb.Batch { } } - +// ToProto converts Data into protobuf representation and returns it. func (d *Data) ToProto() *pb.Data { return &pb.Data{ Txs: txsToByteSlices(d.Txs), @@ -179,7 +179,7 @@ func (d *Data) ToProto() *pb.Data { } } - +// FromProto fills Block with data from its protobuf representation. func (b *Block) FromProto(other *pb.Block) error { err := b.Header.FromProto(other.Header) if err != nil { @@ -199,7 +199,7 @@ func (b *Block) FromProto(other *pb.Block) error { return nil } - +// FromProto fills Batch with data from its protobuf representation. func (b *Batch) FromProto(other *pb.Batch) error { n := len(other.Blocks) start := other.StartHeight @@ -215,7 +215,7 @@ func (b *Batch) FromProto(other *pb.Batch) error { return nil } - +// ToProto converts Commit into protobuf representation and returns it. func (c *Commit) ToProto() *pb.Commit { return &pb.Commit{ Height: c.Height, @@ -230,14 +230,14 @@ func (c *Commit) ToProto() *pb.Commit { } } - +// FromProto fills Commit with data from its protobuf representation. func (c *Commit) FromProto(other *pb.Commit) error { c.Height = other.Height if !safeCopy(c.HeaderHash[:], other.HeaderHash) { return errors.New("invalid length of HeaderHash") } c.Signatures = byteSlicesToSignatures(other.Signatures) - + // For backwards compatibility with old state files that don't have this field. if other.TmSignature != nil { c.TMSignature = types.CommitSig{ BlockIDFlag: types.BlockIDFlag(other.TmSignature.BlockIdFlag), @@ -250,7 +250,7 @@ func (c *Commit) FromProto(other *pb.Commit) error { return nil } - +// ToProto converts State into protobuf representation and returns it. func (s *State) ToProto() (*pb.State, error) { var proposerProto *pb.Sequencer proposer := s.GetProposer() @@ -265,25 +265,25 @@ func (s *State) ToProto() (*pb.State, error) { return &pb.State{ Version: &s.Version, ChainId: s.ChainID, - InitialHeight: int64(s.InitialHeight), - LastBlockHeight: int64(s.Height()), + InitialHeight: int64(s.InitialHeight), //nolint:gosec // height is non-negative and falls in int64 + LastBlockHeight: int64(s.Height()), //nolint:gosec // height is non-negative and falls in int64 ConsensusParams: s.ConsensusParams, LastResultsHash: s.LastResultsHash[:], LastHeaderHash: s.LastHeaderHash[:], AppHash: s.AppHash[:], RollappParams: s.RollappParams, Proposer: proposerProto, - RevisionStartHeight: int64(s.RevisionStartHeight), + RevisionStartHeight: int64(s.RevisionStartHeight), //nolint:gosec // height is non-negative and falls in int64 }, nil } - +// FromProto fills State with data from its protobuf representation. 
func (s *State) FromProto(other *pb.State) error { s.Version = *other.Version s.ChainID = other.ChainId - s.InitialHeight = uint64(other.InitialHeight) - s.SetHeight(uint64(other.LastBlockHeight)) - s.RevisionStartHeight = uint64(other.RevisionStartHeight) + s.InitialHeight = uint64(other.InitialHeight) //nolint:gosec // height is non-negative and falls in int64 + s.SetHeight(uint64(other.LastBlockHeight)) //nolint:gosec // height is non-negative and falls in int64 + s.RevisionStartHeight = uint64(other.RevisionStartHeight) //nolint:gosec // height is non-negative and falls in int64 if other.Proposer != nil { proposer, err := SequencerFromProto(other.Proposer) if err != nil { @@ -291,7 +291,7 @@ func (s *State) FromProto(other *pb.State) error { } s.SetProposer(proposer) } else { - + // proposer may be nil in the state s.SetProposer(nil) } @@ -303,7 +303,7 @@ func (s *State) FromProto(other *pb.State) error { return nil } - +// ToProto converts Sequencer into protobuf representation and returns it. func (s *Sequencer) ToProto() (*pb.Sequencer, error) { if s == nil { return nil, fmt.Errorf("nil sequencer") @@ -320,7 +320,7 @@ func (s *Sequencer) ToProto() (*pb.Sequencer, error) { }, nil } - +// SequencerFromProto fills Sequencer with data from its protobuf representation. func SequencerFromProto(seq *pb.Sequencer) (*Sequencer, error) { if seq == nil { return nil, fmt.Errorf("nil sequencer") @@ -337,7 +337,7 @@ func SequencerFromProto(seq *pb.Sequencer) (*Sequencer, error) { }, nil } - +// ToProto converts Sequencers into protobuf representation and returns it. func (s Sequencers) ToProto() (*pb.SequencerSet, error) { seqs := make([]pb.Sequencer, len(s)) for i, seq := range s { @@ -350,7 +350,7 @@ func (s Sequencers) ToProto() (*pb.SequencerSet, error) { return &pb.SequencerSet{Sequencers: seqs}, nil } - +// SequencersFromProto fills Sequencers with data from its protobuf representation. func SequencersFromProto(s *pb.SequencerSet) (Sequencers, error) { if s == nil { return Sequencers{}, fmt.Errorf("nil sequencer set") @@ -389,7 +389,7 @@ func evidenceToProto(evidence EvidenceData) []*abci.Evidence { var ret []*abci.Evidence for _, e := range evidence.Evidence { for _, ae := range e.ABCI() { - ret = append(ret, &ae) + ret = append(ret, &ae) //#nosec } } return ret @@ -397,7 +397,7 @@ func evidenceToProto(evidence EvidenceData) []*abci.Evidence { func evidenceFromProto([]*abci.Evidence) EvidenceData { var ret EvidenceData - + // TODO(tzdybal): right now Evidence is just an interface without implementations return ret } @@ -423,7 +423,7 @@ func byteSlicesToSignatures(bytes [][]byte) []Signature { return sigs } - +// Convert a list of blocks to a list of protobuf blocks. func blocksToProto(blocks []*Block) []*pb.Block { pbBlocks := make([]*pb.Block, len(blocks)) for i, b := range blocks { @@ -432,7 +432,7 @@ func blocksToProto(blocks []*Block) []*pb.Block { return pbBlocks } - +// protoToBlocks converts a list of protobuf blocks to a list of go struct blocks. func protoToBlocks(pbBlocks []*pb.Block) []*Block { blocks := make([]*Block, len(pbBlocks)) for i, b := range pbBlocks { @@ -445,7 +445,7 @@ func protoToBlocks(pbBlocks []*pb.Block) []*Block { return blocks } - +// commitsToProto converts a list of commits to a list of protobuf commits. 
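[Editor's illustration, not part of the patch] The serialization hunks above use safeCopy when filling fixed-size hash arrays from protobuf byte slices. A minimal sketch of that guard, assuming only the behaviour the comment describes (copy only when lengths match, report whether anything was copied):

package main

import "fmt"

// safeCopySketch mirrors the documented safeCopy helper: it copies src into
// dst only when both slices have the same length and reports success.
func safeCopySketch(dst, src []byte) bool {
	if len(src) != len(dst) {
		return false
	}
	copy(dst, src)
	return true
}

func main() {
	var headerHash [32]byte
	ok := safeCopySketch(headerHash[:], make([]byte, 31)) // wrong length: nothing copied
	fmt.Println(ok)                                       // false
}

Returning false instead of panicking lets FromProto turn a malformed protobuf field into an ordinary error, as the hunks above do for HeaderHash.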
func commitsToProto(commits []*Commit) []*pb.Commit { pbCommits := make([]*pb.Commit, len(commits)) for i, c := range commits { @@ -454,7 +454,7 @@ func commitsToProto(commits []*Commit) []*pb.Commit { return pbCommits } - +// protoToCommits converts a list of protobuf commits to a list of go struct commits. func protoToCommits(pbCommits []*pb.Commit) []*Commit { commits := make([]*Commit, len(pbCommits)) for i, c := range pbCommits { diff --git a/types/state.go b/types/state.go index cf1442b18..aa96bc985 100644 --- a/types/state.go +++ b/types/state.go @@ -5,7 +5,7 @@ import ( "fmt" "sync/atomic" - + // TODO(tzdybal): copy to local project? tmcrypto "github.com/tendermint/tendermint/crypto" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" @@ -16,34 +16,34 @@ import ( const rollappparams_modulename = "rollappparams" - +// State contains information about current state of the blockchain. type State struct { Version tmstate.Version RevisionStartHeight uint64 - + // immutable ChainID string - InitialHeight uint64 + InitialHeight uint64 // should be 1, not 0, when starting from height 1 - + // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) LastBlockHeight atomic.Uint64 - + // Proposer is a sequencer that acts as a proposer. Can be nil if no proposer is set. Proposer atomic.Pointer[Sequencer] - - + // Consensus parameters used for validating blocks. + // Changes returned by EndBlock and updated after Commit. ConsensusParams tmproto.ConsensusParams - + // Merkle root of the results from executing prev block LastResultsHash [32]byte - + // the latest AppHash we've received from calling abci.Commit() AppHash [32]byte - + // New rollapp parameters . RollappParams dymint.RollappParams - + // LastHeaderHash is the hash of the last block header. LastHeaderHash [32]byte } @@ -59,7 +59,7 @@ func (s *State) GetProposerPubKey() tmcrypto.PubKey { return proposer.PubKey() } - +// GetProposerHash returns the hash of the proposer func (s *State) GetProposerHash() []byte { proposer := s.Proposer.Load() if proposer == nil { @@ -68,7 +68,7 @@ func (s *State) GetProposerHash() []byte { return proposer.MustHash() } - +// SetProposer sets the proposer. It may set the proposer to nil. func (s *State) SetProposer(proposer *Sequencer) { s.Proposer.Store(proposer) } @@ -81,18 +81,18 @@ type RollappParams struct { Params *dymint.RollappParams } - - +// SetHeight sets the height saved in the Store if it is higher than the existing height +// returns OK if the value was updated successfully or did not need to be updated func (s *State) SetHeight(height uint64) { s.LastBlockHeight.Store(height) } - +// Height returns height of the highest block saved in the Store. func (s *State) Height() uint64 { return s.LastBlockHeight.Load() } - +// NextHeight returns the next height that expected to be stored in store. func (s *State) NextHeight() uint64 { if s.IsGenesis() { return s.InitialHeight @@ -100,7 +100,7 @@ func (s *State) NextHeight() uint64 { return s.Height() + 1 } - +// SetRollappParamsFromGenesis sets the rollapp consensus params from genesis func (s *State) SetRollappParamsFromGenesis(appState json.RawMessage) error { var objmap map[string]json.RawMessage err := json.Unmarshal(appState, &objmap) diff --git a/types/tx.go b/types/tx.go index fe4d1f6fa..0565c5a47 100644 --- a/types/tx.go +++ b/types/tx.go @@ -6,20 +6,20 @@ import ( tmbytes "github.com/tendermint/tendermint/libs/bytes" ) - +// Tx represents transaction. type Tx []byte - +// Txs represents a slice of transactions. 
type Txs []Tx - +// Hash computes the TMHASH hash of the wire encoded transaction. func (tx Tx) Hash() []byte { return tmhash.Sum(tx) } - - - +// Proof returns a simple merkle proof for this node. +// Panics if i < 0 or i >= len(txs) +// TODO: optimize this! func (txs Txs) Proof(i int) TxProof { l := len(txs) bzs := make([][]byte, l) @@ -35,7 +35,7 @@ func (txs Txs) Proof(i int) TxProof { } } - +// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. type TxProof struct { RootHash tmbytes.HexBytes `json:"root_hash"` Data Tx `json:"data"` diff --git a/types/validation.go b/types/validation.go index 3b7c37f48..aa5bedae4 100644 --- a/types/validation.go +++ b/types/validation.go @@ -21,7 +21,7 @@ func ValidateProposedTransition(state *State, block *Block, commit *Commit, prop return nil } - +// ValidateBasic performs basic validation of a block. func (b *Block) ValidateBasic() error { err := b.Header.ValidateBasic() if err != nil { @@ -93,7 +93,7 @@ func (b *Block) ValidateWithState(state *State) error { return nil } - +// ValidateBasic performs basic validation of a header. func (h *Header) ValidateBasic() error { if len(h.ProposerAddress) == 0 { return ErrEmptyProposerAddress @@ -102,13 +102,13 @@ func (h *Header) ValidateBasic() error { return nil } - - +// ValidateBasic performs basic validation of block data. +// Actually it's a placeholder, because nothing is checked. func (d *Data) ValidateBasic() error { return nil } - +// ValidateBasic performs basic validation of a commit. func (c *Commit) ValidateBasic() error { if c.Height > 0 { if len(c.Signatures) != 1 { @@ -133,7 +133,7 @@ func (c *Commit) ValidateWithHeader(proposerPubKey tmcrypto.PubKey, header *Head return err } - + // commit is validated to have single signature if !proposerPubKey.VerifySignature(abciHeaderBytes, c.Signatures[0]) { return NewErrInvalidSignatureFraud(ErrInvalidSignature, header, c) } diff --git a/utils/atomic/funcs.go b/utils/atomic/funcs.go index d6cca097e..1812d0959 100644 --- a/utils/atomic/funcs.go +++ b/utils/atomic/funcs.go @@ -4,10 +4,12 @@ import ( "sync/atomic" ) +/* +TODO: move to sdk-utils +*/ - - +// Uint64Sub does x := x-y and returns the new value of x func Uint64Sub(x *atomic.Uint64, y uint64) uint64 { - + // Uses math return x.Add(^(y - 1)) } diff --git a/utils/channel/funcs.go b/utils/channel/funcs.go index 2513314dd..614414a3f 100644 --- a/utils/channel/funcs.go +++ b/utils/channel/funcs.go @@ -1,7 +1,7 @@ package channel - - +// DrainForever will drain the channels in separate go routines in a loop forever +// Intended for tests only func DrainForever[T any](chs ...<-chan T) { for _, ch := range chs { go func() { @@ -12,17 +12,17 @@ func DrainForever[T any](chs ...<-chan T) { } } - - +// Nudger can be used to make a goroutine ('A') sleep, and have another goroutine ('B') wake him up +// A will not block if B is not asleep. type Nudger struct { - C chan struct{} + C chan struct{} // Receive on C to sleep } func NewNudger() *Nudger { return &Nudger{make(chan struct{})} } - +// Nudge wakes up the waiting thread if any. Non blocking. 
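[Editor's illustration, not part of the patch] The utils/atomic hunk above notes that Uint64Sub "uses math": adding ^(y-1), the two's complement of y, to an unsigned counter is equivalent to subtracting y modulo 2^64. A small self-contained sketch of that identity:

package main

import (
	"fmt"
	"sync/atomic"
)

// uint64Sub mirrors the Uint64Sub helper above: x.Add(^(y - 1)) performs an
// atomic x = x - y, because ^(y-1) is the two's complement of y.
func uint64Sub(x *atomic.Uint64, y uint64) uint64 {
	return x.Add(^(y - 1))
}

func main() {
	var x atomic.Uint64
	x.Store(10)
	fmt.Println(uint64Sub(&x, 3)) // 7
}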
func (w Nudger) Nudge() { select { case w.C <- struct{}{}: diff --git a/utils/errors/err_group.go b/utils/errors/err_group.go index 6ddb5d414..c4d82409a 100644 --- a/utils/errors/err_group.go +++ b/utils/errors/err_group.go @@ -5,12 +5,14 @@ import ( "golang.org/x/sync/errgroup" ) - - - - - - +/* +TODO: move to sdk-utils +*/ + +// ErrGroupGoLog calls eg.Go on the errgroup but it will log the error immediately when it occurs +// instead of waiting for all goroutines in the group to finish first. This has the advantage of making sure all +// errors are logged, not just the first one, and it is more immediate. Also, it is guaranteed, in case that +// of the goroutines is not properly context aware. func ErrGroupGoLog(eg *errgroup.Group, logger types.Logger, fn func() error) { eg.Go(func() error { err := fn() diff --git a/utils/event/funcs.go b/utils/event/funcs.go index 000cbf3f0..8b76b7ce0 100644 --- a/utils/event/funcs.go +++ b/utils/event/funcs.go @@ -12,9 +12,9 @@ import ( tmquery "github.com/tendermint/tendermint/libs/pubsub/query" ) - - - +// MustSubscribe subscribes to events and sends back a callback +// clientID is essentially the subscriber id, see https://pkg.go.dev/github.com/tendermint/tendermint/libs/pubsub#pkg-overview +// - will not panic on context cancel or deadline exceeded func MustSubscribe( ctx context.Context, pubsubServer *pubsub.Server, @@ -46,7 +46,7 @@ func MustSubscribe( } } - +// MustPublish submits an event or panics - will not panic on context cancel or deadline exceeded func MustPublish(ctx context.Context, pubsubServer *pubsub.Server, msg interface{}, events map[string][]string) { err := pubsubServer.PublishWithEvents(ctx, msg, events) if err != nil && !errors.Is(err, context.Canceled) { @@ -54,7 +54,7 @@ func MustPublish(ctx context.Context, pubsubServer *pubsub.Server, msg interface } } - +// QueryFor returns a query for the given event. func QueryFor(eventTypeKey, eventType string) tmpubsub.Query { return tmquery.MustParse(fmt.Sprintf("%s='%s'", eventTypeKey, eventType)) } diff --git a/utils/queue/queue.go b/utils/queue/queue.go index 4600ddd86..17b760ecd 100644 --- a/utils/queue/queue.go +++ b/utils/queue/queue.go @@ -5,40 +5,40 @@ import ( "strings" ) - - +// Queue holds elements in an array-list. +// This implementation is NOT thread-safe! type Queue[T any] struct { elements []T } - +// FromSlice instantiates a new queue from the given slice. func FromSlice[T any](s []T) *Queue[T] { return &Queue[T]{elements: s} } - +// New instantiates a new empty queue func New[T any]() *Queue[T] { return &Queue[T]{elements: make([]T, 0)} } - +// Enqueue adds a value to the end of the queue func (q *Queue[T]) Enqueue(values ...T) { q.elements = append(q.elements, values...) } - +// DequeueAll returns all queued elements (FIFO order) and cleans the entire queue. func (q *Queue[T]) DequeueAll() []T { values := q.elements q.elements = make([]T, 0) return values } - +// Size returns number of elements within the queue. func (q *Queue[T]) Size() int { return len(q.elements) } - +// String returns a string representation. 
func (q *Queue[T]) String() string { str := "Queue[" values := []string{} diff --git a/utils/retry/backoff.go b/utils/retry/backoff.go index b9276edaa..05d7ac53d 100644 --- a/utils/retry/backoff.go +++ b/utils/retry/backoff.go @@ -10,14 +10,14 @@ const ( defaultBackoffFactor = 2 ) - +// BackoffConfig is a configuration for a backoff, it's used to create new instances type BackoffConfig struct { InitialDelay time.Duration `json:"initial_delay"` MaxDelay time.Duration `json:"max_delay"` GrowthFactor float64 `json:"growth_factor"` } - +// Backoff creates a new Backoff instance with the configuration (starting at 0 attempts made so far) func (c BackoffConfig) Backoff() Backoff { return Backoff{ delay: c.InitialDelay, @@ -40,16 +40,16 @@ func WithInitialDelay(d time.Duration) BackoffOption { } } - - +// WithMaxDelay sets the maximum delay for the backoff. The delay will not exceed this value. +// Set 0 to disable the maximum delay. func WithMaxDelay(d time.Duration) BackoffOption { return func(b *BackoffConfig) { b.MaxDelay = d } } - - +// WithGrowthFactor sets the growth factor for the backoff. The delay will be multiplied by this factor on each call to Delay. +// The factor should be greater than 1.0 func WithGrowthFactor(x float64) BackoffOption { return func(b *BackoffConfig) { b.GrowthFactor = x @@ -68,7 +68,7 @@ func NewBackoffConfig(opts ...BackoffOption) BackoffConfig { return ret } - +// Delay returns the current delay. The subsequent delay will be increased by the growth factor up to the maximum. func (b *Backoff) Delay() time.Duration { ret := b.delay b.delay = time.Duration(float64(b.delay) * b.growthFactor) @@ -78,7 +78,7 @@ func (b *Backoff) Delay() time.Duration { return ret } - +// Sleep sleeps for the current delay. The subsequent delay will be increased by the growth factor up to the maximum. func (b *Backoff) Sleep() { time.Sleep(b.Delay()) } diff --git a/utils/retry/doc.go b/utils/retry/doc.go index 6d41b0f16..fe69a7266 100644 --- a/utils/retry/doc.go +++ b/utils/retry/doc.go @@ -1,4 +1,4 @@ - - - +// Package retry shall be used alongside "github.com/avast/retry-go/v4" for simple retry patterns +// which the avast package makes difficult. +// Methods in here should be simple and not warrant another dependency. 
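[Editor's illustration, not part of the patch] The retry hunk above documents Delay as returning the current delay and then multiplying it by the growth factor, capped at the maximum (0 disables the cap). A minimal sketch of that growth rule, using hypothetical local names rather than the package's types:

package main

import (
	"fmt"
	"time"
)

// backoffSketch mimics the documented Delay behaviour: return the current
// delay, then grow it by the factor, capping at maxDelay (0 = no cap).
type backoffSketch struct {
	delay    time.Duration
	maxDelay time.Duration
	growth   float64
}

func (b *backoffSketch) Delay() time.Duration {
	ret := b.delay
	b.delay = time.Duration(float64(b.delay) * b.growth)
	if b.maxDelay != 0 && b.maxDelay < b.delay {
		b.delay = b.maxDelay
	}
	return ret
}

func main() {
	b := backoffSketch{delay: 200 * time.Millisecond, maxDelay: time.Second, growth: 2}
	for i := 0; i < 4; i++ {
		fmt.Println(b.Delay()) // 200ms, 400ms, 800ms, 1s
	}
}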
package retry diff --git a/version/version.go b/version/version.go index d461e5b9b..acbae16e8 100644 --- a/version/version.go +++ b/version/version.go @@ -15,5 +15,5 @@ func GetDRSVersion() (uint32, error) { if err != nil { return uint32(0), fmt.Errorf("converting DRS version to int: %v", err) } - return uint32(currentDRS), nil + return uint32(currentDRS), nil //nolint:gosec // DRS is uint32 } From 806e1cdb24596000b58e5fba5b988bc5d6a89ec8 Mon Sep 17 00:00:00 2001 From: danwt <30197399+danwt@users.noreply.github.com> Date: Thu, 12 Dec 2024 17:52:07 +0000 Subject: [PATCH 3/4] try again --- block/balance.go | 2 +- block/block.go | 110 ++--- block/block_cache.go | 2 +- block/consensus.go | 2 +- block/executor.go | 44 +- block/fork.go | 98 ++-- block/fraud.go | 12 +- block/initchain.go | 12 +- block/manager.go | 146 +++--- block/modes.go | 40 +- block/p2p.go | 16 +- block/produce.go | 126 ++--- block/pruning.go | 16 +- block/retriever.go | 32 +- block/sequencers.go | 58 +-- block/slvalidator.go | 70 +-- block/state.go | 50 +- block/submit.go | 94 ++-- block/sync.go | 44 +- block/validate.go | 12 +- cmd/dymint/commands/init.go | 8 +- cmd/dymint/commands/root.go | 8 +- cmd/dymint/commands/show_node_id.go | 4 +- cmd/dymint/commands/show_sequencer.go | 4 +- cmd/dymint/commands/start.go | 12 +- cmd/dymint/main.go | 2 +- config/config.go | 62 +-- config/defaults.go | 10 +- config/flags.go | 10 +- config/p2p.go | 20 +- config/rpc.go | 46 +- config/toml.go | 16 +- conv/config.go | 15 +- conv/crypto.go | 2 +- da/avail/avail.go | 68 +-- da/celestia/celestia.go | 68 +-- da/celestia/config.go | 6 +- da/celestia/mock/messages.go | 22 +- da/celestia/mock/server.go | 8 +- da/celestia/rpc.go | 18 +- da/celestia/types/rpc.go | 6 +- da/celestia/types/types.go | 66 +-- da/da.go | 126 ++--- da/errors.go | 22 +- da/grpc/grpc.go | 30 +- da/grpc/mockserv/mockserv.go | 2 +- da/local/local.go | 36 +- da/registry/registry.go | 6 +- indexers/blockindexer/block.go | 14 +- indexers/blockindexer/kv/kv.go | 140 +++--- indexers/blockindexer/null/null.go | 2 +- indexers/blockindexer/query_range.go | 30 +- indexers/txindex/indexer.go | 26 +- indexers/txindex/indexer_service.go | 30 +- indexers/txindex/kv/kv.go | 206 ++++---- indexers/txindex/kv/utils.go | 2 +- indexers/txindex/null/null.go | 8 +- mempool/cache.go | 32 +- mempool/clist/clist.go | 148 +++--- mempool/ids.go | 2 +- mempool/mempool.go | 128 ++--- mempool/metrics.go | 40 +- mempool/mock/mempool.go | 2 +- mempool/tx.go | 12 +- mempool/v1/mempool.go | 448 +++++++++--------- mempool/v1/tx.go | 38 +- .../dymint/block/mock_ExecutorI.go | 124 ++--- .../dymint/block/mock_FraudHandler.go | 18 +- .../dymint/da/avail/mock_SubstrateApiI.go | 380 +++++++-------- .../celestia/types/mock_CelestiaRPCClient.go | 94 ++-- .../da/mock_DataAvailabilityLayerClient.go | 76 +-- .../dymint/p2p/mock_ProposerGetter.go | 20 +- .../dymint/p2p/mock_StateGetter.go | 20 +- .../settlement/dymension/mock_CosmosClient.go | 98 ++-- .../dymint/settlement/mock_ClientI.go | 142 +++--- .../dymensionxyz/dymint/store/mock_Store.go | 272 +++++------ .../sequencer/types/mock_QueryClient.go | 92 ++-- .../dymension/rollapp/mock_QueryClient.go | 128 ++--- .../dymension/sequencer/mock_QueryClient.go | 104 ++-- .../tendermint/abci/types/mock_Application.go | 110 ++--- .../tendermint/proxy/mock_AppConnConsensus.go | 60 +-- .../tendermint/proxy/mock_AppConns.go | 94 ++-- node/events/types.go | 12 +- node/mempool/mempool.go | 18 +- node/node.go | 46 +- p2p/block.go | 22 +- p2p/block_sync.go | 42 +- 
p2p/block_sync_dag.go | 30 +- p2p/blocks_received.go | 12 +- p2p/client.go | 140 +++--- p2p/events.go | 18 +- p2p/gossip.go | 26 +- p2p/validator.go | 22 +- rpc/client/client.go | 233 +++++---- rpc/client/utils.go | 12 +- rpc/json/handler.go | 30 +- rpc/json/service.go | 22 +- rpc/json/types.go | 22 +- rpc/json/ws.go | 8 +- rpc/middleware/client.go | 8 +- rpc/middleware/registry.go | 12 +- rpc/middleware/status.go | 2 +- rpc/server.go | 30 +- settlement/config.go | 6 +- settlement/dymension/cosmosclient.go | 10 +- settlement/dymension/dymension.go | 98 ++-- settlement/dymension/events.go | 20 +- settlement/dymension/options.go | 12 +- settlement/dymension/utils.go | 8 +- settlement/errors.go | 2 +- settlement/events.go | 14 +- settlement/grpc/grpc.go | 42 +- settlement/local/local.go | 50 +- settlement/registry/registry.go | 14 +- settlement/settlement.go | 64 +-- store/badger.go | 88 ++-- store/prefix.go | 26 +- store/pruning.go | 8 +- store/store.go | 48 +- store/storeIface.go | 54 +-- test/loadtime/cmd/load/main.go | 22 +- test/loadtime/cmd/report/main.go | 6 +- test/loadtime/payload/payload.go | 30 +- test/loadtime/report/report.go | 74 +-- testutil/block.go | 14 +- testutil/logger.go | 24 +- testutil/mocks.go | 52 +- testutil/node.go | 6 +- testutil/p2p.go | 6 +- testutil/rpc.go | 4 +- testutil/types.go | 26 +- types/batch.go | 20 +- types/block.go | 66 +-- types/block_source.go | 2 +- types/conv.go | 32 +- types/errors.go | 56 +-- types/evidence.go | 22 +- types/hashing.go | 4 +- types/instruction.go | 2 +- types/logger.go | 2 +- .../dymensionxyz/dymension/rollapp/errors.go | 6 +- .../dymensionxyz/dymension/rollapp/events.go | 4 +- .../pb/dymensionxyz/dymension/rollapp/keys.go | 10 +- .../dymension/rollapp/message_update_state.go | 10 +- .../dymensionxyz/dymension/rollapp/params.go | 2 +- .../dymension/sequencer/events.go | 14 +- .../dymensionxyz/dymension/sequencer/keys.go | 40 +- .../dymension/sequencer/params.go | 2 +- types/rollapp.go | 2 +- types/sequencer_set.go | 68 +-- types/serialization.go | 80 ++-- types/state.go | 38 +- types/tx.go | 14 +- types/validation.go | 12 +- utils/atomic/funcs.go | 8 +- utils/channel/funcs.go | 12 +- utils/errors/err_group.go | 14 +- utils/event/funcs.go | 10 +- utils/queue/queue.go | 16 +- utils/retry/backoff.go | 16 +- utils/retry/doc.go | 6 +- version/version.go | 2 +- 162 files changed, 3486 insertions(+), 3530 deletions(-) diff --git a/block/balance.go b/block/balance.go index 9c0e301fe..f77b518f3 100644 --- a/block/balance.go +++ b/block/balance.go @@ -14,7 +14,7 @@ import ( const CheckBalancesInterval = 3 * time.Minute -// MonitorBalances checks the balances of the node and updates the gauges for prometheus + func (m *Manager) MonitorBalances(ctx context.Context) error { ticker := time.NewTicker(CheckBalancesInterval) defer ticker.Stop() diff --git a/block/block.go b/block/block.go index 4b4562794..b8a6f3913 100644 --- a/block/block.go +++ b/block/block.go @@ -11,12 +11,12 @@ import ( "github.com/dymensionxyz/dymint/types" ) -// applyBlockWithFraudHandling calls applyBlock and validateBlockBeforeApply with fraud handling. + func (m *Manager) applyBlockWithFraudHandling(block *types.Block, commit *types.Commit, blockMetaData types.BlockMetaData) error { validateWithFraud := func() error { if err := m.validateBlockBeforeApply(block, commit); err != nil { m.blockCache.Delete(block.Header.Height) - // TODO: can we take an action here such as dropping the peer / reducing their reputation? 
+ return fmt.Errorf("block not valid at height %d, dropping it: err:%w", block.Header.Height, err) } @@ -29,27 +29,27 @@ func (m *Manager) applyBlockWithFraudHandling(block *types.Block, commit *types. err := validateWithFraud() if errors.Is(err, gerrc.ErrFault) { - // Here we handle the fault by calling the fraud handler. - // FraudHandler is an interface that defines a method to handle faults. Implement this interface to handle faults - // in specific ways. For example, once a fault is detected, it publishes a DataHealthStatus event to the - // pubsub which sets the node in a frozen state. + + + + m.FraudHandler.HandleFault(m.Ctx, err) } return err } -// applyBlock applies the block to the store and the abci app. -// Contract: block and commit must be validated before calling this function! -// steps: save block -> execute block with app -> update state -> commit block to app -> update state's height and commit result. -// As the entire process can't be atomic we need to make sure the following condition apply before -// - block height is the expected block height on the store (height + 1). -// - block height is the expected block height on the app (last block height + 1). + + + + + + func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMetaData types.BlockMetaData) error { var retainHeight int64 - // TODO: add switch case to have defined behavior for each case. - // validate block height + + if block.Header.Height != m.State.NextHeight() { return types.ErrInvalidBlockHeight } @@ -58,13 +58,13 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta m.logger.Debug("Applying block", "height", block.Header.Height, "source", blockMetaData.Source.String()) - // Check if the app's last block height is the same as the currently produced block height + isBlockAlreadyApplied, err := m.isHeightAlreadyApplied(block.Header.Height) if err != nil { return fmt.Errorf("check if block is already applied: %w", err) } - // In case the following true, it means we crashed after the app commit but before updating the state - // In that case we'll want to align the state with the app commit result, as if the block was applied. + + if isBlockAlreadyApplied { err := m.UpdateStateFromApp(block.Header.Hash()) if err != nil { @@ -73,7 +73,7 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta m.logger.Info("updated state from app commit", "height", block.Header.Height) } else { var appHash []byte - // Start applying the block assuming no inconsistency was found. + _, err = m.Store.SaveBlock(block, commit, nil) if err != nil { return fmt.Errorf("save block: %w", err) @@ -104,15 +104,15 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta return fmt.Errorf("add drs version: %w", err) } - // Commit block to app + appHash, retainHeight, err = m.Executor.Commit(m.State, block, responses) if err != nil { return fmt.Errorf("commit block: %w", err) } - // Prune old heights, if requested by ABCI app. - // retainHeight is determined by currentHeight - min-retain-blocks (app.toml config). - // Unless max_age_num_blocks in consensus params is higher than min-retain-block, then max_age_num_blocks will be used instead of min-retain-blocks. + + + if 0 < retainHeight { select { @@ -121,25 +121,25 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta m.logger.Debug("pruning channel full. 
skipping pruning", "retainHeight", retainHeight) } } - // Update the state with the new app hash, and store height from the commit. - // Every one of those, if happens before commit, prevents us from re-executing the block in case failed during commit. + + m.Executor.UpdateStateAfterCommit(m.State, responses, appHash, block.Header.Height, block.Header.Hash()) } - // save last block time used to calculate batch skew time + m.LastBlockTime.Store(block.Header.GetTimestamp().UTC().UnixNano()) - // Update the store: - // 1. Save the proposer for the current height to the store. - // 2. Update the proposer in the state in case of rotation. - // 3. Save the state to the store (independently of the height). Here the proposer might differ from (1). - // 4. Save the last block sequencer set to the store if it's present (only applicable in the sequencer mode). - // here, (3) helps properly handle reboots (specifically when there's rotation). - // If reboot happens after block H (which rotates seqA -> seqB): - // - Block H+1 will be signed by seqB. - // - The state must have seqB as proposer. - - // Proposer cannot be empty while applying the block + + + + + + + + + + + proposer := m.State.GetProposer() if proposer == nil { return fmt.Errorf("logic error: got nil proposer while applying block") @@ -147,28 +147,28 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta batch := m.Store.NewBatch() - // 1. Save the proposer for the current height to the store. - // Proposer in the store is used for RPC queries. + + batch, err = m.Store.SaveProposer(block.Header.Height, *proposer, batch) if err != nil { return fmt.Errorf("save proposer: %w", err) } - // 2. Update the proposer in the state in case of rotation happened on the rollapp level (not necessarily on the hub yet). + isProposerUpdated := m.Executor.UpdateProposerFromBlock(m.State, m.Sequencers, block) - // 3. Save the state to the store (independently of the height). Here the proposer might differ from (1). + batch, err = m.Store.SaveState(m.State, batch) if err != nil { return fmt.Errorf("update state: %w", err) } - // 4. Save the last block sequencer set to the store if it's present (only applicable in the sequencer mode). - // The set from the state is dumped to memory on reboots. It helps to avoid sending unnecessary - // UspertSequencer consensus messages on reboots. This is not a 100% solution, because the sequencer set - // is not persisted in the store in full node mode. It's only used in the proposer mode. Therefore, - // on rotation from the full node to the proposer, the sequencer set is duplicated as consensus msgs. - // Though single-time duplication it's not a big deal. + + + + + + if len(blockMetaData.SequencerSet) != 0 { batch, err = m.Store.SaveLastBlockSequencerSet(blockMetaData.SequencerSet, batch) if err != nil { @@ -185,16 +185,16 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta m.blockCache.Delete(block.Header.Height) - // validate whether configuration params and rollapp consensus params keep in line, after rollapp params are updated from the responses received in the block execution + err = m.ValidateConfigWithRollappParams() if err != nil { return err } - // Check if there was an Update for the proposer and if I am the new proposer. - // If so, restart so I can start as the proposer. - // For current proposer, we don't want to restart because we still need to send the last batch. - // This will be done as part of the `rotate` function. 
+ + + + if isProposerUpdated && m.AmIProposerOnRollapp() { panic("I'm the new Proposer now. restarting as a proposer") } @@ -202,16 +202,16 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta return nil } -// isHeightAlreadyApplied checks if the block height is already applied to the app. + func (m *Manager) isHeightAlreadyApplied(blockHeight uint64) (bool, error) { proxyAppInfo, err := m.Executor.GetAppInfo() if err != nil { return false, errorsmod.Wrap(err, "get app info") } - isBlockAlreadyApplied := uint64(proxyAppInfo.LastBlockHeight) == blockHeight //nolint:gosec // LastBlockHeight is always positive + isBlockAlreadyApplied := uint64(proxyAppInfo.LastBlockHeight) == blockHeight - // TODO: add switch case to validate better the current app state + return isBlockAlreadyApplied, nil } @@ -240,7 +240,7 @@ func (m *Manager) attemptApplyCachedBlocks() error { return nil } -// This function validates the block and commit against the state before applying it. + func (m *Manager) validateBlockBeforeApply(block *types.Block, commit *types.Commit) error { return types.ValidateProposedTransition(m.State, block, commit, m.State.GetProposerPubKey()) } diff --git a/block/block_cache.go b/block/block_cache.go index b224f69fc..b74176d9e 100644 --- a/block/block_cache.go +++ b/block/block_cache.go @@ -5,7 +5,7 @@ import ( ) type Cache struct { - // concurrency managed by Manager.retrieverMu mutex + cache map[uint64]types.CachedBlock } diff --git a/block/consensus.go b/block/consensus.go index 94ce55f15..87cc6c39d 100644 --- a/block/consensus.go +++ b/block/consensus.go @@ -47,7 +47,7 @@ func ConsensusMsgSigner(m proto.Message) (sdk.AccAddress, error) { } } -// ConsensusMsgsOnSequencerSetUpdate forms a list of consensus messages to handle the sequencer set update. + func ConsensusMsgsOnSequencerSetUpdate(newSequencers []types.Sequencer) ([]proto.Message, error) { msgs := make([]proto.Message, 0, len(newSequencers)) for _, s := range newSequencers { diff --git a/block/executor.go b/block/executor.go index f3a1421c5..9f7d72f8b 100644 --- a/block/executor.go +++ b/block/executor.go @@ -19,7 +19,7 @@ import ( protoutils "github.com/dymensionxyz/dymint/utils/proto" ) -// default minimum block max size allowed. not specific reason to set it to 10K, but we need to avoid no transactions can be included in a block. + const minBlockMaxBytes = 10000 type ExecutorI interface { @@ -33,7 +33,7 @@ type ExecutorI interface { UpdateStateAfterCommit(s *types.State, resp *tmstate.ABCIResponses, appHash []byte, height uint64, lastHeaderHash [32]byte) UpdateProposerFromBlock(s *types.State, seqSet *types.SequencerSet, block *types.Block) bool - /* Consensus Messages */ + AddConsensusMsgs(...proto2.Message) GetConsensusMsgs() []proto2.Message @@ -41,7 +41,7 @@ type ExecutorI interface { var _ ExecutorI = new(Executor) -// Executor creates and applies blocks and maintains state. + type Executor struct { localAddress []byte chainID string @@ -55,8 +55,8 @@ type Executor struct { logger types.Logger } -// NewExecutor creates new instance of BlockExecutor. -// localAddress will be used in sequencer mode only. + + func NewExecutor( localAddress []byte, chainID string, @@ -79,23 +79,23 @@ func NewExecutor( return &be, nil } -// AddConsensusMsgs adds new consensus msgs to the queue. -// The method is thread-safe. + + func (e *Executor) AddConsensusMsgs(msgs ...proto2.Message) { e.consensusMsgQueue.Add(msgs...) } -// GetConsensusMsgs dequeues consensus msgs from the queue. 
-// The method is thread-safe. + + func (e *Executor) GetConsensusMsgs() []proto2.Message { return e.consensusMsgQueue.Get() } -// InitChain calls InitChainSync using consensus connection to app. + func (e *Executor) InitChain(genesis *tmtypes.GenesisDoc, genesisChecksum string, valset []*tmtypes.Validator) (*abci.ResponseInitChain, error) { valUpdates := abci.ValidatorUpdates{} - // prepare the validator updates as expected by the ABCI app + for _, validator := range valset { tmkey, err := tmcrypto.PubKeyToProto(validator.PubKey) if err != nil { @@ -136,7 +136,7 @@ func (e *Executor) InitChain(genesis *tmtypes.GenesisDoc, genesisChecksum string }) } -// CreateBlock reaps transactions from mempool and builds a block. + func (e *Executor) CreateBlock( height uint64, lastCommit *types.Commit, @@ -144,8 +144,8 @@ func (e *Executor) CreateBlock( state *types.State, maxBlockDataSizeBytes uint64, ) *types.Block { - maxBlockDataSizeBytes = min(maxBlockDataSizeBytes, uint64(max(minBlockMaxBytes, state.ConsensusParams.Block.MaxBytes))) //nolint:gosec // MaxBytes is always positive - mempoolTxs := e.mempool.ReapMaxBytesMaxGas(int64(maxBlockDataSizeBytes), state.ConsensusParams.Block.MaxGas) //nolint:gosec // size is always positive and falls in int64 + maxBlockDataSizeBytes = min(maxBlockDataSizeBytes, uint64(max(minBlockMaxBytes, state.ConsensusParams.Block.MaxBytes))) + mempoolTxs := e.mempool.ReapMaxBytesMaxGas(int64(maxBlockDataSizeBytes), state.ConsensusParams.Block.MaxGas) block := &types.Block{ Header: types.Header{ @@ -178,7 +178,7 @@ func (e *Executor) CreateBlock( return block } -// Commit commits the block + func (e *Executor) Commit(state *types.State, block *types.Block, resp *tmstate.ABCIResponses) ([]byte, int64, error) { appHash, retainHeight, err := e.commit(state, block, resp.DeliverTxs) if err != nil { @@ -193,7 +193,7 @@ func (e *Executor) Commit(state *types.State, block *types.Block, resp *tmstate. return appHash, retainHeight, nil } -// GetAppInfo returns the latest AppInfo from the proxyApp. + func (e *Executor) GetAppInfo() (*abci.ResponseInfo, error) { return e.proxyAppQueryConn.InfoSync(abci.RequestInfo{}) } @@ -214,7 +214,7 @@ func (e *Executor) commit(state *types.State, block *types.Block, deliverTxs []* maxBytes := state.ConsensusParams.Block.MaxBytes maxGas := state.ConsensusParams.Block.MaxGas - err = e.mempool.Update(int64(block.Header.Height), fromDymintTxs(block.Data.Txs), deliverTxs) //nolint:gosec // height is non-negative and falls in int64 + err = e.mempool.Update(int64(block.Header.Height), fromDymintTxs(block.Data.Txs), deliverTxs) if err != nil { return nil, 0, err } @@ -224,7 +224,7 @@ func (e *Executor) commit(state *types.State, block *types.Block, deliverTxs []* return resp.Data, resp.RetainHeight, err } -// ExecuteBlock executes the block and returns the ABCIResponses. Block should be valid (passed validation checks). 
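[Editor's illustration, not part of the patch] The executor hunk above clamps the reaped block data size to min(requested, max(minBlockMaxBytes, consensus MaxBytes)), so a misconfigured consensus limit can never push the block below the 10 KB floor. A worked sketch of that clamp with hypothetical values (requires the Go 1.21 min/max builtins, which the hunk itself already uses):

package main

import "fmt"

const minBlockMaxBytes = 10000 // floor used in the executor hunk above

// clampBlockSize mirrors the expression in CreateBlock:
// min(requested, max(minBlockMaxBytes, consensusMaxBytes)).
func clampBlockSize(requested uint64, consensusMaxBytes int64) uint64 {
	return min(requested, uint64(max(minBlockMaxBytes, consensusMaxBytes)))
}

func main() {
	fmt.Println(clampBlockSize(500_000, 22020096)) // 500000: consensus cap is higher
	fmt.Println(clampBlockSize(500_000, 4096))     // 10000: the floor wins over a tiny consensus cap
}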
+ func (e *Executor) ExecuteBlock(block *types.Block) (*tmstate.ABCIResponses, error) { abciResponses := new(tmstate.ABCIResponses) abciResponses.DeliverTxs = make([]*abci.ResponseDeliverTx, len(block.Data.Txs)) @@ -273,7 +273,7 @@ func (e *Executor) ExecuteBlock(block *types.Block) (*tmstate.ABCIResponses, err } } - abciResponses.EndBlock, err = e.proxyAppConsensusConn.EndBlockSync(abci.RequestEndBlock{Height: int64(block.Header.Height)}) //nolint:gosec // height is non-negative and falls in int64 + abciResponses.EndBlock, err = e.proxyAppConsensusConn.EndBlockSync(abci.RequestEndBlock{Height: int64(block.Header.Height)}) if err != nil { return nil, err } @@ -305,14 +305,14 @@ func (e *Executor) publishEvents(resp *tmstate.ABCIResponses, block *types.Block for _, ev := range abciBlock.Evidence.Evidence { err = multierr.Append(err, e.eventBus.PublishEventNewEvidence(tmtypes.EventDataNewEvidence{ Evidence: ev, - Height: int64(block.Header.Height), //nolint:gosec // height is non-negative and falls in int64 + Height: int64(block.Header.Height), })) } for i, dtx := range resp.DeliverTxs { err = multierr.Append(err, e.eventBus.PublishEventTx(tmtypes.EventDataTx{ TxResult: abci.TxResult{ - Height: int64(block.Header.Height), //nolint:gosec // block height is within int64 range - Index: uint32(i), //nolint:gosec // num of deliver txs is less than 2^32 + Height: int64(block.Header.Height), + Index: uint32(i), Tx: abciBlock.Data.Txs[i], Result: *dtx, }, diff --git a/block/fork.go b/block/fork.go index c559ba132..5f1ff5878 100644 --- a/block/fork.go +++ b/block/fork.go @@ -20,9 +20,9 @@ const ( ForkMessage = "rollapp fork detected. please rollback to height previous to rollapp_revision_start_height." ) -// MonitorForkUpdateLoop monitors the hub for fork updates in a loop + func (m *Manager) MonitorForkUpdateLoop(ctx context.Context) error { - ticker := time.NewTicker(ForkMonitorInterval) // TODO make this configurable + ticker := time.NewTicker(ForkMonitorInterval) defer ticker.Stop() for { @@ -37,7 +37,7 @@ func (m *Manager) MonitorForkUpdateLoop(ctx context.Context) error { } } -// checkForkUpdate checks if the hub has a fork update + func (m *Manager) checkForkUpdate(msg string) error { defer m.forkMu.Unlock() m.forkMu.Lock() @@ -69,7 +69,7 @@ func (m *Manager) checkForkUpdate(msg string) error { return nil } -// createInstruction returns instruction with fork information + func (m *Manager) createInstruction(expectedRevision types.Revision) (types.Instruction, error) { obsoleteDrs, err := m.SLClient.GetObsoleteDrs() if err != nil { @@ -85,11 +85,11 @@ func (m *Manager) createInstruction(expectedRevision types.Revision) (types.Inst return instruction, nil } -// shouldStopNode determines if a rollapp node should be stopped based on revision criteria. -// -// This method checks two conditions to decide if a node should be stopped: -// 1. If the next state height is greater than or equal to the rollapp's revision start height. -// 2. 
If the block's app version (equivalent to revision) is less than the rollapp's revision + + + + + func shouldStopNode( expectedRevision types.Revision, nextHeight uint64, @@ -98,7 +98,7 @@ func shouldStopNode( return nextHeight >= expectedRevision.StartHeight && actualRevisionNumber < expectedRevision.Number } -// getRevisionFromSL returns revision data for the specific height + func (m *Manager) getRevisionFromSL(height uint64) (types.Revision, error) { rollapp, err := m.SLClient.GetRollapp() if err != nil { @@ -107,26 +107,26 @@ func (m *Manager) getRevisionFromSL(height uint64) (types.Revision, error) { return rollapp.GetRevisionForHeight(height), nil } -// doFork creates fork blocks and submits a new batch with them + func (m *Manager) doFork(instruction types.Instruction) error { - // if fork (two) blocks are not produced and applied yet, produce them + if m.State.Height() < instruction.RevisionStartHeight+1 { - // add consensus msgs to upgrade DRS to running node version (msg is created in all cases and RDK will upgrade if necessary). If returns error if running version is deprecated. + consensusMsgs, err := m.prepareDRSUpgradeMessages(instruction.FaultyDRS) if err != nil { return fmt.Errorf("prepare DRS upgrade messages: %v", err) } - // add consensus msg to bump the account sequences in all fork cases + consensusMsgs = append(consensusMsgs, &sequencers.MsgBumpAccountSequences{Authority: authtypes.NewModuleAddress("sequencers").String()}) - // create fork blocks + err = m.createForkBlocks(instruction, consensusMsgs) if err != nil { return fmt.Errorf("validate fork blocks: %v", err) } } - // submit fork batch including two fork blocks + if err := m.submitForkBatch(instruction.RevisionStartHeight); err != nil { return fmt.Errorf("submit fork batch: %v", err) } @@ -134,13 +134,13 @@ func (m *Manager) doFork(instruction types.Instruction) error { return nil } -// prepareDRSUpgradeMessages prepares consensus messages for DRS upgrades. -// It performs version validation and generates the necessary upgrade messages for the sequencer. -// -// The function implements the following logic: -// - If no faulty DRS version is provided (faultyDRS is nil), returns no messages -// - Validates the current DRS version against the potentially faulty version -// - Generates an upgrade message with the current valid DRS version + + + + + + + func (m *Manager) prepareDRSUpgradeMessages(obsoleteDRS []uint32) ([]proto.Message, error) { drsVersion, err := version.GetDRSVersion() if err != nil { @@ -161,13 +161,13 @@ func (m *Manager) prepareDRSUpgradeMessages(obsoleteDRS []uint32) ([]proto.Messa }, nil } -// create the first two blocks of the new revision -// the first one should have a cons message(s) -// both should not have tx's + + + func (m *Manager) createForkBlocks(instruction types.Instruction, consensusMsgs []proto.Message) error { nextHeight := m.State.NextHeight() - // Revise already created fork blocks + for h := instruction.RevisionStartHeight; h < nextHeight; h++ { b, err := m.Store.LoadBlock(h) if err != nil { @@ -183,7 +183,7 @@ func (m *Manager) createForkBlocks(instruction types.Instruction, consensusMsgs } } - // create two empty blocks including consensus msgs in the first one + for h := nextHeight; h < instruction.RevisionStartHeight+2; h++ { if h == instruction.RevisionStartHeight { m.Executor.AddConsensusMsgs(consensusMsgs...) 
@@ -201,13 +201,13 @@ func (m *Manager) createForkBlocks(instruction types.Instruction, consensusMsgs return nil } -// submitForkBatch verifies and, if necessary, creates a batch at the specified height. -// This function is critical for maintaining batch consistency in the blockchain while -// preventing duplicate batch submissions. -// -// The function performs the following operations: -// 1. Checks for an existing batch at the specified height via SLClient -// 2. If no batch exists, creates and submits a new one + + + + + + + func (m *Manager) submitForkBatch(height uint64) error { resp, err := m.SLClient.GetBatchAtHeight(height) if err != nil && !errors.Is(err, gerrc.ErrNotFound) { @@ -225,62 +225,62 @@ func (m *Manager) submitForkBatch(height uint64) error { return nil } -// updateStateForNextRevision updates dymint stored state in case next height corresponds to a new revision, to enable syncing (and validation) for rollapps with multiple revisions. + func (m *Manager) updateStateForNextRevision() error { - // in case fork is detected dymint state needs to be updated + - // get next revision according to node height + nextRevision, err := m.getRevisionFromSL(m.State.NextHeight()) if err != nil { return err } - // if next height is revision start height, update local state + if nextRevision.StartHeight == m.State.NextHeight() { - // Set proposer to nil to force updating it from SL + m.State.SetProposer(nil) - // Upgrade revision on state + m.State.RevisionStartHeight = nextRevision.StartHeight m.State.SetRevision(nextRevision.Number) - // update stored state + _, err = m.Store.SaveState(m.State, nil) return err } return nil } -// doForkWhenNewRevision creates and submit to SL fork blocks according to next revision start height. + func (m *Manager) doForkWhenNewRevision() error { defer m.forkMu.Unlock() m.forkMu.Lock() - // get revision next height + expectedRevision, err := m.getRevisionFromSL(m.State.NextHeight()) if err != nil { return err } - // create fork batch in case it has not been submitted yet + if m.LastSettlementHeight.Load() < expectedRevision.StartHeight { instruction, err := m.createInstruction(expectedRevision) if err != nil { return err } - // update revision with revision after fork + m.State.SetRevision(instruction.Revision) - // create and submit fork batch + err = m.doFork(instruction) if err != nil { return err } } - // this cannot happen. it means the revision number obtained is not the same or the next revision. unable to fork. + if expectedRevision.Number != m.State.GetRevision() { panic("Inconsistent expected revision number from Hub. Unable to fork") } - // remove instruction file after fork + return types.DeleteInstructionFromDisk(m.RootDir) } diff --git a/block/fraud.go b/block/fraud.go index 11a95c493..f543420eb 100644 --- a/block/fraud.go +++ b/block/fraud.go @@ -4,16 +4,16 @@ import ( "context" ) -// FraudHandler is an interface that defines a method to handle faults. -// Contract: should not be blocking. + + type FraudHandler interface { - // HandleFault handles a fault that occurred in the system. - // The fault is passed as an error type. + + HandleFault(ctx context.Context, fault error) } -// FreezeHandler is used to handle faults coming from executing and validating blocks. -// once a fault is detected, it publishes a DataHealthStatus event to the pubsub which sets the node in a frozen state. 
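// Editor's note: illustrative sketch, not part of this patch. It shows the general shape of
// the non-blocking fault-handler contract described by the comments removed above; the event
// channel and type names below are hypothetical stand-ins, not the dymint API.
package main

import (
	"context"
	"fmt"
)

// faultHandler mirrors the contract: handle a fault without blocking the caller.
type faultHandler interface {
	HandleFault(ctx context.Context, fault error)
}

// freezeHandler publishes a health-status event and cancels the node context.
type freezeHandler struct {
	events chan<- error       // stand-in for a pubsub health-status topic
	cancel context.CancelFunc // stops block production/processing
}

func (f freezeHandler) HandleFault(ctx context.Context, fault error) {
	select {
	case f.events <- fault: // best-effort publish; never block the caller
	default:
	}
	f.cancel()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	events := make(chan error, 1)
	var h faultHandler = freezeHandler{events: events, cancel: cancel}
	h.HandleFault(ctx, fmt.Errorf("fraud detected"))
	fmt.Println("frozen:", ctx.Err() != nil, "event:", <-events)
}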
+ + type FreezeHandler struct { m *Manager } diff --git a/block/initchain.go b/block/initchain.go index 604bbe3cb..48fea86a7 100644 --- a/block/initchain.go +++ b/block/initchain.go @@ -11,8 +11,8 @@ import ( ) func (m *Manager) RunInitChain() error { - // Get the proposer at the initial height. If we're at genesis the height will be 0. - proposer, err := m.SLClient.GetProposerAtHeight(int64(m.State.Height()) + 1) //nolint:gosec // height is non-negative and falls in int64 + + proposer, err := m.SLClient.GetProposerAtHeight(int64(m.State.Height()) + 1) if err != nil { return fmt.Errorf("get proposer at height: %w", err) } @@ -25,13 +25,13 @@ func (m *Manager) RunInitChain() error { return err } - // validate the resulting genesis bridge data against the hub + err = m.ValidateGenesisBridgeData(res.GenesisBridgeDataBytes) if err != nil { return fmt.Errorf("Cannot validate genesis bridge data: %w. Please call `$EXECUTABLE dymint unsafe-reset-all` before the next launch to reset this node to genesis state.", err) } - // update the state with only the consensus pubkey + m.Executor.UpdateStateAfterInitChain(m.State, res) m.Executor.UpdateMempoolAfterInitChain(m.State) if _, err := m.Store.SaveState(m.State, nil); err != nil { @@ -41,8 +41,8 @@ func (m *Manager) RunInitChain() error { return nil } -// ValidateGenesisBridgeData validates the genesis bridge data from -// InitChainResponse against the rollapp genesis stored in the hub. + + func (m *Manager) ValidateGenesisBridgeData(dataBytes []byte) error { if len(dataBytes) == 0 { return fmt.Errorf("genesis bridge data is empty in InitChainResponse") diff --git a/block/manager.go b/block/manager.go index 61d74a6ab..06594e29d 100644 --- a/block/manager.go +++ b/block/manager.go @@ -36,99 +36,95 @@ import ( ) const ( - // RunModeProposer represents a node running as a proposer + RunModeProposer uint = iota - // RunModeFullNode represents a node running as a full node + RunModeFullNode ) -// Manager is responsible for aggregating transactions into blocks. + type Manager struct { logger types.Logger - // Configuration + Conf config.BlockManagerConfig Genesis *tmtypes.GenesisDoc GenesisChecksum string LocalKey crypto.PrivKey RootDir string - // Store and execution + Store store.Store State *types.State Executor ExecutorI - Sequencers *types.SequencerSet // Sequencers is the set of sequencers that are currently active on the rollapp + Sequencers *types.SequencerSet - // Clients and servers + Pubsub *pubsub.Server P2PClient *p2p.Client DAClient da.DataAvailabilityLayerClient SLClient settlement.ClientI - // RunMode represents the mode of the node. Set during initialization and shouldn't change after that. + RunMode uint - // context used when freezing node + Cancel context.CancelFunc Ctx context.Context - // LastBlockTimeInSettlement is the time of last submitted block, used to measure batch skew time + LastBlockTimeInSettlement atomic.Int64 - // LastBlockTime is the time of last produced block, used to measure batch skew time + LastBlockTime atomic.Int64 - // mutex used to avoid stopping node when fork is detected but proposer is creating/sending fork batch + forkMu sync.Mutex - /* - Sequencer and full-node - */ - // The last height which was submitted to settlement, that we know of. When we produce new batches, we will - // start at this height + 1. - // It is ALSO used by the producer, because the producer needs to check if it can prune blocks and it won't - // prune anything that might be submitted in the future. Therefore, it must be atomic. 
+ + + + + LastSettlementHeight atomic.Uint64 - // channel used to send the retain height to the pruning background loop + pruningC chan int64 - // indexer + IndexerService *txindex.IndexerService - // used to fetch blocks from DA. Sequencer will only fetch batches in case it requires to re-sync (in case of rollback). Full-node will fetch batches for syncing and validation. + Retriever da.BatchRetriever - /* - Full-node only - */ - // Protect against processing two blocks at once when there are two routines handling incoming gossiped blocks, - // and incoming DA blocks, respectively. + + + retrieverMu sync.Mutex - // Cached blocks and commits, coming from P2P, for applying at future heights. The blocks may not be valid, because - // we can only do full validation in sequential order. + + blockCache *Cache - // TargetHeight holds the value of the current highest block seen from either p2p (probably higher) or the DA + TargetHeight atomic.Uint64 - // Fraud handler + FraudHandler FraudHandler - // channel used to signal the syncing loop when there is a new state update available + settlementSyncingC chan struct{} - // channel used to signal the validation loop when there is a new state update available + settlementValidationC chan struct{} - // notifies when the node has completed syncing + syncedFromSettlement *uchannel.Nudger - // validates all non-finalized state updates from settlement, checking there is consistency between DA and P2P blocks, and the information in the state update. + SettlementValidator *SettlementValidator } -// NewManager creates new block Manager. + func NewManager( localKey crypto.PrivKey, conf config.NodeConfig, @@ -155,7 +151,7 @@ func NewManager( mempool, proxyApp, eventBus, - NewConsensusMsgQueue(), // TODO properly specify ConsensusMsgStream: https://github.com/dymensionxyz/dymint/issues/1125 + NewConsensusMsgQueue(), logger, ) if err != nil { @@ -179,10 +175,10 @@ func NewManager( blockCache: &Cache{ cache: make(map[uint64]types.CachedBlock), }, - pruningC: make(chan int64, 10), // use of buffered channel to avoid blocking applyBlock thread. In case channel is full, pruning will be skipped, but the retain height can be pruned in the next iteration. - settlementSyncingC: make(chan struct{}, 1), // use of buffered channel to avoid blocking. In case channel is full, its skipped because there is an ongoing syncing process, but syncing height is updated, which means the ongoing syncing will sync to the new height. - settlementValidationC: make(chan struct{}, 1), // use of buffered channel to avoid blocking. In case channel is full, its skipped because there is an ongoing validation process, but validation height is updated, which means the ongoing validation will validate to the new height. - syncedFromSettlement: uchannel.NewNudger(), // used by the sequencer to wait till the node completes the syncing from settlement. 
+ pruningC: make(chan int64, 10), + settlementSyncingC: make(chan struct{}, 1), + settlementValidationC: make(chan struct{}, 1), + syncedFromSettlement: uchannel.NewNudger(), } m.setFraudHandler(NewFreezeHandler(m)) err = m.LoadStateOnInit(store, genesis, logger) @@ -195,13 +191,13 @@ func NewManager( return nil, err } - // update dymint state with next revision info + err = m.updateStateForNextRevision() if err != nil { return nil, err } - // validate configuration params and rollapp consensus params are in line + err = m.ValidateConfigWithRollappParams() if err != nil { return nil, err @@ -212,10 +208,10 @@ func NewManager( return m, nil } -// Start starts the block manager. + func (m *Manager) Start(ctx context.Context) error { m.Ctx, m.Cancel = context.WithCancel(ctx) - // Check if InitChain flow is needed + if m.State.IsGenesis() { m.logger.Info("Running InitChain") @@ -225,9 +221,9 @@ func (m *Manager) Start(ctx context.Context) error { } } - // Check if a proposer on the rollapp is set. In case no proposer is set on the Rollapp, fallback to the hub proposer (If such exists). - // No proposer on the rollapp means that at some point there was no available proposer. - // In case there is also no proposer on the hub to our current height, it means that the chain is halted. + + + if m.State.GetProposer() == nil { m.logger.Info("No proposer on the rollapp, fallback to the hub proposer, if available") err := m.UpdateProposerFromSL() @@ -240,10 +236,10 @@ func (m *Manager) Start(ctx context.Context) error { } } - // checks if the the current node is the proposer either on rollapp or on the hub. - // In case of sequencer rotation, there's a phase where proposer rotated on Rollapp but hasn't yet rotated on hub. - // for this case, 2 nodes will get `true` for `AmIProposer` so the l2 proposer can produce blocks and the hub proposer can submit his last batch. - // The hub proposer, after sending the last state update, will panic and restart as full node. 
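// Editor's note: illustrative sketch, not part of this patch. It restates the rotation-phase
// rule described in the removed comment above: during rotation the hub and the rollapp can
// each report a different proposer, so a node keeps producer duties if it is the proposer on
// either layer. The helper below is a hypothetical stand-in, not the dymint implementation.
package main

import "fmt"

func amIProposer(onHub, onRollapp bool) bool {
	// Both the outgoing and the incoming proposer can be "true" here at the same time:
	// the rollapp proposer keeps producing blocks while the hub proposer submits its last batch.
	return onHub || onRollapp
}

func main() {
	fmt.Println(amIProposer(true, false))  // outgoing proposer: submit last batch
	fmt.Println(amIProposer(false, true))  // incoming proposer: produce blocks
	fmt.Println(amIProposer(false, false)) // full node
}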
+ + + + amIProposerOnSL, err := m.AmIProposerOnSL() if err != nil { return fmt.Errorf("am i proposer on SL: %w", err) @@ -253,30 +249,30 @@ func (m *Manager) Start(ctx context.Context) error { m.logger.Info("starting block manager", "mode", map[bool]string{true: "proposer", false: "full node"}[amIProposer]) - // update local state from latest state in settlement + err = m.updateFromLastSettlementState() if err != nil { return fmt.Errorf("sync block manager from settlement: %w", err) } - // send signal to syncing loop with last settlement state update + m.triggerSettlementSyncing() - // send signal to validation loop with last settlement state update + m.triggerSettlementValidation() eg, ctx := errgroup.WithContext(m.Ctx) - // Start the pruning loop in the background + uerrors.ErrGroupGoLog(eg, m.logger, func() error { return m.PruningLoop(ctx) }) - // Start the settlement sync loop in the background + uerrors.ErrGroupGoLog(eg, m.logger, func() error { return m.SettlementSyncLoop(ctx) }) - // Monitor sequencer set updates + uerrors.ErrGroupGoLog(eg, m.logger, func() error { return m.MonitorSequencerSetUpdates(ctx) }) @@ -289,7 +285,7 @@ func (m *Manager) Start(ctx context.Context) error { return m.MonitorBalances(ctx) }) - // run based on the node role + if !amIProposer { return m.runAsFullNode(ctx, eg) } @@ -301,26 +297,26 @@ func (m *Manager) NextHeightToSubmit() uint64 { return m.LastSettlementHeight.Load() + 1 } -// updateFromLastSettlementState retrieves last sequencers and state update from the Hub and updates local state with it + func (m *Manager) updateFromLastSettlementState() error { - // Update sequencers list from SL + err := m.UpdateSequencerSetFromSL() if err != nil { - // this error is not critical + m.logger.Error("Cannot fetch sequencer set from the Hub", "error", err) } - // update latest height from SL + latestHeight, err := m.SLClient.GetLatestHeight() if errors.Is(err, gerrc.ErrNotFound) { - // The SL hasn't got any batches for this chain yet. + m.logger.Info("No batches for chain found in SL.") - m.LastSettlementHeight.Store(uint64(m.Genesis.InitialHeight - 1)) //nolint:gosec // height is non-negative and falls in int64 + m.LastSettlementHeight.Store(uint64(m.Genesis.InitialHeight - 1)) m.LastBlockTimeInSettlement.Store(m.Genesis.GenesisTime.UTC().UnixNano()) return nil } if err != nil { - // TODO: separate between fresh rollapp and non-registered rollapp + return err } @@ -331,10 +327,10 @@ func (m *Manager) updateFromLastSettlementState() error { m.LastSettlementHeight.Store(latestHeight) - // init last block in settlement time in dymint state to calculate batch submit skew time + m.SetLastBlockTimeInSettlementFromHeight(latestHeight) - // init last block time in dymint state to calculate batch submit skew time + block, err := m.Store.LoadBlock(m.State.Height()) if err == nil { m.LastBlockTime.Store(block.Header.GetTimestamp().UTC().UnixNano()) @@ -343,7 +339,7 @@ func (m *Manager) updateFromLastSettlementState() error { } func (m *Manager) updateLastFinalizedHeightFromSettlement() error { - // update latest finalized height from SL + height, err := m.SLClient.GetLatestFinalizedHeight() if errors.Is(err, gerrc.ErrNotFound) { m.logger.Info("No finalized batches for chain found in SL.") @@ -372,7 +368,7 @@ func (m *Manager) UpdateTargetHeight(h uint64) { } } -// ValidateConfigWithRollappParams checks the configuration params are consistent with the params in the dymint state (e.g. 
DA and version) + func (m *Manager) ValidateConfigWithRollappParams() error { if da.Client(m.State.RollappParams.Da) != m.DAClient.GetClientType() { return fmt.Errorf("da client mismatch. rollapp param: %s da configured: %s", m.State.RollappParams.Da, m.DAClient.GetClientType()) @@ -385,7 +381,7 @@ func (m *Manager) ValidateConfigWithRollappParams() error { return nil } -// setDA initializes DA client in blockmanager according to DA type set in genesis or stored in state + func (m *Manager) setDA(daconfig string, dalcKV store.KV, logger log.Logger) error { daLayer := m.State.RollappParams.Da dalc := registry.GetClient(daLayer) @@ -406,12 +402,12 @@ func (m *Manager) setDA(daconfig string, dalcKV store.KV, logger log.Logger) err return nil } -// setFraudHandler sets the fraud handler for the block manager. + func (m *Manager) setFraudHandler(handler *FreezeHandler) { m.FraudHandler = handler } -// freezeNode sets the node as unhealthy and prevents the node continues producing and processing blocks + func (m *Manager) freezeNode(err error) { m.logger.Info("Freezing node", "err", err) if m.Ctx.Err() != nil { @@ -421,11 +417,11 @@ func (m *Manager) freezeNode(err error) { m.Cancel() } -// SetLastBlockTimeInSettlementFromHeight is used to initialize LastBlockTimeInSettlement from rollapp height in settlement + func (m *Manager) SetLastBlockTimeInSettlementFromHeight(lastSettlementHeight uint64) { block, err := m.Store.LoadBlock(lastSettlementHeight) if err != nil { - // if settlement height block is not found it will be updated after, when syncing + return } m.LastBlockTimeInSettlement.Store(block.Header.GetTimestamp().UTC().UnixNano()) diff --git a/block/modes.go b/block/modes.go index adfd56432..e8a48d33f 100644 --- a/block/modes.go +++ b/block/modes.go @@ -20,43 +20,43 @@ const ( p2pBlocksyncLoop = "applyBlockSyncBlocksLoop" ) -// setFraudHandler sets the fraud handler for the block manager. + func (m *Manager) runAsFullNode(ctx context.Context, eg *errgroup.Group) error { m.logger.Info("starting block manager", "mode", "full node") m.RunMode = RunModeFullNode - // update latest finalized height + err := m.updateLastFinalizedHeightFromSettlement() if err != nil { return fmt.Errorf("sync block manager from settlement: %w", err) } - // Start the settlement validation loop in the background + uerrors.ErrGroupGoLog(eg, m.logger, func() error { return m.SettlementValidateLoop(ctx) }) m.subscribeFullNodeEvents(ctx) - // remove instruction file after fork to avoid enter fork loop again + return types.DeleteInstructionFromDisk(m.RootDir) } func (m *Manager) runAsProposer(ctx context.Context, eg *errgroup.Group) error { m.logger.Info("starting block manager", "mode", "proposer") m.RunMode = RunModeProposer - // Subscribe to batch events, to update last submitted height in case batch confirmation was lost. This could happen if the sequencer crash/restarted just after submitting a batch to the settlement and by the time we query the last batch, this batch wasn't accepted yet. + go uevent.MustSubscribe(ctx, m.Pubsub, "updateSubmittedHeightLoop", settlement.EventQueryNewSettlementBatchAccepted, m.UpdateLastSubmittedHeight, m.logger) - // Subscribe to P2P received blocks events (used for P2P syncing). + go uevent.MustSubscribe(ctx, m.Pubsub, p2pBlocksyncLoop, p2p.EventQueryNewBlockSyncBlock, m.OnReceivedBlock, m.logger) - // Sequencer must wait till the DA light client is synced. Otherwise it will fail when submitting blocks. 
- // Full-nodes does not need to wait, but if it tries to fetch blocks from DA heights previous to the DA light client height it will fail, and it will retry till it reaches the height. + + m.DAClient.WaitForSyncing() - // Sequencer must wait till node is synced till last submittedHeight, in case it is not + m.waitForSettlementSyncing() - // it is checked again whether the node is the active proposer, since this could have changed after syncing. + amIProposerOnSL, err := m.AmIProposerOnSL() if err != nil { return fmt.Errorf("am i proposer on SL: %w", err) @@ -65,28 +65,28 @@ func (m *Manager) runAsProposer(ctx context.Context, eg *errgroup.Group) error { return fmt.Errorf("the node is no longer the proposer. please restart.") } - // update l2 proposer from SL in case it changed after syncing + err = m.UpdateProposerFromSL() if err != nil { return err } - // doForkWhenNewRevision executes fork if necessary + err = m.doForkWhenNewRevision() if err != nil { return err } - // check if we should rotate + shouldRotate, err := m.ShouldRotate() if err != nil { return fmt.Errorf("checking should rotate: %w", err) } if shouldRotate { - m.rotate(ctx) // panics afterwards + m.rotate(ctx) } - // populate the bytes produced channel + bytesProducedC := make(chan int) uerrors.ErrGroupGoLog(eg, m.logger, func() error { @@ -94,18 +94,18 @@ func (m *Manager) runAsProposer(ctx context.Context, eg *errgroup.Group) error { }) uerrors.ErrGroupGoLog(eg, m.logger, func() error { - bytesProducedC <- m.GetUnsubmittedBytes() // load unsubmitted bytes from previous run + bytesProducedC <- m.GetUnsubmittedBytes() return m.ProduceBlockLoop(ctx, bytesProducedC) }) - // Monitor and handling of the rotation + uerrors.ErrGroupGoLog(eg, m.logger, func() error { return m.MonitorProposerRotation(ctx) }) go func() { err = eg.Wait() - // Check if loops exited due to sequencer rotation signal + if errors.Is(err, errRotationRequested) { m.rotate(ctx) } else if err != nil { @@ -118,11 +118,11 @@ func (m *Manager) runAsProposer(ctx context.Context, eg *errgroup.Group) error { } func (m *Manager) subscribeFullNodeEvents(ctx context.Context) { - // Subscribe to new (or finalized) state updates events. + go uevent.MustSubscribe(ctx, m.Pubsub, syncLoop, settlement.EventQueryNewSettlementBatchAccepted, m.onNewStateUpdate, m.logger) go uevent.MustSubscribe(ctx, m.Pubsub, validateLoop, settlement.EventQueryNewSettlementBatchFinalized, m.onNewStateUpdateFinalized, m.logger) - // Subscribe to P2P received blocks events (used for P2P syncing). + go uevent.MustSubscribe(ctx, m.Pubsub, p2pGossipLoop, p2p.EventQueryNewGossipedBlock, m.OnReceivedBlock, m.logger) go uevent.MustSubscribe(ctx, m.Pubsub, p2pBlocksyncLoop, p2p.EventQueryNewBlockSyncBlock, m.OnReceivedBlock, m.logger) } diff --git a/block/p2p.go b/block/p2p.go index 6dcae3c5e..c1c679dd3 100644 --- a/block/p2p.go +++ b/block/p2p.go @@ -9,7 +9,7 @@ import ( "github.com/tendermint/tendermint/libs/pubsub" ) -// onReceivedBlock receives a block received event from P2P, saves the block to a cache and tries to apply the blocks from the cache. 
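// Editor's note: illustrative sketch, not part of this patch. It shows the cache-then-apply
// pattern behind OnReceivedBlock below: gossiped blocks arriving out of order are cached by
// height and applied only once they become the next expected height. Types are simplified
// stand-ins for the real block and cache types.
package main

import "fmt"

type block struct{ height uint64 }

type cache struct {
	next   uint64
	blocks map[uint64]block
}

func (c *cache) receive(b block) {
	if b.height < c.next {
		return // already applied
	}
	if _, ok := c.blocks[b.height]; ok {
		return // already cached; avoid repeated apply attempts
	}
	c.blocks[b.height] = b
	for {
		nb, ok := c.blocks[c.next]
		if !ok {
			return
		}
		delete(c.blocks, c.next)
		fmt.Println("applied height", nb.height)
		c.next++
	}
}

func main() {
	c := &cache{next: 1, blocks: map[uint64]block{}}
	c.receive(block{3})
	c.receive(block{1})
	c.receive(block{2}) // unblocks heights 2 and 3
}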
+ func (m *Manager) OnReceivedBlock(event pubsub.Message) { eventData, ok := event.Data().(p2p.BlockData) if !ok { @@ -40,9 +40,9 @@ func (m *Manager) OnReceivedBlock(event pubsub.Message) { if block.Header.Height < m.State.NextHeight() { return } - m.retrieverMu.Lock() // needed to protect blockCache access + m.retrieverMu.Lock() - // It is not strictly necessary to return early, for correctness, but doing so helps us avoid mutex pressure and unnecessary repeated attempts to apply cached blocks + if m.blockCache.Has(height) { m.retrieverMu.Unlock() return @@ -54,7 +54,7 @@ func (m *Manager) OnReceivedBlock(event pubsub.Message) { m.logger.Debug("Received new block from p2p.", "block height", height, "source", source.String(), "store height", m.State.Height(), "n cachedBlocks", m.blockCache.Size()) m.blockCache.Add(height, &block, &commit, source) - m.retrieverMu.Unlock() // have to give this up as it's locked again in attempt apply, and we're not re-entrant + m.retrieverMu.Unlock() err := m.attemptApplyCachedBlocks() if err != nil { @@ -63,7 +63,7 @@ func (m *Manager) OnReceivedBlock(event pubsub.Message) { } } -// gossipBlock sends created blocks by the sequencer to full-nodes using P2P gossipSub + func (m *Manager) gossipBlock(ctx context.Context, block types.Block, commit types.Commit) error { m.logger.Info("Gossipping block", "height", block.Header.Height) gossipedBlock := p2p.BlockData{Block: block, Commit: commit} @@ -72,15 +72,15 @@ func (m *Manager) gossipBlock(ctx context.Context, block types.Block, commit typ return fmt.Errorf("marshal binary: %w: %w", err, ErrNonRecoverable) } if err := m.P2PClient.GossipBlock(ctx, gossipedBlockBytes); err != nil { - // Although this boils down to publishing on a topic, we don't want to speculate too much on what - // could cause that to fail, so we assume recoverable. + + return fmt.Errorf("p2p gossip block: %w: %w", err, ErrRecoverable) } return nil } -// This function adds the block to blocksync store to enable P2P retrievability + func (m *Manager) saveP2PBlockToBlockSync(block *types.Block, commit *types.Commit) error { gossipedBlock := p2p.BlockData{Block: *block, Commit: *commit} gossipedBlockBytes, err := gossipedBlock.MarshalBinary() diff --git a/block/produce.go b/block/produce.go index 9a67fe77b..a2d4ffa64 100644 --- a/block/produce.go +++ b/block/produce.go @@ -20,9 +20,9 @@ import ( "github.com/dymensionxyz/dymint/types" ) -// ProduceBlockLoop is calling publishBlock in a loop as long as we're synced. -// A signal will be sent to the bytesProduced channel for each block produced -// In this way it's possible to pause block production by not consuming the channel + + + func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int) error { m.logger.Info("Started block producer loop.") @@ -40,12 +40,12 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int) case <-ctx.Done(): return nil case <-ticker.C: - // Only produce if I'm the current rollapp proposer. + if !m.AmIProposerOnRollapp() { continue } - // if empty blocks are configured to be enabled, and one is scheduled... 
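// Editor's note: illustrative sketch, not part of this patch. It restates the empty-block
// scheduling rule used by the producer loop below: an empty block is allowed on the first
// iteration, when no idle timeout is configured, or when the scheduled empty-block time has
// already passed. Parameter names are simplified assumptions.
package main

import (
	"fmt"
	"time"
)

func allowEmpty(first bool, maxIdle time.Duration, nextEmpty, now time.Time) bool {
	return first || maxIdle == 0 || nextEmpty.Before(now)
}

func main() {
	now := time.Now()
	fmt.Println(allowEmpty(true, time.Hour, now.Add(time.Hour), now))     // first block: true
	fmt.Println(allowEmpty(false, 0, now.Add(time.Hour), now))            // no idle timeout configured: true
	fmt.Println(allowEmpty(false, time.Hour, now.Add(-time.Second), now)) // schedule elapsed: true
	fmt.Println(allowEmpty(false, time.Hour, now.Add(time.Hour), now))    // otherwise: false
}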
+ produceEmptyBlock := firstBlock || m.Conf.MaxIdleTime == 0 || nextEmptyBlock.Before(time.Now()) firstBlock = false @@ -54,7 +54,7 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int) m.logger.Error("Produce and gossip: context canceled.", "error", err) return nil } - if errors.Is(err, types.ErrEmptyBlock) { // occurs if the block was empty but we don't want to produce one + if errors.Is(err, types.ErrEmptyBlock) { continue } if errors.Is(err, ErrNonRecoverable) { @@ -68,8 +68,8 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int) } nextEmptyBlock = time.Now().Add(m.Conf.MaxIdleTime) if 0 < len(block.Data.Txs) { - // the block wasn't empty so we want to make sure we don't wait too long before producing another one, in order to facilitate proofs for ibc - // TODO: optimize to only do this if IBC transactions are present (https://github.com/dymensionxyz/dymint/issues/709) + + nextEmptyBlock = time.Now().Add(m.Conf.MaxProofTime) } else { m.logger.Info("Produced empty block.") @@ -102,10 +102,10 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int) type ProduceBlockOptions struct { AllowEmpty bool MaxData *uint64 - NextProposerHash *[32]byte // optional, used for last block + NextProposerHash *[32]byte } -// ProduceApplyGossipLastBlock produces and applies a block with the given NextProposerHash. + func (m *Manager) ProduceApplyGossipLastBlock(ctx context.Context, nextProposerHash [32]byte) (err error) { _, _, err = m.produceApplyGossip(ctx, ProduceBlockOptions{ AllowEmpty: true, @@ -119,22 +119,22 @@ func (m *Manager) ProduceApplyGossipBlock(ctx context.Context, opts ProduceBlock } func (m *Manager) produceApplyGossip(ctx context.Context, opts ProduceBlockOptions) (block *types.Block, commit *types.Commit, err error) { - // Snapshot sequencer set to check if there are sequencer set updates. - // It fills the consensus messages queue for all the new sequencers. - // - // Note that there cannot be any recoverable errors between when the queue is filled and dequeued; - // otherwise, the queue may grow uncontrollably if there is a recoverable error loop in the middle. - // - // All errors in this method are non-recoverable. + + + + + + + newSequencerSet, err := m.SnapshotSequencerSet() if err != nil { return nil, nil, fmt.Errorf("snapshot sequencer set: %w", err) } - // We do not want to wait for a new block created to propagate a new sequencer set. - // Therefore, we force an empty block if there are any sequencer set updates. + + opts.AllowEmpty = opts.AllowEmpty || len(newSequencerSet) > 0 - // If I'm not the current rollapp proposer, I should not produce a blocks. + block, commit, err = m.produceBlock(opts) if err != nil { return nil, nil, fmt.Errorf("produce block: %w", err) @@ -151,50 +151,50 @@ func (m *Manager) produceApplyGossip(ctx context.Context, opts ProduceBlockOptio return block, commit, nil } -// SnapshotSequencerSet loads two versions of the sequencer set: -// - the one that was used for the last block (from the store) -// - and the most recent one (from the manager memory) -// -// It then calculates the diff between the two and creates consensus messages for the new sequencers, -// i.e., only for the diff between two sets. If there is any diff (i.e., the sequencer set is updated), -// the method returns the entire new set. The new set will be used for next block and will be stored -// in the state instead of the old set after the block production. 
-// -// The set from the state is dumped to memory on reboots. It helps to avoid sending unnecessary -// UspertSequencer consensus messages on reboots. This is not a 100% solution, because the sequencer set -// is not persisted in the store in full node mode. It's only used in the proposer mode. Therefore, -// on rotation from the full node to the proposer, the sequencer set is duplicated as consensus msgs. -// Though single-time duplication it's not a big deal. + + + + + + + + + + + + + + func (m *Manager) SnapshotSequencerSet() (sequencersAfterUpdate types.Sequencers, err error) { - // the most recent sequencer set + sequencersAfterUpdate = m.Sequencers.GetAll() - // the sequencer set that was used for the last block + lastSequencers, err := m.Store.LoadLastBlockSequencerSet() - // it's okay if the last sequencer set is not found, it can happen on genesis or after - // rotation from the full node to the proposer + + if err != nil && !errors.Is(err, gerrc.ErrNotFound) { - // unexpected error from the store is non-recoverable + return nil, fmt.Errorf("load last block sequencer set: %w: %w", err, ErrNonRecoverable) } - // diff between the two sequencer sets + newSequencers := types.SequencerListRightOuterJoin(lastSequencers, sequencersAfterUpdate) if len(newSequencers) == 0 { - // nothing to upsert, nothing to persist + return nil, nil } - // Create consensus msgs for new sequencers. - // It can fail only on decoding or internal errors this is non-recoverable. + + msgs, err := ConsensusMsgsOnSequencerSetUpdate(newSequencers) if err != nil { return nil, fmt.Errorf("consensus msgs on sequencers set update: %w: %w", err, ErrNonRecoverable) } m.Executor.AddConsensusMsgs(msgs...) - // return the entire new set if there is any update + return sequencersAfterUpdate, nil } @@ -202,18 +202,18 @@ func (m *Manager) produceBlock(opts ProduceBlockOptions) (*types.Block, *types.C newHeight := m.State.NextHeight() lastHeaderHash, lastCommit, err := m.GetPreviousBlockHashes(newHeight) if err != nil { - // the error here is always non-recoverable, see GetPreviousBlockHashes() for details + return nil, nil, fmt.Errorf("load prev block: %w", err) } var block *types.Block var commit *types.Commit - // Check if there's an already stored block and commit at a newer height - // If there is use that instead of creating a new block + + pendingBlock, err := m.Store.LoadBlock(newHeight) if err == nil { - // Using an existing block + block = pendingBlock commit, err = m.Store.LoadCommit(newHeight) if err != nil { @@ -230,16 +230,16 @@ func (m *Manager) produceBlock(opts ProduceBlockOptions) (*types.Block, *types.C maxBlockDataSize = *opts.MaxData } proposerHashForBlock := [32]byte(m.State.GetProposerHash()) - // if NextProposerHash is set, we create a last block + if opts.NextProposerHash != nil { maxBlockDataSize = 0 proposerHashForBlock = *opts.NextProposerHash } - // dequeue consensus messages for the new sequencers while creating a new block + block = m.Executor.CreateBlock(newHeight, lastCommit, lastHeaderHash, proposerHashForBlock, m.State, maxBlockDataSize) - // this cannot happen if there are any sequencer set updates - // AllowEmpty should be always true in this case + + if !opts.AllowEmpty && len(block.Data.Txs) == 0 { return nil, nil, fmt.Errorf("%w: %w", types.ErrEmptyBlock, ErrRecoverable) } @@ -255,7 +255,7 @@ func (m *Manager) produceBlock(opts ProduceBlockOptions) (*types.Block, *types.C return block, commit, nil } -// create commit for block + func (m *Manager) createCommit(block *types.Block) 
(*types.Commit, error) { abciHeaderPb := types.ToABCIHeaderPB(&block.Header) abciHeaderBytes, err := abciHeaderPb.Marshal() @@ -290,7 +290,7 @@ func (m *Manager) createTMSignature(block *types.Block, proposerAddress []byte, headerHash := block.Header.Hash() vote := tmtypes.Vote{ Type: cmtproto.PrecommitType, - Height: int64(block.Header.Height), //nolint:gosec // height is non-negative and falls in int64 + Height: int64(block.Header.Height), Round: 0, Timestamp: voteTimestamp, BlockID: tmtypes.BlockID{Hash: headerHash[:], PartSetHeader: tmtypes.PartSetHeader{ @@ -301,18 +301,18 @@ func (m *Manager) createTMSignature(block *types.Block, proposerAddress []byte, ValidatorIndex: 0, } v := vote.ToProto() - // convert libp2p key to tm key - // TODO: move to types + + rawKey, _ := m.LocalKey.Raw() tmprivkey := tmed25519.PrivKey(rawKey) tmprivkey.PubKey().Bytes() - // Create a mock validator to sign the vote + tmvalidator := tmtypes.NewMockPVWithParams(tmprivkey, false, false) err := tmvalidator.SignVote(m.State.ChainID, v) if err != nil { return nil, err } - // Update the vote with the signature + vote.Signature = v.Signature pubKey := tmprivkey.PubKey() voteSignBytes := tmtypes.VoteSignBytes(m.State.ChainID, v) @@ -322,12 +322,12 @@ func (m *Manager) createTMSignature(block *types.Block, proposerAddress []byte, return vote.Signature, nil } -// GetPreviousBlockHashes returns the hash of the last block and the commit for the last block -// to be used as the previous block hash and commit for the next block + + func (m *Manager) GetPreviousBlockHashes(forHeight uint64) (lastHeaderHash [32]byte, lastCommit *types.Commit, err error) { - lastHeaderHash, lastCommit, err = getHeaderHashAndCommit(m.Store, forHeight-1) // prev height = forHeight - 1 + lastHeaderHash, lastCommit, err = getHeaderHashAndCommit(m.Store, forHeight-1) if err != nil { - if !m.State.IsGenesis() { // allow prevBlock not to be found only on genesis + if !m.State.IsGenesis() { return [32]byte{}, nil, fmt.Errorf("load prev block: %w: %w", err, ErrNonRecoverable) } lastHeaderHash = [32]byte{} @@ -336,7 +336,7 @@ func (m *Manager) GetPreviousBlockHashes(forHeight uint64) (lastHeaderHash [32]b return lastHeaderHash, lastCommit, nil } -// getHeaderHashAndCommit returns the Header Hash and Commit for a given height + func getHeaderHashAndCommit(store store.Store, height uint64) ([32]byte, *types.Commit, error) { lastCommit, err := store.LoadCommit(height) if err != nil { diff --git a/block/pruning.go b/block/pruning.go index 9a92451e9..9576938d1 100644 --- a/block/pruning.go +++ b/block/pruning.go @@ -4,9 +4,9 @@ import ( "context" ) -// Prune function prune all block related data from dymint store and blocksync store up to (but not including) retainHeight. 
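// Editor's note: illustrative sketch, not part of this patch. It shows the retain-height
// clamping used by the pruning loop below: a proposer never prunes blocks it may still need
// to submit, and a full node never prunes blocks it has not yet validated. Function and
// parameter names are stand-ins. Requires Go 1.21+ for the built-in min.
package main

import "fmt"

func pruningHeight(isProposer bool, retain, nextSubmit, nextValidate uint64) uint64 {
	if isProposer {
		return min(retain, nextSubmit) // keep everything not yet submitted to settlement
	}
	return min(retain, nextValidate) // keep everything not yet validated
}

func main() {
	fmt.Println(pruningHeight(true, 100, 80, 95))  // 80: unsubmitted blocks are retained
	fmt.Println(pruningHeight(false, 100, 80, 95)) // 95: unvalidated blocks are retained
}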
+ func (m *Manager) Prune(retainHeight uint64) { - // logging pruning result + logResult := func(err error, source string, retainHeight uint64, pruned uint64) { if err != nil { m.logger.Error("pruning", "from", source, "retain height", retainHeight, "err", err) @@ -15,20 +15,20 @@ func (m *Manager) Prune(retainHeight uint64) { } } - // prune blocks from blocksync store + pruned, err := m.P2PClient.RemoveBlocks(context.Background(), retainHeight) logResult(err, "blocksync", retainHeight, pruned) - // prune indexed block and txs and associated events + pruned, err = m.IndexerService.Prune(retainHeight, m.Store) logResult(err, "indexer", retainHeight, pruned) - // prune blocks from dymint store + pruned, err = m.Store.PruneStore(retainHeight, m.logger) logResult(err, "dymint store", retainHeight, pruned) } -//nolint:gosec // height is non-negative and falls in int64 + func (m *Manager) PruningLoop(ctx context.Context) error { for { select { @@ -36,9 +36,9 @@ func (m *Manager) PruningLoop(ctx context.Context) error { return nil case retainHeight := <-m.pruningC: var pruningHeight uint64 - if m.RunMode == RunModeProposer { // do not delete anything that we might submit in future + if m.RunMode == RunModeProposer { pruningHeight = min(m.NextHeightToSubmit(), uint64(retainHeight)) - } else { // do not delete anything that is not validated yet + } else { pruningHeight = min(m.SettlementValidator.NextValidationHeight(), uint64(retainHeight)) } m.Prune(pruningHeight) diff --git a/block/retriever.go b/block/retriever.go index 3475bd398..850a9ed9e 100644 --- a/block/retriever.go +++ b/block/retriever.go @@ -22,7 +22,7 @@ func (m *Manager) ApplyBatchFromSL(slBatch *settlement.Batch) error { m.retrieverMu.Lock() defer m.retrieverMu.Unlock() - // if batch blocks have already been applied skip, otherwise it will fail in endheight validation (it can happen when syncing from blocksync in parallel). + if m.State.Height() > slBatch.EndHeight { return nil } @@ -30,7 +30,7 @@ func (m *Manager) ApplyBatchFromSL(slBatch *settlement.Batch) error { blockIndex := 0 for _, batch := range batchResp.Batches { for i, block := range batch.Blocks { - // We dont apply a block if not included in the block descriptor (adds support for rollback) + if blockIndex >= len(slBatch.BlockDescriptors) { break } @@ -45,7 +45,7 @@ func (m *Manager) ApplyBatchFromSL(slBatch *settlement.Batch) error { return err } - // We dont validate because validateBlockBeforeApply already checks if the block is already applied, and we don't need to fail there. 
+ err := m.applyBlockWithFraudHandling(block, batch.Commits[i], types.BlockMetaData{Source: types.DA, DAHeight: slBatch.MetaData.DA.Height}) if err != nil { return fmt.Errorf("apply block: height: %d: %w", block.Header.Height, err) @@ -55,7 +55,7 @@ func (m *Manager) ApplyBatchFromSL(slBatch *settlement.Batch) error { } } - // validate the batch applied successfully and we are at the end height + if m.State.Height() != slBatch.EndHeight { return fmt.Errorf("state height mismatch: state height: %d: batch end height: %d", m.State.Height(), slBatch.EndHeight) } @@ -63,14 +63,14 @@ func (m *Manager) ApplyBatchFromSL(slBatch *settlement.Batch) error { return nil } -// Used it when doing local rollback, and applying same blocks (instead of producing new ones) -// it was used for an edge case, eg: -// seq produced block H and gossiped -// bug in code produces app mismatch across nodes -// bug fixed, state rolled back to H-1 -// if seq produces new block H, it can lead to double signing, as the old block can still be in the p2p network -// ---- -// when this scenario encountered previously, we wanted to apply same block instead of producing new one + + + + + + + + func (m *Manager) applyLocalBlock() error { defer m.retrieverMu.Unlock() m.retrieverMu.Lock() @@ -101,7 +101,7 @@ func (m *Manager) applyLocalBlock() error { } func (m *Manager) fetchBatch(daMetaData *da.DASubmitMetaData) da.ResultRetrieveBatch { - // Check DA client + if daMetaData.Client != m.DAClient.GetClientType() { return da.ResultRetrieveBatch{ BaseResult: da.BaseResult{ @@ -112,9 +112,9 @@ func (m *Manager) fetchBatch(daMetaData *da.DASubmitMetaData) da.ResultRetrieveB } } - // batchRes.MetaData includes proofs necessary to open disputes with the Hub + batchRes := m.Retriever.RetrieveBatches(daMetaData) - // TODO(srene) : for invalid transactions there is no specific error code since it will need to be validated somewhere else for fraud proving. - // NMT proofs (availRes.MetaData.Proofs) are included in the result batchRes, necessary to be included in the dispute + + return batchRes } diff --git a/block/sequencers.go b/block/sequencers.go index ca6155397..ab0597222 100644 --- a/block/sequencers.go +++ b/block/sequencers.go @@ -14,7 +14,7 @@ const ( var errRotationRequested = fmt.Errorf("sequencer rotation started. signal to stop production") func (m *Manager) MonitorProposerRotation(ctx context.Context) error { - ticker := time.NewTicker(ProposerMonitorInterval) // TODO: make this configurable + ticker := time.NewTicker(ProposerMonitorInterval) defer ticker.Stop() for { @@ -27,12 +27,12 @@ func (m *Manager) MonitorProposerRotation(ctx context.Context) error { m.logger.Error("Check rotation in progress", "err", err) continue } - // no rotation in progress + if nextProposer == nil { continue } - // we get here once a sequencer rotation signal is received + m.logger.Info("Sequencer rotation started.", "nextSeqAddr", nextProposer.SettlementAddress) return errRotationRequested } @@ -50,18 +50,18 @@ func (m *Manager) MonitorSequencerSetUpdates(ctx context.Context) error { case <-ticker.C: err := m.UpdateSequencerSetFromSL() if err != nil { - // this error is not critical + m.logger.Error("Cannot fetch sequencer set from the Hub", "error", err) } } } } -// AmIProposerOnSL checks if the current node is the proposer on the hub -// Proposer on the Hub is not necessarily the proposer on the Rollapp during rotation phase. 
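// Editor's note: illustrative sketch, not part of this patch. It shows the key-comparison
// idea behind AmIProposerOnSL / AmIProposerOnRollapp below: the node is the proposer when
// its local public key equals the proposer key reported by the respective layer. ed25519 is
// used here only as a stand-in key type.
package main

import (
	"bytes"
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func isProposer(local, reported ed25519.PublicKey) bool {
	return bytes.Equal(local, reported)
}

func main() {
	pub, _, _ := ed25519.GenerateKey(rand.Reader)
	other, _, _ := ed25519.GenerateKey(rand.Reader)
	fmt.Println(isProposer(pub, pub))   // true: this node is the proposer
	fmt.Println(isProposer(pub, other)) // false: someone else is the proposer
}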
+ + func (m *Manager) AmIProposerOnSL() (bool, error) { localProposerKeyBytes, _ := m.LocalKey.GetPublic().Raw() - // get hub proposer key + SLProposer, err := m.SLClient.GetProposerAtHeight(-1) if err != nil { return false, fmt.Errorf("get proposer at height: %w", err) @@ -69,8 +69,8 @@ func (m *Manager) AmIProposerOnSL() (bool, error) { return bytes.Equal(SLProposer.PubKey().Bytes(), localProposerKeyBytes), nil } -// AmIProposerOnRollapp checks if the current node is the proposer on the rollapp. -// Proposer on the rollapp is not necessarily the proposer on the hub during rotation phase. + + func (m *Manager) AmIProposerOnRollapp() bool { if m.State.GetProposer() == nil { return false @@ -81,8 +81,8 @@ func (m *Manager) AmIProposerOnRollapp() bool { return bytes.Equal(rollappProposer, localProposerKeyBytes) } -// ShouldRotate checks if the we are in the middle of rotation and we are the rotating proposer (i.e current proposer on the hub). -// We check it by checking if there is a "next" proposer on the hub which is not us. + + func (m *Manager) ShouldRotate() (bool, error) { nextProposer, err := m.SLClient.GetNextProposer() if err != nil { @@ -91,8 +91,8 @@ func (m *Manager) ShouldRotate() (bool, error) { if nextProposer == nil { return false, nil } - // At this point we know that there is a next proposer, - // so we should rotate only if we are the current proposer on the hub + + amIProposerOnSL, err := m.AmIProposerOnSL() if err != nil { return false, fmt.Errorf("am i proposer on SL: %w", err) @@ -100,13 +100,13 @@ func (m *Manager) ShouldRotate() (bool, error) { return amIProposerOnSL, nil } -// rotate rotates current proposer by doing the following: -// 1. Creating last block with the new proposer, which will stop him from producing blocks. -// 2. Submitting the last batch -// 3. Panicing so the node restarts as full node -// Note: In case he already created his last block, he will only try to submit the last batch. + + + + + func (m *Manager) rotate(ctx context.Context) { - // Get Next Proposer from SL. We assume such exists (even if empty proposer) otherwise function wouldn't be called. + nextProposer, err := m.SLClient.GetNextProposer() if err != nil || nextProposer == nil { panic(fmt.Sprintf("rotate: fetch next proposer set from Hub: %v", err)) @@ -127,8 +127,8 @@ func (m *Manager) rotate(ctx context.Context) { panic("rotate: sequencer is no longer the proposer. restarting as a full node") } -// CreateAndPostLastBatch creates and posts the last batch to the hub -// this called after manager shuts down the block producer and submitter + + func (m *Manager) CreateAndPostLastBatch(ctx context.Context, nextSeqHash [32]byte) error { h := m.State.Height() block, err := m.Store.LoadBlock(h) @@ -136,8 +136,8 @@ func (m *Manager) CreateAndPostLastBatch(ctx context.Context, nextSeqHash [32]by return fmt.Errorf("load block: height: %d: %w", h, err) } - // check if the last block already produced with NextProposerHash set. - // After creating the last block, the sequencer will be restarted so it will not be able to produce blocks anymore. 
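// Editor's note: illustrative sketch, not part of this patch. It sketches the hand-over loop
// in CreateAndPostLastBatch below: batches keep being created and submitted until the one
// carrying the final produced height is posted. The batch type and creator function are
// hypothetical stand-ins.
package main

import "fmt"

type batch struct {
	endHeight uint64
	last      bool
}

// submitUntilLast submits batches in order until one is flagged as the last batch.
func submitUntilLast(create func() batch) {
	for {
		b := create()
		fmt.Println("submitted batch up to height", b.endHeight)
		if b.last {
			return
		}
	}
}

func main() {
	heights := []uint64{10, 20, 25}
	i := 0
	submitUntilLast(func() batch {
		b := batch{endHeight: heights[i], last: i == len(heights)-1}
		i++
		return b
	})
}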
+ + if bytes.Equal(block.Header.NextSequencersHash[:], nextSeqHash[:]) { m.logger.Debug("Last block already produced and applied.") } else { @@ -147,7 +147,7 @@ func (m *Manager) CreateAndPostLastBatch(ctx context.Context, nextSeqHash [32]by } } - // Submit all data accumulated thus far and the last state update + for { b, err := m.CreateAndSubmitBatch(m.Conf.BatchSubmitBytes, true) if err != nil { @@ -162,9 +162,9 @@ func (m *Manager) CreateAndPostLastBatch(ctx context.Context, nextSeqHash [32]by return nil } -// UpdateSequencerSetFromSL updates the sequencer set from the SL. The sequencer set is saved only in memory. -// It will be persisted to the store when the block is produced (only in the proposer mode). -// Proposer is not changed here. + + + func (m *Manager) UpdateSequencerSetFromSL() error { seqs, err := m.SLClient.GetAllSequencers() if err != nil { @@ -175,9 +175,9 @@ func (m *Manager) UpdateSequencerSetFromSL() error { return nil } -// UpdateProposerFromSL queries the hub and updates the local dymint state proposer at the current height + func (m *Manager) UpdateProposerFromSL() error { - SLProposer, err := m.SLClient.GetProposerAtHeight(int64(m.State.NextHeight())) //nolint:gosec // height is non-negative and falls in int64 + SLProposer, err := m.SLClient.GetProposerAtHeight(int64(m.State.NextHeight())) if err != nil { return fmt.Errorf("get proposer at height: %w", err) } diff --git a/block/slvalidator.go b/block/slvalidator.go index bf9b8ac0a..700911dc5 100644 --- a/block/slvalidator.go +++ b/block/slvalidator.go @@ -13,14 +13,14 @@ import ( "github.com/dymensionxyz/dymint/types" ) -// SettlementValidator validates batches from settlement layer with the corresponding blocks from DA and P2P. + type SettlementValidator struct { logger types.Logger blockManager *Manager lastValidatedHeight atomic.Uint64 } -// NewSettlementValidator returns a new StateUpdateValidator instance. + func NewSettlementValidator(logger types.Logger, blockManager *Manager) *SettlementValidator { lastValidatedHeight, err := blockManager.Store.LoadValidationHeight() if err != nil { @@ -36,13 +36,13 @@ func NewSettlementValidator(logger types.Logger, blockManager *Manager) *Settlem return validator } -// ValidateStateUpdate validates that the blocks from the state info are available in DA, -// that the information included in the Hub state info matches the blocks retrieved from DA -// and those blocks are the same that are obtained via P2P. + + + func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrieveBatch) error { v.logger.Debug("validating state update", "start height", batch.StartHeight, "end height", batch.EndHeight) - // loads blocks applied from P2P, if any. 
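// Editor's note: illustrative sketch, not part of this patch. It mirrors the first step of
// ValidateStateUpdate below: collect only the blocks that were applied from P2P (gossip or
// block-sync) into a height-indexed map, so they can later be compared against the blocks
// retrieved from DA. The source enum and block type are simplified.
package main

import "fmt"

type source int

const (
	fromDA source = iota
	fromGossip
	fromBlockSync
)

type block struct {
	height uint64
	src    source
}

func p2pBlocksByHeight(blocks []block) map[uint64]block {
	out := make(map[uint64]block)
	for _, b := range blocks {
		if b.src != fromGossip && b.src != fromBlockSync {
			continue // DA-sourced blocks are compared on the DA side instead
		}
		out[b.height] = b
	}
	return out
}

func main() {
	m := p2pBlocksByHeight([]block{{1, fromGossip}, {2, fromDA}, {3, fromBlockSync}})
	fmt.Println(len(m)) // 2
}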
+ p2pBlocks := make(map[uint64]*types.Block) for height := batch.StartHeight; height <= batch.EndHeight; height++ { source, err := v.blockManager.Store.LoadBlockSource(height) @@ -51,7 +51,7 @@ func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrie continue } - // if block is not P2P block, skip + if source != types.Gossiped && source != types.BlockSync { continue } @@ -64,7 +64,7 @@ func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrie p2pBlocks[block.Header.Height] = block } - // load all DA blocks from the batch to be validated + var daBatch da.ResultRetrieveBatch for { daBatch = v.blockManager.Retriever.RetrieveBatches(batch.MetaData.DA) @@ -72,18 +72,18 @@ func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrie break } - // fraud detected in case blob is retrieved but unable to get blocks from it. + if errors.Is(daBatch.BaseResult.Error, da.ErrBlobNotParsed) { return types.NewErrStateUpdateBlobCorruptedFraud(batch.StateIndex, string(batch.MetaData.DA.Client), batch.MetaData.DA.Height, hex.EncodeToString(batch.MetaData.DA.Commitment)) } - // fraud detected in case availability checks fail and therefore there certainty the blob, according to the state update DA path, is not available. + checkBatchResult := v.blockManager.Retriever.CheckBatchAvailability(batch.MetaData.DA) if errors.Is(checkBatchResult.Error, da.ErrBlobNotIncluded) { return types.NewErrStateUpdateBlobNotAvailableFraud(batch.StateIndex, string(batch.MetaData.DA.Client), batch.MetaData.DA.Height, hex.EncodeToString(batch.MetaData.DA.Commitment)) } - // FIXME: how to handle non-happy case? not returning error? + continue } @@ -93,18 +93,18 @@ func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrie types.LastReceivedDAHeightGauge.Set(float64(batch.EndHeight())) } - // validate DA blocks against the state update + err := v.ValidateDaBlocks(batch, daBlocks) if err != nil { return err } - // nothing to validate at P2P level, finish here. + if len(p2pBlocks) == 0 { return nil } - // validate P2P blocks against DA blocks + err = v.ValidateP2PBlocks(daBlocks, p2pBlocks) if err != nil { return err @@ -113,10 +113,10 @@ func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrie return nil } -// ValidateP2PBlocks basically compares that the blocks applied from P2P are the same blocks included in the batch and retrieved from DA. -// Since DA blocks have been already validated against Hub state info block descriptors, if P2P blocks match with DA blocks, it means they are also validated against state info block descriptors. + + func (v *SettlementValidator) ValidateP2PBlocks(daBlocks []*types.Block, p2pBlocks map[uint64]*types.Block) error { - // iterate over daBlocks and compare hashes with the corresponding block from P2P (if exists) to see whether they are actually the same block + for _, daBlock := range daBlocks { p2pBlock, ok := p2pBlocks[daBlock.Header.Height] @@ -140,9 +140,9 @@ func (v *SettlementValidator) ValidateP2PBlocks(daBlocks []*types.Block, p2pBloc return nil } -// ValidateDaBlocks checks that the information included in the Hub state info (height, state roots and timestamps), correspond to the blocks obtained from DA. 
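// Editor's note: illustrative sketch, not part of this patch. It condenses the per-height
// checks ValidateDaBlocks performs below: the block-descriptor height, state root and
// timestamp must match every block retrieved from DA. Descriptor and block types are
// simplified stand-ins.
package main

import (
	"bytes"
	"fmt"
	"time"
)

type descriptor struct {
	height    uint64
	stateRoot []byte
	timestamp time.Time
}

type daBlock struct {
	height    uint64
	appHash   []byte
	timestamp time.Time
}

func validate(bds []descriptor, blocks []daBlock) error {
	if len(bds) != len(blocks) {
		return fmt.Errorf("num blocks mismatch: %d descriptors vs %d DA blocks", len(bds), len(blocks))
	}
	for i, bd := range bds {
		switch {
		case bd.height != blocks[i].height:
			return fmt.Errorf("height mismatch at index %d", i)
		case !bytes.Equal(bd.stateRoot, blocks[i].appHash):
			return fmt.Errorf("state root mismatch at height %d", bd.height)
		case !bd.timestamp.Equal(blocks[i].timestamp):
			return fmt.Errorf("timestamp mismatch at height %d", bd.height)
		}
	}
	return nil
}

func main() {
	now := time.Now()
	bds := []descriptor{{1, []byte{0xaa}, now}}
	blocks := []daBlock{{1, []byte{0xaa}, now}}
	fmt.Println(validate(bds, blocks)) // <nil>
}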
+ func (v *SettlementValidator) ValidateDaBlocks(slBatch *settlement.ResultRetrieveBatch, daBlocks []*types.Block) error { - // we first verify the numblocks included in the state info match the block descriptors and the blocks obtained from DA + numSlBDs := uint64(len(slBatch.BlockDescriptors)) numSLBlocks := slBatch.NumBlocks numDABlocks := uint64(len(daBlocks)) @@ -150,36 +150,36 @@ func (v *SettlementValidator) ValidateDaBlocks(slBatch *settlement.ResultRetriev return types.NewErrStateUpdateNumBlocksNotMatchingFraud(slBatch.EndHeight, numSLBlocks, numSLBlocks, numDABlocks) } - // we compare all DA blocks against the information included in the state info block descriptors + for i, bd := range slBatch.BlockDescriptors { - // height check + if bd.Height != daBlocks[i].Header.Height { return types.NewErrStateUpdateHeightNotMatchingFraud(slBatch.StateIndex, slBatch.BlockDescriptors[0].Height, daBlocks[0].Header.Height, slBatch.BlockDescriptors[len(slBatch.BlockDescriptors)-1].Height, daBlocks[len(daBlocks)-1].Header.Height) } - // we compare the state root between SL state info and DA block + if !bytes.Equal(bd.StateRoot, daBlocks[i].Header.AppHash[:]) { return types.NewErrStateUpdateStateRootNotMatchingFraud(slBatch.StateIndex, bd.Height, bd.StateRoot, daBlocks[i].Header.AppHash[:]) } - // we compare the timestamp between SL state info and DA block + if !bd.Timestamp.Equal(daBlocks[i].Header.GetTimestamp()) { return types.NewErrStateUpdateTimestampNotMatchingFraud(slBatch.StateIndex, bd.Height, bd.Timestamp, daBlocks[i].Header.GetTimestamp()) } - // we validate block descriptor drs version per height + err := v.validateDRS(slBatch.StateIndex, bd.Height, bd.DrsVersion) if err != nil { return err } } - // we compare the sequencer address between SL state info and DA block - // if next sequencer is not set, we check if the sequencer hash is equal to the next sequencer hash - // because it did not change. If the next sequencer is set, we check if the next sequencer hash is equal on the - // last block of the batch + + + + lastDABlock := daBlocks[numSlBDs-1] - // if lastDaBlock is previous block to fork, dont validate nextsequencerhash of last block because it will not match + if v.blockManager.State.RevisionStartHeight-1 == lastDABlock.Header.Height { v.logger.Debug("DA blocks, previous to fork, validated successfully", "start height", daBlocks[0].Header.Height, "end height", daBlocks[len(daBlocks)-1].Header.Height) return nil @@ -202,8 +202,8 @@ func (v *SettlementValidator) ValidateDaBlocks(slBatch *settlement.ResultRetriev return nil } -// UpdateLastValidatedHeight sets the height saved in the Store if it is higher than the existing height -// returns OK if the value was updated successfully or did not need to be updated + + func (v *SettlementValidator) UpdateLastValidatedHeight(height uint64) { for { curr := v.lastValidatedHeight.Load() @@ -217,17 +217,17 @@ func (v *SettlementValidator) UpdateLastValidatedHeight(height uint64) { } } -// GetLastValidatedHeight returns the most last block height that is validated with settlement state updates. + func (v *SettlementValidator) GetLastValidatedHeight() uint64 { return v.lastValidatedHeight.Load() } -// NextValidationHeight returns the next height that needs to be validated with settlement state updates. + func (v *SettlementValidator) NextValidationHeight() uint64 { return v.lastValidatedHeight.Load() + 1 } -// validateDRS compares the DRS version stored for the specific height, obtained from rollapp params. 
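// Editor's note: illustrative sketch, not part of this patch. It isolates the lock-free,
// only-move-forward update used by the validator's UpdateLastValidatedHeight above: a
// compare-and-swap loop that raises the stored height but never lowers it.
package main

import (
	"fmt"
	"sync/atomic"
)

func storeIfHigher(v *atomic.Uint64, h uint64) {
	for {
		curr := v.Load()
		if h <= curr {
			return // nothing to do: already at or past this height
		}
		if v.CompareAndSwap(curr, h) {
			return
		}
		// another goroutine raced us; re-read and retry
	}
}

func main() {
	var h atomic.Uint64
	storeIfHigher(&h, 10)
	storeIfHigher(&h, 5)  // ignored: lower than the current value
	fmt.Println(h.Load()) // 10
}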
+ func (v *SettlementValidator) validateDRS(stateIndex uint64, height uint64, version uint32) error { drs, err := v.blockManager.Store.LoadDRSVersion(height) if err != nil { @@ -240,7 +240,7 @@ func (v *SettlementValidator) validateDRS(stateIndex uint64, height uint64, vers return nil } -// blockHash generates a hash from the block bytes to compare them + func blockHash(block *types.Block) ([]byte, error) { blockBytes, err := block.MarshalBinary() if err != nil { diff --git a/block/state.go b/block/state.go index 7b1991bc2..2d052de06 100644 --- a/block/state.go +++ b/block/state.go @@ -19,7 +19,7 @@ import ( "github.com/dymensionxyz/dymint/types" ) -// LoadStateOnInit tries to load lastState from Store, and if it's not available it reads GenesisDoc. + func (m *Manager) LoadStateOnInit(store store.Store, genesis *tmtypes.GenesisDoc, logger types.Logger) error { s, err := store.LoadState() if errors.Is(err, types.ErrNoStateFound) { @@ -36,18 +36,18 @@ func (m *Manager) LoadStateOnInit(store store.Store, genesis *tmtypes.GenesisDoc return nil } -// NewStateFromGenesis reads blockchain State from genesis. -// The active sequencer list will be set on InitChain + + func NewStateFromGenesis(genDoc *tmtypes.GenesisDoc) (*types.State, error) { err := genDoc.ValidateAndComplete() if err != nil { return nil, fmt.Errorf("in genesis doc: %w", err) } - // InitStateVersion sets the Consensus.Block and Software versions, - // but leaves the Consensus.App version blank. - // The Consensus.App version will be set during the Handshake, once - // we hear from the app what protocol version it is running. + + + + InitStateVersion := tmstate.Version{ Consensus: tmversion.Consensus{ Block: version.BlockProtocol, @@ -59,7 +59,7 @@ func NewStateFromGenesis(genDoc *tmtypes.GenesisDoc) (*types.State, error) { s := types.State{ Version: InitStateVersion, ChainID: genDoc.ChainID, - InitialHeight: uint64(genDoc.InitialHeight), //nolint:gosec // height is non-negative and falls in int64 + InitialHeight: uint64(genDoc.InitialHeight), ConsensusParams: *genDoc.ConsensusParams, } s.SetHeight(0) @@ -73,29 +73,29 @@ func NewStateFromGenesis(genDoc *tmtypes.GenesisDoc) (*types.State, error) { return &s, nil } -// UpdateStateFromApp is responsible for aligning the state of the store from the abci app + func (m *Manager) UpdateStateFromApp(blockHeaderHash [32]byte) error { proxyAppInfo, err := m.Executor.GetAppInfo() if err != nil { return errorsmod.Wrap(err, "get app info") } - appHeight := uint64(proxyAppInfo.LastBlockHeight) //nolint:gosec // height is non-negative and falls in int64 + appHeight := uint64(proxyAppInfo.LastBlockHeight) resp, err := m.Store.LoadBlockResponses(appHeight) if err != nil { return errorsmod.Wrap(err, "load block responses") } - // update the state with the app hashes created on the app commit + m.Executor.UpdateStateAfterCommit(m.State, resp, proxyAppInfo.LastBlockAppHash, appHeight, blockHeaderHash) return nil } func (e *Executor) UpdateStateAfterInitChain(s *types.State, res *abci.ResponseInitChain) { - // If the app did not return an app hash, we keep the one set from the genesis doc in - // the state. We don't set appHash since we don't want the genesis doc app hash - // recorded in the genesis block. We should probably just remove GenesisDoc.AppHash. 
+ + + if len(res.AppHash) > 0 { copy(s.AppHash[:], res.AppHash) } @@ -106,7 +106,7 @@ func (e *Executor) UpdateStateAfterInitChain(s *types.State, res *abci.ResponseI s.ConsensusParams.Block.MaxGas = params.Block.MaxGas } } - // We update the last results hash with the empty hash, to conform with RFC-6962. + copy(s.LastResultsHash[:], merkle.HashFromByteSlices(nil)) } @@ -115,7 +115,7 @@ func (e *Executor) UpdateMempoolAfterInitChain(s *types.State) { e.mempool.SetPostCheckFn(mempool.PostCheckMaxGas(s.ConsensusParams.Block.MaxGas)) } -// UpdateStateAfterCommit updates the state with the app hash and last results hash + func (e *Executor) UpdateStateAfterCommit(s *types.State, resp *tmstate.ABCIResponses, appHash []byte, height uint64, lastHeaderHash [32]byte) { copy(s.AppHash[:], appHash[:]) copy(s.LastResultsHash[:], tmtypes.NewResults(resp.DeliverTxs).Hash()) @@ -132,26 +132,26 @@ func (e *Executor) UpdateStateAfterCommit(s *types.State, resp *tmstate.ABCIResp } } -// UpdateProposerFromBlock updates the proposer from the block -// The next proposer is defined in the block header (NextSequencersHash) -// TODO: (https://github.com/dymensionxyz/dymint/issues/1008) + + + func (e *Executor) UpdateProposerFromBlock(s *types.State, seqSet *types.SequencerSet, block *types.Block) bool { - // no sequencer change + if bytes.Equal(block.Header.SequencerHash[:], block.Header.NextSequencersHash[:]) { return false } if block.Header.NextSequencersHash == [32]byte{} { - // the chain will be halted until proposer is set - // TODO: recover from halt (https://github.com/dymensionxyz/dymint/issues/1021) + + e.logger.Info("rollapp left with no proposer. chain is halted") s.SetProposer(nil) return true } - // if hash changed, update the proposer - // We assume here that we're updated with the latest sequencer set - // FIXME: Think how to handle not being updated with the latest sequencer set + + + seq, found := seqSet.GetByHash(block.Header.NextSequencersHash[:]) if !found { e.logger.Error("cannot find proposer by hash") diff --git a/block/submit.go b/block/submit.go index 3ee4e2dc4..87150c3c9 100644 --- a/block/submit.go +++ b/block/submit.go @@ -17,11 +17,11 @@ import ( uchannel "github.com/dymensionxyz/dymint/utils/channel" ) -// SubmitLoop is the main loop for submitting blocks to the DA and SL layers. -// It submits a batch when either -// 1) It accumulates enough block data, so it's necessary to submit a batch to avoid exceeding the max size -// 2) Enough time passed since the last submitted batch, so it's necessary to submit a batch to avoid exceeding the max time -// It will back pressure (pause) block production if it falls too far behind. + + + + + func (m *Manager) SubmitLoop(ctx context.Context, bytesProduced chan int, ) (err error) { @@ -39,41 +39,41 @@ func (m *Manager) SubmitLoop(ctx context.Context, ) } -// SubmitLoopInner is a unit testable impl of SubmitLoop + func SubmitLoopInner( ctx context.Context, logger types.Logger, - bytesProduced chan int, // a channel of block and commit bytes produced - maxSkewTime time.Duration, // max time between last submitted block and last produced block allowed. if this threshold is reached block production is stopped. 
- unsubmittedBlocksNum func() uint64, // func that returns the amount of non-submitted blocks - unsubmittedBlocksBytes func() int, // func that returns bytes from non-submitted blocks - batchSkewTime func() time.Duration, // func that returns measured time between last submitted block and last produced block - maxBatchSubmitTime time.Duration, // max time to allow between batches - maxBatchSubmitBytes uint64, // max size of serialised batch in bytes + bytesProduced chan int, + maxSkewTime time.Duration, + unsubmittedBlocksNum func() uint64, + unsubmittedBlocksBytes func() int, + batchSkewTime func() time.Duration, + maxBatchSubmitTime time.Duration, + maxBatchSubmitBytes uint64, createAndSubmitBatch func(maxSizeBytes uint64) (bytes uint64, err error), ) error { eg, ctx := errgroup.WithContext(ctx) pendingBytes := atomic.Uint64{} - trigger := uchannel.NewNudger() // used to avoid busy waiting (using cpu) on trigger thread - submitter := uchannel.NewNudger() // used to avoid busy waiting (using cpu) on submitter thread + trigger := uchannel.NewNudger() + submitter := uchannel.NewNudger() eg.Go(func() error { - // 'trigger': this thread is responsible for waking up the submitter when a new block arrives, and back-pressures the block production loop - // if it gets too far ahead. + + for { select { case <-ctx.Done(): return nil case n := <-bytesProduced: - pendingBytes.Add(uint64(n)) //nolint:gosec // bytes size is always positive + pendingBytes.Add(uint64(n)) logger.Debug("Added bytes produced to bytes pending submission counter.", "bytes added", n, "pending", pendingBytes.Load()) } submitter.Nudge() - // if the time between the last produced block and last submitted is greater than maxSkewTime we block here until we get a progress nudge from the submitter thread + if maxSkewTime < batchSkewTime() { select { case <-ctx.Done(): @@ -86,7 +86,7 @@ func SubmitLoopInner( }) eg.Go(func() error { - // 'submitter': this thread actually creates and submits batches. this thread is woken up every batch_submit_time (in addition to every block produced) to check if there is anything to submit even if no new blocks have been produced + ticker := time.NewTicker(maxBatchSubmitTime) for { select { @@ -98,7 +98,7 @@ func SubmitLoopInner( pending := pendingBytes.Load() - // while there are accumulated blocks, create and submit batches!! + for { done := ctx.Err() != nil nothingToSubmit := pending == 0 @@ -119,22 +119,22 @@ func SubmitLoopInner( logger.Error("Create and submit batch", "err", err, "pending", pending) panic(err) } - // this could happen if we timed-out waiting for acceptance in the previous iteration, but the batch was indeed submitted. - // we panic here cause restarting may reset the last batch submitted counter and the sequencer can potentially resume submitting batches. 
+ + if errors.Is(err, gerrc.ErrAlreadyExists) { logger.Debug("Batch already accepted", "err", err, "pending", pending) panic(err) } return err } - pending = uint64(unsubmittedBlocksBytes()) //nolint:gosec // bytes size is always positive - // after new batch submitted we check the skew time to wake up 'trigger' thread and restart block production + pending = uint64(unsubmittedBlocksBytes()) + if batchSkewTime() < maxSkewTime { trigger.Nudge() } logger.Debug("Submitted a batch to both sub-layers.", "n bytes consumed from pending", nConsumed, "pending after", pending, "skew time", batchSkewTime()) } - // update pendingBytes with non submitted block bytes after all pending batches have been submitted + pendingBytes.Store(pending) } }) @@ -142,25 +142,25 @@ func SubmitLoopInner( return eg.Wait() } -// CreateAndSubmitBatchGetSizeBlocksCommits creates and submits a batch to the DA and SL. -// Returns size of block and commit bytes -// max size bytes is the maximum size of the serialized batch type + + + func (m *Manager) CreateAndSubmitBatchGetSizeBlocksCommits(maxSize uint64) (uint64, error) { b, err := m.CreateAndSubmitBatch(maxSize, false) if b == nil { return 0, err } - return uint64(b.SizeBlockAndCommitBytes()), err //nolint:gosec // size is always positive and falls in uint64 + return uint64(b.SizeBlockAndCommitBytes()), err } -// CreateAndSubmitBatch creates and submits a batch to the DA and SL. -// max size bytes is the maximum size of the serialized batch type + + func (m *Manager) CreateAndSubmitBatch(maxSizeBytes uint64, lastBatch bool) (*types.Batch, error) { startHeight := m.NextHeightToSubmit() endHeightInclusive := m.State.Height() if endHeightInclusive < startHeight { - // TODO: https://github.com/dymensionxyz/dymint/issues/999 + return nil, fmt.Errorf( "next height to submit is greater than last block height, create and submit batch should not have been called: start height: %d: end height inclusive: %d: %w", startHeight, @@ -173,7 +173,7 @@ func (m *Manager) CreateAndSubmitBatch(maxSizeBytes uint64, lastBatch bool) (*ty if err != nil { return nil, fmt.Errorf("create batch: %w", err) } - // This is the last batch, so we need to mark it as such + if lastBatch && b.EndHeight() == endHeightInclusive { b.LastBatch = true } @@ -187,8 +187,8 @@ func (m *Manager) CreateAndSubmitBatch(maxSizeBytes uint64, lastBatch bool) (*ty return b, nil } -// CreateBatch looks through the store for any unsubmitted blocks and commits and bundles them into a batch -// max size bytes is the maximum size of the serialized batch type + + func (m *Manager) CreateBatch(maxBatchSize uint64, startHeight uint64, endHeightInclusive uint64) (*types.Batch, error) { batchSize := endHeightInclusive - startHeight + 1 batch := &types.Batch{ @@ -211,7 +211,7 @@ func (m *Manager) CreateBatch(maxBatchSize uint64, startHeight uint64, endHeight return nil, fmt.Errorf("load drs version: h: %d: %w", h, err) } - // check all blocks have the same revision + if len(batch.Blocks) > 0 && batch.Blocks[len(batch.Blocks)-1].GetRevision() != block.GetRevision() { return nil, fmt.Errorf("create batch: batch includes blocks with different revisions: %w", gerrc.ErrInternal) } @@ -221,9 +221,9 @@ func (m *Manager) CreateBatch(maxBatchSize uint64, startHeight uint64, endHeight batch.DRSVersion = append(batch.DRSVersion, drsVersion) totalSize := batch.SizeBytes() - if maxBatchSize < uint64(totalSize) { //nolint:gosec // size is always positive and falls in uint64 + if maxBatchSize < uint64(totalSize) { - // Remove the last block and 
commit from the batch + batch.Blocks = batch.Blocks[:len(batch.Blocks)-1] batch.Commits = batch.Commits[:len(batch.Commits)-1] batch.DRSVersion = batch.DRSVersion[:len(batch.DRSVersion)-1] @@ -256,19 +256,17 @@ func (m *Manager) SubmitBatch(batch *types.Batch) error { types.RollappHubHeightGauge.Set(float64(batch.EndHeight())) m.LastSettlementHeight.Store(batch.EndHeight()) - // update last submitted block time with batch last block (used to calculate max skew time) + m.LastBlockTimeInSettlement.Store(batch.Blocks[len(batch.Blocks)-1].Header.GetTimestamp().UTC().UnixNano()) return err } -// GetUnsubmittedBytes returns the total number of unsubmitted bytes produced an element on a channel -// Intended only to be used at startup, before block production and submission loops start + + func (m *Manager) GetUnsubmittedBytes() int { total := 0 - /* - On node start we want to include the count of any blocks which were produced and not submitted in a previous instance - */ + currH := m.State.Height() for h := m.NextHeightToSubmit(); h <= currH; h++ { @@ -296,8 +294,8 @@ func (m *Manager) GetUnsubmittedBlocks() uint64 { return m.State.Height() - m.LastSettlementHeight.Load() } -// UpdateLastSubmittedHeight will update last height submitted height upon events. -// This may be necessary in case we crashed/restarted before getting response for our submission to the settlement layer. + + func (m *Manager) UpdateLastSubmittedHeight(event pubsub.Message) { eventData, ok := event.Data().(*settlement.EventDataNewBatch) if !ok { @@ -314,7 +312,7 @@ func (m *Manager) UpdateLastSubmittedHeight(event pubsub.Message) { } } -// GetBatchSkewTime returns the time between the last produced block and the last block submitted to SL + func (m *Manager) GetBatchSkewTime() time.Duration { lastProducedTime := time.Unix(0, m.LastBlockTime.Load()) lastSubmittedTime := time.Unix(0, m.LastBlockTimeInSettlement.Load()) diff --git a/block/sync.go b/block/sync.go index 9c3605669..bef64587e 100644 --- a/block/sync.go +++ b/block/sync.go @@ -12,7 +12,7 @@ import ( "github.com/dymensionxyz/dymint/settlement" ) -// onNewStateUpdate will update the last submitted height and will update sequencers list from SL. After, it triggers syncing or validation, depending whether it needs to sync first or only validate. + func (m *Manager) onNewStateUpdate(event pubsub.Message) { eventData, ok := event.Data().(*settlement.EventDataNewBatch) if !ok { @@ -20,32 +20,32 @@ func (m *Manager) onNewStateUpdate(event pubsub.Message) { return } - // Update heights based on state update end height + m.LastSettlementHeight.Store(eventData.EndHeight) - // Update sequencers list from SL + err := m.UpdateSequencerSetFromSL() if err != nil { - // this error is not critical + m.logger.Error("Cannot fetch sequencer set from the Hub", "error", err) } if eventData.EndHeight > m.State.Height() { - // trigger syncing from settlement last state update. + m.triggerSettlementSyncing() - // update target height used for syncing status rpc + m.UpdateTargetHeight(eventData.EndHeight) } else { - // trigger validation of the last state update available in settlement + m.triggerSettlementValidation() } } -// SettlementSyncLoop listens for syncing triggers which indicate new settlement height updates, and attempts to sync to the last seen settlement height. -// Syncing triggers can be called when a new settlement state update event arrives or explicitly from the `updateFromLastSettlementState` method which is only being called upon startup. 
-// Upon new trigger, we know the settlement reached a new height we haven't seen before so a validation signal is sent to validate the settlement batch. -// Note: even when a sync is triggered, there is no guarantee that the batch will be applied from settlement as there is a race condition with the p2p/blocksync for syncing. + + + + func (m *Manager) SettlementSyncLoop(ctx context.Context) error { for { select { @@ -55,12 +55,12 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error { m.logger.Info("syncing to target height", "targetHeight", m.LastSettlementHeight.Load()) for currH := m.State.NextHeight(); currH <= m.LastSettlementHeight.Load(); currH = m.State.NextHeight() { - // if context has been cancelled, stop syncing + if ctx.Err() != nil { return nil } - // if we have the block locally, we don't need to fetch it from the DA. - // it will only happen in case of rollback. + + err := m.applyLocalBlock() if err == nil { m.logger.Info("Synced from local", "store height", m.State.Height(), "target height", m.LastSettlementHeight.Load()) @@ -76,12 +76,12 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error { } m.logger.Info("Retrieved state update from SL.", "state_index", settlementBatch.StateIndex) - // we update LastBlockTimeInSettlement to be able to measure batch skew time with last block time in settlement + m.LastBlockTimeInSettlement.Store(settlementBatch.BlockDescriptors[len(settlementBatch.BlockDescriptors)-1].GetTimestamp().UTC().UnixNano()) err = m.ApplyBatchFromSL(settlementBatch.Batch) - // this will keep sync loop alive when DA is down or retrievals are failing because DA issues. + if errors.Is(err, da.ErrRetrieval) { continue } @@ -91,7 +91,7 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error { m.logger.Info("Synced from DA", "store height", m.State.Height(), "target height", m.LastSettlementHeight.Load()) - // trigger state update validation, after each state update is applied + m.triggerSettlementValidation() err = m.attemptApplyCachedBlocks() @@ -101,10 +101,10 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error { } - // avoid notifying as synced in case it fails before + if m.State.Height() >= m.LastSettlementHeight.Load() { m.logger.Info("Synced.", "current height", m.State.Height(), "last submitted height", m.LastSettlementHeight.Load()) - // nudge to signal to any listens that we're currently synced with the last settlement height we've seen so far + m.syncedFromSettlement.Nudge() } @@ -112,14 +112,14 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error { } } -// waitForSyncing waits for synced nudge (in case it needs to because it was syncing) + func (m *Manager) waitForSettlementSyncing() { if m.State.Height() < m.LastSettlementHeight.Load() { <-m.syncedFromSettlement.C } } -// triggerStateUpdateSyncing sends signal to channel used by syncing loop + func (m *Manager) triggerSettlementSyncing() { select { case m.settlementSyncingC <- struct{}{}: @@ -128,7 +128,7 @@ func (m *Manager) triggerSettlementSyncing() { } } -// triggerStateUpdateValidation sends signal to channel used by validation loop + func (m *Manager) triggerSettlementValidation() { select { case m.settlementValidationC <- struct{}{}: diff --git a/block/validate.go b/block/validate.go index d2a86d07f..e4078fe8a 100644 --- a/block/validate.go +++ b/block/validate.go @@ -11,8 +11,8 @@ import ( "github.com/tendermint/tendermint/libs/pubsub" ) -// onNewStateUpdateFinalized will update the last validated height with the last finalized 
height. -// Unlike pending heights, once heights are finalized, we treat them as validated as there is no point validating finalized heights. + + func (m *Manager) onNewStateUpdateFinalized(event pubsub.Message) { eventData, ok := event.Data().(*settlement.EventDataNewBatch) if !ok { @@ -22,7 +22,7 @@ func (m *Manager) onNewStateUpdateFinalized(event pubsub.Message) { m.SettlementValidator.UpdateLastValidatedHeight(eventData.EndHeight) } -// SettlementValidateLoop listens for syncing events (from new state update or from initial syncing) and validates state updates to the last submitted height. + func (m *Manager) SettlementValidateLoop(ctx context.Context) error { for { select { @@ -33,14 +33,14 @@ func (m *Manager) SettlementValidateLoop(ctx context.Context) error { m.logger.Info("validating state updates to target height", "targetHeight", targetValidationHeight) for currH := m.SettlementValidator.NextValidationHeight(); currH <= targetValidationHeight; currH = m.SettlementValidator.NextValidationHeight() { - // get next batch that needs to be validated from SL + batch, err := m.SLClient.GetBatchAtHeight(currH) if err != nil { uevent.MustPublish(ctx, m.Pubsub, &events.DataHealthStatus{Error: err}, events.HealthStatusList) return err } - // validate batch + err = m.SettlementValidator.ValidateStateUpdate(batch) if err != nil { if errors.Is(err, gerrc.ErrFault) { @@ -51,7 +51,7 @@ func (m *Manager) SettlementValidateLoop(ctx context.Context) error { return err } - // update the last validated height to the batch last block height + m.SettlementValidator.UpdateLastValidatedHeight(batch.EndHeight) m.logger.Debug("state info validated", "lastValidatedHeight", m.SettlementValidator.GetLastValidatedHeight()) diff --git a/cmd/dymint/commands/init.go b/cmd/dymint/commands/init.go index 9587731fd..ce3ee91e3 100644 --- a/cmd/dymint/commands/init.go +++ b/cmd/dymint/commands/init.go @@ -14,7 +14,7 @@ import ( tmtime "github.com/tendermint/tendermint/types/time" ) -// InitFilesCmd initialises a fresh Dymint Core instance. + var InitFilesCmd = &cobra.Command{ Use: "init", Short: "Initialize Dymint", @@ -25,9 +25,9 @@ func initFiles(cmd *cobra.Command, args []string) error { return InitFilesWithConfig(tmconfig) } -// InitFilesWithConfig initialises a fresh Dymint instance. + func InitFilesWithConfig(config *cfg.Config) error { - // private validator + privValKeyFile := config.PrivValidatorKeyFile() privValStateFile := config.PrivValidatorStateFile() var pv *privval.FilePV @@ -52,7 +52,7 @@ func InitFilesWithConfig(config *cfg.Config) error { logger.Info("Generated node key", "path", nodeKeyFile) } - // genesis file + genFile := config.GenesisFile() if tmos.FileExists(genFile) { logger.Info("Found genesis file", "path", genFile) diff --git a/cmd/dymint/commands/root.go b/cmd/dymint/commands/root.go index af981f80e..8db70aedc 100644 --- a/cmd/dymint/commands/root.go +++ b/cmd/dymint/commands/root.go @@ -28,8 +28,8 @@ func registerFlagsRootCmd(cmd *cobra.Command) { cmd.PersistentFlags().String("log_level", tmconfig.LogLevel, "log level") } -// ParseConfig retrieves the default environment configuration, -// sets up the Dymint root and ensures that the root exists + + func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) { conf := cfg.DefaultConfig() err := viper.Unmarshal(conf) @@ -60,14 +60,14 @@ func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) { return conf, nil } -// RootCmd is the root command for Dymint core. 
+ var RootCmd = &cobra.Command{ Use: "dymint", Short: "ABCI-client implementation for dymension's autonomous rollapps", PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) { v := viper.GetViper() - // cmd.Flags() includes flags from this command and all persistent flags from the parent + if err := v.BindPFlags(cmd.Flags()); err != nil { return err } diff --git a/cmd/dymint/commands/show_node_id.go b/cmd/dymint/commands/show_node_id.go index 1ca1b3322..30d3c9e87 100644 --- a/cmd/dymint/commands/show_node_id.go +++ b/cmd/dymint/commands/show_node_id.go @@ -10,7 +10,7 @@ import ( "github.com/tendermint/tendermint/p2p" ) -// ShowNodeIDCmd dumps node's ID to the standard output. + var ShowNodeIDCmd = &cobra.Command{ Use: "show-node-id", Aliases: []string{"show_node_id"}, @@ -27,7 +27,7 @@ func showNodeID(cmd *cobra.Command, args []string) error { if err != nil { return err } - // convert nodeKey to libp2p key + host, err := libp2p.New(libp2p.Identity(signingKey)) if err != nil { return err diff --git a/cmd/dymint/commands/show_sequencer.go b/cmd/dymint/commands/show_sequencer.go index cb6e72955..2faff6840 100644 --- a/cmd/dymint/commands/show_sequencer.go +++ b/cmd/dymint/commands/show_sequencer.go @@ -9,13 +9,13 @@ import ( "github.com/tendermint/tendermint/privval" ) -// ShowSequencer adds capabilities for showing the validator info. + var ShowSequencer = &cobra.Command{ Use: "show-sequencer", Aliases: []string{"show_sequencer"}, Short: "Show this node's sequencer info", RunE: showSequencer, - // PreRun: deprecateSnakeCase, + } func showSequencer(cmd *cobra.Command, args []string) error { diff --git a/cmd/dymint/commands/start.go b/cmd/dymint/commands/start.go index 3bfa6e503..1615ff2cd 100644 --- a/cmd/dymint/commands/start.go +++ b/cmd/dymint/commands/start.go @@ -32,8 +32,8 @@ import ( var genesisHash []byte -// NewRunNodeCmd returns the command that allows the CLI to start a node. -// It can be used with a custom PrivValidator and in-process ABCI application. + + func NewRunNodeCmd() *cobra.Command { cmd := &cobra.Command{ Use: "start", @@ -125,7 +125,7 @@ func startInProcess(config *cfg.NodeConfig, tmConfig *tmcfg.Config, logger log.L logger.Info("Started dymint node") - // Stop upon receiving SIGTERM or CTRL-C. + tmos.TrapSignal(logger, func() { logger.Info("Caught SIGTERM. Exiting...") if dymintNode.IsRunning() { @@ -135,7 +135,7 @@ func startInProcess(config *cfg.NodeConfig, tmConfig *tmcfg.Config, logger log.L } }) - // Run forever. + select {} } @@ -148,7 +148,7 @@ func checkGenesisHash(config *tmcfg.Config) error { return nil } - // Calculate SHA-256 hash of the genesis file. + f, err := os.Open(config.GenesisFile()) if err != nil { return fmt.Errorf("can't open genesis file: %w", err) @@ -164,7 +164,7 @@ func checkGenesisHash(config *tmcfg.Config) error { } actualHash := h.Sum(nil) - // Compare with the flag. 
+ if !bytes.Equal(genesisHash, actualHash) { return fmt.Errorf( "--genesis_hash=%X does not match %s hash: %X", diff --git a/cmd/dymint/main.go b/cmd/dymint/main.go index 631383649..200c33f82 100644 --- a/cmd/dymint/main.go +++ b/cmd/dymint/main.go @@ -20,7 +20,7 @@ func main() { cli.NewCompletionCmd(rootCmd, true), ) - // Create & start node + rootCmd.AddCommand(commands.NewRunNodeCmd()) cmd := cli.PrepareBaseCmd(rootCmd, "DM", os.ExpandEnv(filepath.Join("$HOME", config.DefaultDymintDir))) diff --git a/config/config.go b/config/config.go index c19c58277..65b9e09e3 100644 --- a/config/config.go +++ b/config/config.go @@ -14,7 +14,7 @@ import ( ) const ( - // DefaultDymintDir is the default directory for dymint + DefaultDymintDir = ".dymint" DefaultConfigDirName = "config" DefaultConfigFileName = "dymint.toml" @@ -23,63 +23,63 @@ const ( MaxBatchSubmitTime = 1 * time.Hour ) -// NodeConfig stores Dymint node configuration. + type NodeConfig struct { - // parameters below are translated from existing config + RootDir string DBPath string RPC RPCConfig MempoolConfig tmcfg.MempoolConfig - // parameters below are dymint specific and read from config + BlockManagerConfig `mapstructure:",squash"` DAConfig string `mapstructure:"da_config"` SettlementLayer string `mapstructure:"settlement_layer"` SettlementConfig settlement.Config `mapstructure:",squash"` Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"` - // Config params for mock grpc da + DAGrpc grpc.Config `mapstructure:",squash"` - // P2P Options + P2PConfig `mapstructure:",squash"` - // DB Options + DBConfig `mapstructure:"db"` } -// BlockManagerConfig consists of all parameters required by BlockManagerConfig + type BlockManagerConfig struct { - // BlockTime defines how often new blocks are produced + BlockTime time.Duration `mapstructure:"block_time"` - // MaxIdleTime defines how long should block manager wait for new transactions before producing empty block + MaxIdleTime time.Duration `mapstructure:"max_idle_time"` - // MaxProofTime defines the max time to be idle, if txs that requires proof were included in last block + MaxProofTime time.Duration `mapstructure:"max_proof_time"` - // BatchSubmitMaxTime is how long should block manager wait for before submitting batch + BatchSubmitTime time.Duration `mapstructure:"batch_submit_time"` - // MaxSkewTime is the number of batches waiting to be submitted. Block production will be paused if this limit is reached. + MaxSkewTime time.Duration `mapstructure:"max_skew_time"` - // The size of the batch of blocks and commits in Bytes. We'll write every batch to the DA and the settlement layer. + BatchSubmitBytes uint64 `mapstructure:"batch_submit_bytes"` - // SequencerSetUpdateInterval defines the interval at which to fetch sequencer updates from the settlement layer + SequencerSetUpdateInterval time.Duration `mapstructure:"sequencer_update_interval"` } -// GetViperConfig reads configuration parameters from Viper instance. 
+ func (nc *NodeConfig) GetViperConfig(cmd *cobra.Command, homeDir string) error { v := viper.GetViper() - // Loads dymint toml config file + EnsureRoot(homeDir, nil) v.SetConfigName("dymint") - v.AddConfigPath(homeDir) // search root directory - v.AddConfigPath(filepath.Join(homeDir, DefaultConfigDirName)) // search root directory /config + v.AddConfigPath(homeDir) + v.AddConfigPath(filepath.Join(homeDir, DefaultConfigDirName)) - // bind flags so we could override config file with flags + err := BindDymintFlags(cmd, v) if err != nil { return err } - // Read viper config + err = v.ReadInConfig() if err != nil { return err @@ -126,7 +126,7 @@ func (nc NodeConfig) Validate() error { return nil } -// Validate BlockManagerConfig + func (c BlockManagerConfig) Validate() error { if c.BlockTime < MinBlockTime { return fmt.Errorf("block_time cannot be less than %s", MinBlockTime) @@ -139,7 +139,7 @@ func (c BlockManagerConfig) Validate() error { if c.MaxIdleTime < 0 { return fmt.Errorf("max_idle_time must be positive or zero to disable") } - // MaxIdleTime zero disables adaptive block production. + if c.MaxIdleTime != 0 { if c.MaxIdleTime <= c.BlockTime || c.MaxIdleTime > MaxBatchSubmitTime { return fmt.Errorf("max_idle_time must be greater than block_time and not greater than %s", MaxBatchSubmitTime) @@ -203,14 +203,14 @@ func (nc NodeConfig) validateInstrumentation() error { return nc.Instrumentation.Validate() } -// InstrumentationConfig defines the configuration for metrics reporting. + type InstrumentationConfig struct { - // When true, Prometheus metrics are served under /metrics on - // PrometheusListenAddr. - // Check out the documentation for the list of available metrics. + + + Prometheus bool `mapstructure:"prometheus"` - // Address to listen for Prometheus collector(s) connections. + PrometheusListenAddr string `mapstructure:"prometheus_listen_addr"` } @@ -222,11 +222,11 @@ func (ic InstrumentationConfig) Validate() error { return nil } -// DBConfig holds configuration for the database. + type DBConfig struct { - // SyncWrites makes sure that data is written to disk before returning from a write operation. + SyncWrites bool `mapstructure:"sync_writes"` - // InMemory sets the database to run in-memory, without touching the disk. + InMemory bool `mapstructure:"in_memory"` } diff --git a/config/defaults.go b/config/defaults.go index b72ef3aac..0a75b14a6 100644 --- a/config/defaults.go +++ b/config/defaults.go @@ -9,7 +9,7 @@ import ( ) const ( - // DefaultListenAddress is a default listen address for P2P client. + DefaultListenAddress = "/ip4/0.0.0.0/tcp/26656" DefaultHomeDir = "sequencer_keys" @@ -17,10 +17,10 @@ const ( DefaultSequencerSetUpdateInterval = 3 * time.Minute ) -// DefaultNodeConfig keeps default values of NodeConfig + var DefaultNodeConfig = *DefaultConfig("") -// DefaultConfig returns a default configuration for dymint node. 
+ func DefaultConfig(home string) *NodeConfig { cfg := &NodeConfig{ BlockManagerConfig: BlockManagerConfig{ @@ -57,7 +57,7 @@ func DefaultConfig(home string) *NodeConfig { } keyringDir := filepath.Join(home, DefaultHomeDir) - // Setting default params for sl grpc mock + defaultSlGrpcConfig := settlement.GrpcConfig{ Host: "127.0.0.1", Port: 7981, @@ -79,7 +79,7 @@ func DefaultConfig(home string) *NodeConfig { } cfg.SettlementConfig = defaultSLconfig - // Setting default params for da grpc mock + defaultDAGrpc := grpc.Config{ Host: "127.0.0.1", Port: 7980, diff --git a/config/flags.go b/config/flags.go index 1f1eaf83e..d476c39f2 100644 --- a/config/flags.go +++ b/config/flags.go @@ -32,11 +32,11 @@ const ( FlagP2PBootstrapRetryTime = "dymint.p2p_config.bootstrap_retry_time" ) -// AddNodeFlags adds Dymint specific configuration options to cobra Command. -// -// This function is called in cosmos-sdk. + + + func AddNodeFlags(cmd *cobra.Command) { - // Add tendermint default flags + tmcmd.AddNodeFlags(cmd) def := DefaultNodeConfig @@ -58,7 +58,7 @@ func AddNodeFlags(cmd *cobra.Command) { cmd.Flags().String(FlagP2PListenAddress, def.P2PConfig.ListenAddress, "P2P listen address") cmd.Flags().String(FlagP2PBootstrapNodes, def.P2PConfig.BootstrapNodes, "P2P bootstrap nodes") cmd.Flags().Duration(FlagP2PBootstrapRetryTime, def.P2PConfig.BootstrapRetryTime, "P2P bootstrap time") - cmd.Flags().Uint64(FlagP2PGossipCacheSize, uint64(def.P2PConfig.GossipSubCacheSize), "P2P Gossiped blocks cache size") //nolint:gosec // GossipSubCacheSize should be always positive + cmd.Flags().Uint64(FlagP2PGossipCacheSize, uint64(def.P2PConfig.GossipSubCacheSize), "P2P Gossiped blocks cache size") } func BindDymintFlags(cmd *cobra.Command, v *viper.Viper) error { diff --git a/config/p2p.go b/config/p2p.go index a2449ed43..71b18b180 100644 --- a/config/p2p.go +++ b/config/p2p.go @@ -5,27 +5,27 @@ import ( "time" ) -// P2PConfig stores configuration related to peer-to-peer networking. + type P2PConfig struct { - // Listening address for P2P connections + ListenAddress string `mapstructure:"p2p_listen_address"` - // List of nodes used for P2P bootstrapping + BootstrapNodes string `mapstructure:"p2p_bootstrap_nodes"` - // List of nodes persistent P2P nodes + PersistentNodes string `mapstructure:"p2p_persistent_nodes"` - // Size of the Gossipsub router cache + GossipSubCacheSize int `mapstructure:"p2p_gossip_cache_size"` - // Time interval a node tries to bootstrap again, in case no nodes connected + BootstrapRetryTime time.Duration `mapstructure:"p2p_bootstrap_retry_time"` - // Param used to enable block sync from p2p + BlockSyncEnabled bool `mapstructure:"p2p_blocksync_enabled"` - // Time interval used by a node to request missing blocks (gap between cached blocks and local height) on demand from other peers using blocksync + BlockSyncRequestIntervalTime time.Duration `mapstructure:"p2p_blocksync_block_request_interval"` - // Param used to enable the advertisement of the node to be part of the P2P network in the DHT + AdvertisingEnabled bool `mapstructure:"p2p_advertising_enabled"` } -// Validate P2PConfig + func (c P2PConfig) Validate() error { if c.GossipSubCacheSize < 0 { return fmt.Errorf("gossipsub cache size cannot be negative") diff --git a/config/rpc.go b/config/rpc.go index d6b14303a..baa5e8e7b 100644 --- a/config/rpc.go +++ b/config/rpc.go @@ -1,38 +1,38 @@ package config -// RPCConfig holds RPC configuration params. 
+ type RPCConfig struct { ListenAddress string - // Cross Origin Resource Sharing settings + CORSAllowedOrigins []string CORSAllowedMethods []string CORSAllowedHeaders []string - // Maximum number of simultaneous connections (including WebSocket). - // Does not include gRPC connections. See grpc-max-open-connections - // If you want to accept a larger number than the default, make sure - // you increase your OS limits. - // 0 - unlimited. - // Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} - // 1024 - 40 - 10 - 50 = 924 = ~900 + + + + + + + MaxOpenConnections int - // The path to a file containing certificate that is used to create the HTTPS server. - // Might be either absolute path or path related to Tendermint's config directory. - // - // If the certificate is signed by a certificate authority, - // the certFile should be the concatenation of the server's certificate, any intermediates, - // and the CA's certificate. - // - // NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. - // Otherwise, HTTP server is run. + + + + + + + + + TLSCertFile string `mapstructure:"tls-cert-file"` - // The path to a file containing matching private key that is used to create the HTTPS server. - // Might be either absolute path or path related to tendermint's config directory. - // - // NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. - // Otherwise, HTTP server is run. + + + + + TLSKeyFile string `mapstructure:"tls-key-file"` } diff --git a/config/toml.go b/config/toml.go index 9ee3d544d..4bf51e276 100644 --- a/config/toml.go +++ b/config/toml.go @@ -9,7 +9,7 @@ import ( tmos "github.com/tendermint/tendermint/libs/os" ) -// DefaultDirPerm is the default permissions used when creating directories. + const DefaultDirPerm = 0o700 var configTemplate *template.Template @@ -24,10 +24,10 @@ func init() { } } -/****** these are for production settings ***********/ -// EnsureRoot creates the root, config, and data directories if they don't exist, -// and panics if it fails. + + + func EnsureRoot(rootDir string, defaultConfig *NodeConfig) { if err := tmos.EnsureDir(rootDir, DefaultDirPerm); err != nil { panic(err.Error()) @@ -42,13 +42,13 @@ func EnsureRoot(rootDir string, defaultConfig *NodeConfig) { configFilePath := filepath.Join(rootDir, DefaultConfigDirName, DefaultConfigFileName) - // Write default config file if missing. + if !tmos.FileExists(configFilePath) { WriteConfigFile(configFilePath, defaultConfig) } } -// WriteConfigFile renders config using the template and writes it to configFilePath. + func WriteConfigFile(configFilePath string, config *NodeConfig) { var buffer bytes.Buffer @@ -59,8 +59,8 @@ func WriteConfigFile(configFilePath string, config *NodeConfig) { tmos.MustWriteFile(configFilePath, buffer.Bytes(), 0o644) } -// Note: any changes to the comments/variables/mapstructure -// must be reflected in the appropriate struct in config/config.go + + const defaultConfigTemplate = ` ####################################################### ### Dymint Configuration Options ### diff --git a/conv/config.go b/conv/config.go index 65498dc12..ec9b9e7f4 100644 --- a/conv/config.go +++ b/conv/config.go @@ -8,10 +8,10 @@ import ( "github.com/dymensionxyz/dymint/config" ) -// GetNodeConfig translates Tendermint's configuration into Dymint configuration. -// -// This method only translates configuration, and doesn't verify it. 
If some option is missing in Tendermint's -// config, it's skipped during translation. + + + + func GetNodeConfig(nodeConf *config.NodeConfig, tmConf *tmcfg.Config) error { if tmConf == nil { return errors.New("tendermint config is nil but required to populate Dymint config") @@ -31,12 +31,7 @@ func GetNodeConfig(nodeConf *config.NodeConfig, tmConf *tmcfg.Config) error { if tmConf.Mempool == nil { return errors.New("tendermint mempool config is nil but required to populate Dymint config") } - /* - In the above, we are copying the rpc/p2p from Tendermint's configuration to Dymint's configuration. - This was implemented by the original rollkit authors, and they have not provided any explanation for this. - - For the mempool we simply copy the object. If we want to be more selective, we can adjust later. - */ + nodeConf.MempoolConfig = *tmConf.Mempool return nil diff --git a/conv/crypto.go b/conv/crypto.go index b2c49e18a..4f04470fa 100644 --- a/conv/crypto.go +++ b/conv/crypto.go @@ -8,7 +8,7 @@ import ( "github.com/tendermint/tendermint/p2p" ) -// GetNodeKey creates libp2p private key from Tendermints NodeKey. + func GetNodeKey(nodeKey *p2p.NodeKey) (crypto.PrivKey, error) { if nodeKey == nil || nodeKey.PrivKey == nil { return nil, ErrNilKey diff --git a/da/avail/avail.go b/da/avail/avail.go index 81c30b48b..3d375b000 100644 --- a/da/avail/avail.go +++ b/da/avail/avail.go @@ -34,7 +34,7 @@ const ( DataCallMethod = "submit_data" DataCallSectionIndex = 29 DataCallMethodIndex = 1 - maxBlobSize = 2097152 // 2MB according to Avail docs https://docs.availproject.org/docs/build-with-avail/overview#expandable-blockspace + maxBlobSize = 2097152 ) type SubstrateApiI interface { @@ -74,35 +74,35 @@ var ( _ da.BatchRetriever = &DataAvailabilityLayerClient{} ) -// WithClient is an option which sets the client. + func WithClient(client SubstrateApiI) da.Option { return func(dalc da.DataAvailabilityLayerClient) { dalc.(*DataAvailabilityLayerClient).client = client } } -// WithTxInclusionTimeout is an option which sets the timeout for waiting for transaction inclusion. + func WithTxInclusionTimeout(timeout time.Duration) da.Option { return func(dalc da.DataAvailabilityLayerClient) { dalc.(*DataAvailabilityLayerClient).txInclusionTimeout = timeout } } -// WithBatchRetryDelay is an option which sets the delay between batch retries. + func WithBatchRetryDelay(delay time.Duration) da.Option { return func(dalc da.DataAvailabilityLayerClient) { dalc.(*DataAvailabilityLayerClient).batchRetryDelay = delay } } -// WithBatchRetryAttempts is an option which sets the number of batch retries. + func WithBatchRetryAttempts(attempts uint) da.Option { return func(dalc da.DataAvailabilityLayerClient) { dalc.(*DataAvailabilityLayerClient).batchRetryAttempts = attempts } } -// Init initializes DataAvailabilityLayerClient instance. 
+ func (c *DataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.Server, _ store.KV, logger types.Logger, options ...da.Option) error { c.logger = logger c.synced = make(chan struct{}, 1) @@ -114,18 +114,18 @@ func (c *DataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.S } } - // Set defaults + c.pubsubServer = pubsubServer c.txInclusionTimeout = defaultTxInculsionTimeout c.batchRetryDelay = defaultBatchRetryDelay c.batchRetryAttempts = defaultBatchRetryAttempts - // Apply options + for _, apply := range options { apply(c) } - // If client wasn't set, create a new one + if c.client == nil { substrateApiClient, err := gsrpc.NewSubstrateAPI(c.config.ApiURL) if err != nil { @@ -144,32 +144,32 @@ func (c *DataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.S return nil } -// Start starts DataAvailabilityLayerClient instance. + func (c *DataAvailabilityLayerClient) Start() error { c.synced <- struct{}{} return nil } -// Stop stops DataAvailabilityLayerClient instance. + func (c *DataAvailabilityLayerClient) Stop() error { c.cancel() close(c.synced) return nil } -// WaitForSyncing is used to check when the DA light client finished syncing + func (m *DataAvailabilityLayerClient) WaitForSyncing() { <-m.synced } -// GetClientType returns client type. + func (c *DataAvailabilityLayerClient) GetClientType() da.Client { return da.Avail } -// RetrieveBatches retrieves batch from DataAvailabilityLayerClient instance. + func (c *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMetaData) da.ResultRetrieveBatch { - //nolint:typecheck + blockHash, err := c.client.GetBlockHash(daMetaData.Height) if err != nil { return da.ResultRetrieveBatch{ @@ -190,10 +190,10 @@ func (c *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMet }, } } - // Convert the data returned to batches + var batches []*types.Batch for _, ext := range block.Block.Extrinsics { - // these values below are specific indexes only for data submission, differs with each extrinsic + if ext.Signature.AppID.Int64() == c.config.AppID && ext.Method.CallIndex.SectionIndex == DataCallSectionIndex && ext.Method.CallIndex.MethodIndex == DataCallMethodIndex { @@ -206,16 +206,16 @@ func (c *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMet c.logger.Error("unmarshal batch", "daHeight", daMetaData.Height, "error", err) continue } - // Convert the proto batch to a batch + batch := &types.Batch{} err = batch.FromProto(&pbBatch) if err != nil { c.logger.Error("batch from proto", "daHeight", daMetaData.Height, "error", err) continue } - // Add the batch to the list + batches = append(batches, batch) - // Remove the bytes we just decoded. + data = data[proto.Size(&pbBatch):] } @@ -233,7 +233,7 @@ func (c *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMet } } -// SubmitBatch submits batch to DataAvailabilityLayerClient instance. + func (c *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultSubmitBatch { blob, err := batch.MarshalBinary() if err != nil { @@ -250,8 +250,8 @@ func (c *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS return c.submitBatchLoop(blob) } -// submitBatchLoop tries submitting the batch. In case we get a configuration error we would like to stop trying, -// otherwise, for network error we keep trying indefinitely. 
+ + func (c *DataAvailabilityLayerClient) submitBatchLoop(dataBlob []byte) da.ResultSubmitBatch { for { select { @@ -318,8 +318,8 @@ func (c *DataAvailabilityLayerClient) submitBatchLoop(dataBlob []byte) da.Result } } -// broadcastTx broadcasts the transaction to the network and in case of success -// returns the block height the batch was included in. + + func (c *DataAvailabilityLayerClient) broadcastTx(tx []byte) (uint64, error) { meta, err := c.client.GetMetadataLatest() if err != nil { @@ -329,7 +329,7 @@ func (c *DataAvailabilityLayerClient) broadcastTx(tx []byte) (uint64, error) { if err != nil { return 0, fmt.Errorf("%w: %s", da.ErrTxBroadcastConfigError, err) } - // Create the extrinsic + ext := availtypes.NewExtrinsic(newCall) genesisHash, err := c.client.GetBlockHash(0) if err != nil { @@ -343,7 +343,7 @@ func (c *DataAvailabilityLayerClient) broadcastTx(tx []byte) (uint64, error) { if err != nil { return 0, fmt.Errorf("%w: %s", da.ErrTxBroadcastConfigError, err) } - // Get the account info for the nonce + key, err := availtypes.CreateStorageKey(meta, "System", "Account", keyringPair.PublicKey) if err != nil { return 0, fmt.Errorf("%w: %s", da.ErrTxBroadcastConfigError, err) @@ -364,16 +364,16 @@ func (c *DataAvailabilityLayerClient) broadcastTx(tx []byte) (uint64, error) { SpecVersion: rv.SpecVersion, Tip: availtypes.NewUCompactFromUInt(c.config.Tip), TransactionVersion: rv.TransactionVersion, - AppID: availtypes.NewUCompactFromUInt(uint64(c.config.AppID)), //nolint:gosec // AppID should be always positive + AppID: availtypes.NewUCompactFromUInt(uint64(c.config.AppID)), } - // Sign the transaction using Alice's default account + err = ext.Sign(keyringPair, options) if err != nil { return 0, fmt.Errorf("%w: %s", da.ErrTxBroadcastConfigError, err) } - // Send the extrinsic + sub, err := c.client.SubmitAndWatchExtrinsic(ext) if err != nil { return 0, fmt.Errorf("%w: %s", da.ErrTxBroadcastNetworkError, err) @@ -419,7 +419,7 @@ func (c *DataAvailabilityLayerClient) broadcastTx(tx []byte) (uint64, error) { } } -// CheckBatchAvailability checks batch availability in DataAvailabilityLayerClient instance. + func (c *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASubmitMetaData) da.ResultCheckBatch { return da.ResultCheckBatch{ BaseResult: da.BaseResult{ @@ -429,7 +429,7 @@ func (c *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASu } } -// getHeightFromHash returns the block height from the block hash + func (c *DataAvailabilityLayerClient) getHeightFromHash(hash availtypes.Hash) (uint64, error) { c.logger.Debug("Getting block height from hash", "hash", hash) header, err := c.client.GetHeader(hash) @@ -439,12 +439,12 @@ func (c *DataAvailabilityLayerClient) getHeightFromHash(hash availtypes.Hash) (u return uint64(header.Number), nil } -// GetMaxBlobSizeBytes returns the maximum allowed blob size in the DA, used to check the max batch size configured + func (d *DataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { return maxBlobSize } -// GetBalance returns the balance for a specific address + func (c *DataAvailabilityLayerClient) GetSignerBalance() (da.Balance, error) { return da.Balance{}, nil } diff --git a/da/celestia/celestia.go b/da/celestia/celestia.go index 80cd32f85..6eda30bf5 100644 --- a/da/celestia/celestia.go +++ b/da/celestia/celestia.go @@ -26,7 +26,7 @@ import ( uretry "github.com/dymensionxyz/dymint/utils/retry" ) -// DataAvailabilityLayerClient use celestia-node public API. 
+ type DataAvailabilityLayerClient struct { rpc celtypes.CelestiaRPCClient @@ -43,35 +43,35 @@ var ( _ da.BatchRetriever = &DataAvailabilityLayerClient{} ) -// WithRPCClient sets rpc client. + func WithRPCClient(rpc celtypes.CelestiaRPCClient) da.Option { return func(daLayerClient da.DataAvailabilityLayerClient) { daLayerClient.(*DataAvailabilityLayerClient).rpc = rpc } } -// WithRPCRetryDelay sets failed rpc calls retry delay. + func WithRPCRetryDelay(delay time.Duration) da.Option { return func(daLayerClient da.DataAvailabilityLayerClient) { daLayerClient.(*DataAvailabilityLayerClient).config.RetryDelay = delay } } -// WithRPCAttempts sets failed rpc calls retry attempts. + func WithRPCAttempts(attempts int) da.Option { return func(daLayerClient da.DataAvailabilityLayerClient) { daLayerClient.(*DataAvailabilityLayerClient).config.RetryAttempts = &attempts } } -// WithSubmitBackoff sets submit retry delay config. + func WithSubmitBackoff(c uretry.BackoffConfig) da.Option { return func(daLayerClient da.DataAvailabilityLayerClient) { daLayerClient.(*DataAvailabilityLayerClient).config.Backoff = c } } -// Init initializes DataAvailabilityLayerClient instance. + func (c *DataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.Server, _ store.KV, logger types.Logger, options ...da.Option) error { c.logger = logger c.synced = make(chan struct{}, 1) @@ -85,7 +85,7 @@ func (c *DataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.S c.pubsubServer = pubsubServer - // Apply options + for _, apply := range options { apply(c) } @@ -113,7 +113,7 @@ func createConfig(bz []byte) (c Config, err error) { return c, errors.New("gas prices must be set") } - // NOTE: 0 is valid value for RetryAttempts + if c.RetryDelay == 0 { c.RetryDelay = defaultRpcRetryDelay @@ -128,11 +128,11 @@ func createConfig(bz []byte) (c Config, err error) { return c, nil } -// Start prepares DataAvailabilityLayerClient to work. + func (c *DataAvailabilityLayerClient) Start() (err error) { c.logger.Info("Starting Celestia Data Availability Layer Client.") - // other client has already been set + if c.rpc != nil { c.logger.Info("Celestia-node client already set.") return nil @@ -150,7 +150,7 @@ func (c *DataAvailabilityLayerClient) Start() (err error) { return } -// Stop stops DataAvailabilityLayerClient. + func (c *DataAvailabilityLayerClient) Stop() error { c.logger.Info("Stopping Celestia Data Availability Layer Client.") err := c.pubsubServer.Stop() @@ -162,17 +162,17 @@ func (c *DataAvailabilityLayerClient) Stop() error { return nil } -// WaitForSyncing is used to check when the DA light client finished syncing + func (m *DataAvailabilityLayerClient) WaitForSyncing() { <-m.synced } -// GetClientType returns client type. + func (c *DataAvailabilityLayerClient) GetClientType() da.Client { return da.Celestia } -// SubmitBatch submits a batch to the DA layer. 
+ func (c *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultSubmitBatch { data, err := batch.MarshalBinary() if err != nil { @@ -204,10 +204,10 @@ func (c *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS return da.ResultSubmitBatch{} default: - // TODO(srene): Split batch in multiple blobs if necessary if supported + height, commitment, err := c.submit(data) if errors.Is(err, gerrc.ErrInternal) { - // no point retrying if it's because of our code being wrong + err = fmt.Errorf("submit: %w", err) return da.ResultSubmitBatch{ BaseResult: da.BaseResult{ @@ -273,7 +273,7 @@ func (c *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMet resultRetrieveBatch = c.retrieveBatches(daMetaData) return resultRetrieveBatch.Error }, - retry.Attempts(uint(*c.config.RetryAttempts)), //nolint:gosec // RetryAttempts should be always positive + retry.Attempts(uint(*c.config.RetryAttempts)), retry.DelayType(retry.FixedDelay), retry.Delay(c.config.RetryDelay), ) @@ -368,7 +368,7 @@ func (c *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASu return nil }, - retry.Attempts(uint(*c.config.RetryAttempts)), //nolint:gosec // RetryAttempts should be always positive + retry.Attempts(uint(*c.config.RetryAttempts)), retry.DelayType(retry.FixedDelay), retry.Delay(c.config.RetryDelay), ) @@ -392,7 +392,7 @@ func (c *DataAvailabilityLayerClient) checkBatchAvailability(daMetaData *da.DASu dah, err := c.getDataAvailabilityHeaders(daMetaData.Height) if err != nil { - // Returning Data Availability header Data Root for dispute validation + return da.ResultCheckBatch{ BaseResult: da.BaseResult{ Code: da.StatusError, @@ -407,10 +407,10 @@ func (c *DataAvailabilityLayerClient) checkBatchAvailability(daMetaData *da.DASu proof, err := c.getProof(daMetaData) if err != nil || proof == nil { - // TODO (srene): Not getting proof means there is no existing data for the namespace and the commitment (the commitment is wrong). - // Therefore we need to prove whether the commitment is wrong or the span does not exists. - // In case the span is correct it is necessary to return the data for the span and the proofs to the data root, so we can prove the data - // is the data for the span, and reproducing the commitment will generate a different one. + + + + return da.ResultCheckBatch{ BaseResult: da.BaseResult{ Code: da.StatusError, @@ -433,9 +433,9 @@ func (c *DataAvailabilityLayerClient) checkBatchAvailability(daMetaData *da.DASu if daMetaData.Index > 0 && daMetaData.Length > 0 { if index != daMetaData.Index || shares != daMetaData.Length { - // TODO (srene): In this case the commitment is correct but does not match the span. - // If the span is correct we have to repeat the previous step (sending data + proof of data) - // In case the span is not correct we need to send unavailable proof by sending proof of any row root to data root + + + return da.ResultCheckBatch{ CheckMetaData: DACheckMetaData, BaseResult: da.BaseResult{ @@ -449,9 +449,9 @@ func (c *DataAvailabilityLayerClient) checkBatchAvailability(daMetaData *da.DASu } included, err = c.validateProof(daMetaData, proof) - // The both cases below (there is an error validating the proof or the proof is wrong) should not happen - // if we consider correct functioning of the celestia light node. - // This will only happen in case the previous step the celestia light node returned wrong proofs.. 
+ + + if err != nil { return da.ResultCheckBatch{ BaseResult: da.BaseResult{ @@ -485,7 +485,7 @@ func (c *DataAvailabilityLayerClient) checkBatchAvailability(daMetaData *da.DASu } } -// Submit submits the Blobs to Data Availability layer. + func (c *DataAvailabilityLayerClient) submit(daBlob da.Blob) (uint64, da.Commitment, error) { blobs, commitments, err := c.blobsAndCommitments(daBlob) if err != nil { @@ -554,7 +554,7 @@ func (c *DataAvailabilityLayerClient) getDataAvailabilityHeaders(height uint64) return headers.DAH, nil } -// Celestia syncing in background + func (c *DataAvailabilityLayerClient) sync(rpc *openrpc.Client) { sync := func() error { done := make(chan error, 1) @@ -579,7 +579,7 @@ func (c *DataAvailabilityLayerClient) sync(rpc *openrpc.Client) { } err := retry.Do(sync, - retry.Attempts(0), // try forever + retry.Attempts(0), retry.Delay(10*time.Second), retry.LastErrorOnly(true), retry.DelayType(retry.FixedDelay), @@ -596,12 +596,12 @@ func (c *DataAvailabilityLayerClient) sync(rpc *openrpc.Client) { } } -// GetMaxBlobSizeBytes returns the maximum allowed blob size in the DA, used to check the max batch size configured + func (d *DataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { return maxBlobSizeBytes } -// GetSignerBalance returns the balance for a specific address + func (d *DataAvailabilityLayerClient) GetSignerBalance() (da.Balance, error) { ctx, cancel := context.WithTimeout(d.ctx, d.config.Timeout) defer cancel() diff --git a/da/celestia/config.go b/da/celestia/config.go index 025a42e33..a1f764d4d 100644 --- a/da/celestia/config.go +++ b/da/celestia/config.go @@ -24,7 +24,7 @@ var defaultSubmitBackoff = uretry.NewBackoffConfig( uretry.WithMaxDelay(time.Second*6), ) -// Config stores Celestia DALC configuration parameters. + type Config struct { BaseURL string `json:"base_url,omitempty"` AppNodeURL string `json:"app_node_url,omitempty"` @@ -60,13 +60,13 @@ func (c *Config) InitNamespaceID() error { if c.NamespaceIDStr == "" { c.NamespaceIDStr = generateRandNamespaceID() } - // Decode NamespaceID from string to byte array + namespaceBytes, err := hex.DecodeString(c.NamespaceIDStr) if err != nil { return fmt.Errorf("decode string: %w", err) } - // Check if NamespaceID is of correct length (10 bytes) + if len(namespaceBytes) != openrpcns.NamespaceVersionZeroIDSize { return fmt.Errorf("wrong length: got: %v: expect %v", len(namespaceBytes), openrpcns.NamespaceVersionZeroIDSize) } diff --git a/da/celestia/mock/messages.go b/da/celestia/mock/messages.go index cf97dd2c5..d0140a084 100644 --- a/da/celestia/mock/messages.go +++ b/da/celestia/mock/messages.go @@ -5,8 +5,8 @@ import ( "encoding/binary" ) -// This code is extracted from celestia-app. It's here to build shares from messages (serialized blocks). -// TODO(tzdybal): if we stop using `/namespaced_shares` we can get rid of this file. + + const ( shareSize = 256 @@ -14,8 +14,8 @@ const ( msgShareSize = shareSize - namespaceSize ) -// splitMessage breaks the data in a message into the minimum number of -// namespaced shares + + func splitMessage(rawData []byte, nid []byte) []NamespacedShare { shares := make([]NamespacedShare, 0) firstRawShare := append(append( @@ -40,10 +40,10 @@ func splitMessage(rawData []byte, nid []byte) []NamespacedShare { return shares } -// Share contains the raw share data without the corresponding namespace. + type Share []byte -// NamespacedShare extends a Share with the corresponding namespace. 
+ type NamespacedShare struct { Share ID []byte @@ -68,8 +68,8 @@ func zeroPadIfNecessary(share []byte, width int) []byte { return share } -// marshalDelimited marshals the raw data (excluding the namespace) of this -// message and prefixes it with the length of that encoding. + + func marshalDelimited(data []byte) ([]byte, error) { lenBuf := make([]byte, binary.MaxVarintLen64) length := uint64(len(data)) @@ -77,8 +77,8 @@ func marshalDelimited(data []byte) ([]byte, error) { return append(lenBuf[:n], data...), nil } -// appendToShares appends raw data as shares. -// Used to build shares from blocks/messages. + + func appendToShares(shares []NamespacedShare, nid []byte, rawData []byte) []NamespacedShare { if len(rawData) <= msgShareSize { rawShare := append(append( @@ -89,7 +89,7 @@ func appendToShares(shares []NamespacedShare, nid []byte, rawData []byte) []Name paddedShare := zeroPadIfNecessary(rawShare, shareSize) share := NamespacedShare{paddedShare, nid} shares = append(shares, share) - } else { // len(rawData) > msgShareSize + } else { shares = append(shares, splitMessage(rawData, nid)...) } return shares diff --git a/da/celestia/mock/server.go b/da/celestia/mock/server.go index 8b76d44fb..98434285a 100644 --- a/da/celestia/mock/server.go +++ b/da/celestia/mock/server.go @@ -20,7 +20,7 @@ import ( "github.com/dymensionxyz/dymint/types" ) -// Server mocks celestia-node HTTP API. + type Server struct { da *local.DataAvailabilityLayerClient blockTime time.Duration @@ -28,7 +28,7 @@ type Server struct { logger types.Logger } -// NewServer creates new instance of Server. + func NewServer(blockTime time.Duration, logger types.Logger) *Server { return &Server{ da: new(local.DataAvailabilityLayerClient), @@ -37,7 +37,7 @@ func NewServer(blockTime time.Duration, logger types.Logger) *Server { } } -// Start starts HTTP server with given listener. + func (s *Server) Start(listener net.Listener) error { err := s.da.Init([]byte(s.blockTime.String()), pubsub.NewServer(), store.NewDefaultInMemoryKVStore(), s.logger) if err != nil { @@ -56,7 +56,7 @@ func (s *Server) Start(listener net.Listener) error { return nil } -// Stop shuts down the Server. + func (s *Server) Stop() { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() diff --git a/da/celestia/rpc.go b/da/celestia/rpc.go index f4dac9d64..be0265f1f 100644 --- a/da/celestia/rpc.go +++ b/da/celestia/rpc.go @@ -14,49 +14,49 @@ import ( var _ types.CelestiaRPCClient = &OpenRPC{} -// OpenRPC is a wrapper around the openrpc client. + type OpenRPC struct { rpc *openrpc.Client } -// NewOpenRPC creates a new openrpc client. + func NewOpenRPC(rpc *openrpc.Client) *OpenRPC { return &OpenRPC{ rpc: rpc, } } -// GetAll gets all blobs. + func (c *OpenRPC) GetAll(ctx context.Context, height uint64, namespaces []share.Namespace) ([]*blob.Blob, error) { return c.rpc.Blob.GetAll(ctx, height, namespaces) } -// Submit blobs. + func (c *OpenRPC) Submit(ctx context.Context, blobs []*blob.Blob, options *blob.SubmitOptions) (uint64, error) { return c.rpc.Blob.Submit(ctx, blobs, options) } -// GetProof gets the proof for a specific share commitment. 
+ func (c *OpenRPC) GetProof(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Proof, error) { return c.rpc.Blob.GetProof(ctx, height, namespace, commitment) } -// Get blob for a specific share commitment + func (c *OpenRPC) Get(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Blob, error) { return c.rpc.Blob.Get(ctx, height, namespace, commitment) } -// GetByHeight gets the header by height + func (c *OpenRPC) GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { return c.rpc.Header.GetByHeight(ctx, height) } -// Included checks if a blob is included in the chain + func (c *OpenRPC) Included(ctx context.Context, height uint64, namespace share.Namespace, proof *blob.Proof, commitment blob.Commitment) (bool, error) { return c.rpc.Blob.Included(ctx, height, namespace, proof, commitment) } -// GetSignerBalance balance for a specific address + func (c *OpenRPC) GetSignerBalance(ctx context.Context) (*state.Balance, error) { return c.rpc.State.Balance(ctx) } diff --git a/da/celestia/types/rpc.go b/da/celestia/types/rpc.go index 2949f65ec..8fded2362 100644 --- a/da/celestia/types/rpc.go +++ b/da/celestia/types/rpc.go @@ -10,16 +10,16 @@ import ( ) type CelestiaRPCClient interface { - /* ---------------------------------- blob ---------------------------------- */ + Get(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Blob, error) GetAll(context.Context, uint64, []share.Namespace) ([]*blob.Blob, error) GetProof(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Proof, error) Included(ctx context.Context, height uint64, namespace share.Namespace, proof *blob.Proof, commitment blob.Commitment) (bool, error) Submit(ctx context.Context, blobs []*blob.Blob, options *blob.SubmitOptions) (uint64, error) - /* --------------------------------- header --------------------------------- */ + GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) - /* ---------------------------------- state --------------------------------- */ + GetSignerBalance(ctx context.Context) (*state.Balance, error) } diff --git a/da/celestia/types/types.go b/da/celestia/types/types.go index 9a10f3a0b..52be192a6 100644 --- a/da/celestia/types/types.go +++ b/da/celestia/types/types.go @@ -4,74 +4,74 @@ import ( "math" ) -// These constants were originally sourced from: -// https://github.com/celestiaorg/celestia-specs/blob/master/src/specs/consensus.md#constants -// -// They can not change throughout the lifetime of a network. + + + + const ( - // NamespaceVersionSize is the size of a namespace version in bytes. + NamespaceVersionSize = 1 - // NamespaceVersionMaxValue is the maximum value a namespace version can be. - // This const must be updated if NamespaceVersionSize is changed. + + NamespaceVersionMaxValue = math.MaxUint8 - // NamespaceIDSize is the size of a namespace ID in bytes. + NamespaceIDSize = 28 - // NamespaceSize is the size of a namespace (version + ID) in bytes. + NamespaceSize = NamespaceVersionSize + NamespaceIDSize - // ShareSize is the size of a share in bytes. + ShareSize = 512 - // ShareInfoBytes is the number of bytes reserved for information. The info - // byte contains the share version and a sequence start idicator. 
+ + ShareInfoBytes = 1 - // SequenceLenBytes is the number of bytes reserved for the sequence length - // that is present in the first share of a sequence. + + SequenceLenBytes = 4 - // ShareVersionZero is the first share version format. + ShareVersionZero = uint8(0) - // DefaultShareVersion is the defacto share version. Use this if you are - // unsure of which version to use. + + DefaultShareVersion = ShareVersionZero - // CompactShareReservedBytes is the number of bytes reserved for the location of - // the first unit (transaction, ISR) in a compact share. + + CompactShareReservedBytes = 4 - // FirstCompactShareContentSize is the number of bytes usable for data in - // the first compact share of a sequence. + + FirstCompactShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - SequenceLenBytes - CompactShareReservedBytes - // ContinuationCompactShareContentSize is the number of bytes usable for - // data in a continuation compact share of a sequence. + + ContinuationCompactShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - CompactShareReservedBytes - // FirstSparseShareContentSize is the number of bytes usable for data in the - // first sparse share of a sequence. + + FirstSparseShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - SequenceLenBytes - // ContinuationSparseShareContentSize is the number of bytes usable for data - // in a continuation sparse share of a sequence. + + ContinuationSparseShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - // MinSquareSize is the smallest original square width. + MinSquareSize = 1 - // MinshareCount is the minimum number of shares allowed in the original - // data square. + + MinShareCount = MinSquareSize * MinSquareSize - // MaxShareVersion is the maximum value a share version can be. + MaxShareVersion = 127 - // Celestia matrix size + DefaultGovMaxSquareSize = 64 - // Default maximum bytes per blob allowed + DefaultMaxBytes = DefaultGovMaxSquareSize * DefaultGovMaxSquareSize * ContinuationSparseShareContentSize ) diff --git a/da/da.go b/da/da.go index 3bde8023f..cd85c0e7d 100644 --- a/da/da.go +++ b/da/da.go @@ -15,30 +15,30 @@ import ( "github.com/dymensionxyz/dymint/types" ) -// StatusCode is a type for DA layer return status. -// TODO: define an enum of different non-happy-path cases -// that might need to be handled by Dymint independent of -// the underlying DA chain. Use int32 to match the protobuf -// enum representation. + + + + + type StatusCode int32 -// Commitment should contain serialized cryptographic commitment to Blob value. + type Commitment = []byte -// Blob is the data submitted/received from DA interface. + type Blob = []byte -// Data Availability return codes. + const ( StatusUnknown StatusCode = iota StatusSuccess StatusError ) -// Client defines all the possible da clients + type Client string -// Data availability clients + const ( Mock Client = "mock" Celestia Client = "celestia" @@ -46,34 +46,34 @@ const ( Grpc Client = "grpc" ) -// Option is a function that sets a parameter on the da layer. + type Option func(DataAvailabilityLayerClient) -// BaseResult contains basic information returned by DA layer. + type BaseResult struct { - // Code is to determine if the action succeeded. + Code StatusCode - // Message may contain DA layer specific information (like DA block height/hash, detailed error message, etc) + Message string - // Error is the error returned by the DA layer + Error error } -// DAMetaData contains meta data about a batch on the Data Availability Layer. 
+ type DASubmitMetaData struct { - // Height is the height of the block in the da layer + Height uint64 - // Namespace ID + Namespace []byte - // Client is the client to use to fetch data from the da layer + Client Client - // Share commitment, for each blob, used to obtain blobs and proofs + Commitment Commitment - // Initial position for each blob in the NMT + Index int - // Number of shares of each blob + Length int - // any NMT root for the specific height, necessary for non-inclusion proof + Root []byte } @@ -84,9 +84,9 @@ type Balance struct { const PathSeparator = "|" -// ToPath converts a DAMetaData to a path. + func (d *DASubmitMetaData) ToPath() string { - // convert uint64 to string + if d.Commitment != nil { commitment := hex.EncodeToString(d.Commitment) dataroot := hex.EncodeToString(d.Root) @@ -109,7 +109,7 @@ func (d *DASubmitMetaData) ToPath() string { } } -// FromPath parses a path to a DAMetaData. + func (d *DASubmitMetaData) FromPath(path string) (*DASubmitMetaData, error) { pathParts := strings.FieldsFunc(path, func(r rune) bool { return r == rune(PathSeparator[0]) }) if len(pathParts) < 2 { @@ -125,7 +125,7 @@ func (d *DASubmitMetaData) FromPath(path string) (*DASubmitMetaData, error) { Height: height, Client: Client(pathParts[0]), } - // TODO: check per DA and panic if not enough parts + if len(pathParts) == 7 { submitData.Index, err = strconv.Atoi(pathParts[2]) if err != nil { @@ -152,93 +152,93 @@ func (d *DASubmitMetaData) FromPath(path string) (*DASubmitMetaData, error) { return submitData, nil } -// DAMetaData contains meta data about a batch on the Data Availability Layer. + type DACheckMetaData struct { - // Height is the height of the block in the da layer + Height uint64 - // Client is the client to use to fetch data from the da layer + Client Client - // Submission index in the Hub + SLIndex uint64 - // Namespace ID + Namespace []byte - // Share commitment, for each blob, used to obtain blobs and proofs + Commitment Commitment - // Initial position for each blob in the NMT + Index int - // Number of shares of each blob + Length int - // Proofs necessary to validate blob inclusion in the specific height + Proofs []*blob.Proof - // NMT roots for each NMT Proof + NMTRoots []byte - // Proofs necessary to validate blob inclusion in the specific height + RowProofs []*merkle.Proof - // any NMT root for the specific height, necessary for non-inclusion proof + Root []byte } -// ResultSubmitBatch contains information returned from DA layer after block submission. + type ResultSubmitBatch struct { BaseResult - // DAHeight informs about a height on Data Availability Layer for given result. + SubmitMetaData *DASubmitMetaData } -// ResultCheckBatch contains information about block availability, returned from DA layer client. + type ResultCheckBatch struct { BaseResult - // DAHeight informs about a height on Data Availability Layer for given result. + CheckMetaData *DACheckMetaData } -// ResultRetrieveBatch contains batch of blocks returned from DA layer client. + type ResultRetrieveBatch struct { BaseResult - // Block is the full block retrieved from Data Availability Layer. - // If Code is not equal to StatusSuccess, it has to be nil. + + Batches []*types.Batch - // DAHeight informs about a height on Data Availability Layer for given result. + CheckMetaData *DACheckMetaData } -// DataAvailabilityLayerClient defines generic interface for DA layer block submission. -// It also contains life-cycle methods. 
+ + type DataAvailabilityLayerClient interface { - // Init is called once to allow DA client to read configuration and initialize resources. + Init(config []byte, pubsubServer *pubsub.Server, kvStore store.KV, logger types.Logger, options ...Option) error - // Start is called once, after Init. It's implementation should start operation of DataAvailabilityLayerClient. + Start() error - // Stop is called once, when DataAvailabilityLayerClient is no longer needed. + Stop() error - // SubmitBatch submits the passed in block to the DA layer. - // This should create a transaction which (potentially) - // triggers a state transition in the DA layer. + + + SubmitBatch(batch *types.Batch) ResultSubmitBatch GetClientType() Client - // CheckBatchAvailability checks the availability of the blob submitted getting proofs and validating them + CheckBatchAvailability(daMetaData *DASubmitMetaData) ResultCheckBatch - // Used to check when the DA light client finished syncing + WaitForSyncing() - // Returns the maximum allowed blob size in the DA, used to check the max batch size configured + GetMaxBlobSizeBytes() uint32 - // GetSignerBalance returns the balance for a specific address + GetSignerBalance() (Balance, error) } -// BatchRetriever is additional interface that can be implemented by Data Availability Layer Client that is able to retrieve -// block data from DA layer. This gives the ability to use it for block synchronization. + + type BatchRetriever interface { - // RetrieveBatches returns blocks at given data layer height from data availability layer. + RetrieveBatches(daMetaData *DASubmitMetaData) ResultRetrieveBatch - // CheckBatchAvailability checks the availability of the blob received getting proofs and validating them + CheckBatchAvailability(daMetaData *DASubmitMetaData) ResultCheckBatch } diff --git a/da/errors.go b/da/errors.go index ba02343a8..dca7871b7 100644 --- a/da/errors.go +++ b/da/errors.go @@ -7,26 +7,26 @@ import ( ) var ( - // ErrFailedTxBuild is returned when transaction build fails. + ErrTxBroadcastConfigError = errors.New("failed building tx") - // ErrFailedTxBroadcast is returned when transaction broadcast fails. + ErrTxBroadcastNetworkError = errors.New("failed broadcasting tx") - // ErrTxBroadcastTimeout is returned when transaction broadcast times out. + ErrTxBroadcastTimeout = errors.New("broadcast timeout error") - // ErrUnableToGetProof is returned when proof is not available. + ErrUnableToGetProof = errors.New("unable to get proof") - // ErrRetrieval is returned when retrieval rpc falls + ErrRetrieval = errors.New("retrieval failed") - // ErrBlobNotFound is returned when blob is not found. + ErrBlobNotFound = errors.New("blob not found") - // ErrBlobNotIncluded is returned when blob is not included. + ErrBlobNotIncluded = errors.New("blob not included") - // ErrBlobNotParsed is returned when blob cannot be parsed + ErrBlobNotParsed = errors.New("unable to parse blob to batch") - // ErrProofNotMatching is returned when proof does not match. 
+ ErrProofNotMatching = errors.New("proof not matching") - // ErrNameSpace is returned when wrong namespace used + ErrNameSpace = errors.New("namespace not matching") - // ErrDAMismatch is returned when the DA client used does not match the da client specified in the da path of the state update + ErrDAMismatch = gerrc.ErrInvalidArgument.Wrap("DA in config not matching DA path") ) diff --git a/da/grpc/grpc.go b/da/grpc/grpc.go index 8636cf583..7daa0c667 100644 --- a/da/grpc/grpc.go +++ b/da/grpc/grpc.go @@ -16,9 +16,9 @@ import ( "github.com/tendermint/tendermint/libs/pubsub" ) -const maxBlobSize = 2097152 // 2MB (equivalent to avail or celestia) +const maxBlobSize = 2097152 + -// DataAvailabilityLayerClient is a generic client that proxies all DA requests via gRPC. type DataAvailabilityLayerClient struct { config Config @@ -28,14 +28,14 @@ type DataAvailabilityLayerClient struct { logger types.Logger } -// Config contains configuration options for DataAvailabilityLayerClient. + type Config struct { - // TODO(tzdybal): add more options! + Host string `json:"host"` Port int `json:"port"` } -// DefaultConfig defines default values for DataAvailabilityLayerClient configuration. + var DefaultConfig = Config{ Host: "127.0.0.1", Port: 7980, @@ -46,7 +46,7 @@ var ( _ da.BatchRetriever = &DataAvailabilityLayerClient{} ) -// Init sets the configuration options. + func (d *DataAvailabilityLayerClient) Init(config []byte, _ *pubsub.Server, _ store.KV, logger types.Logger, options ...da.Option) error { d.logger = logger d.synced = make(chan struct{}, 1) @@ -57,14 +57,14 @@ func (d *DataAvailabilityLayerClient) Init(config []byte, _ *pubsub.Server, _ st return json.Unmarshal(config, &d.config) } -// Start creates connection to gRPC server and instantiates gRPC client. + func (d *DataAvailabilityLayerClient) Start() error { d.logger.Info("starting GRPC DALC", "host", d.config.Host, "port", d.config.Port) d.synced <- struct{}{} var err error var opts []grpc.DialOption - // TODO(tzdybal): add more options + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) d.conn, err = grpc.Dial(d.config.Host+":"+strconv.Itoa(d.config.Port), opts...) if err != nil { @@ -75,23 +75,23 @@ func (d *DataAvailabilityLayerClient) Start() error { return nil } -// Stop closes connection to gRPC server. + func (d *DataAvailabilityLayerClient) Stop() error { d.logger.Info("stopping GRPC DALC") return d.conn.Close() } -// WaitForSyncing is used to check when the DA light client finished syncing + func (m *DataAvailabilityLayerClient) WaitForSyncing() { <-m.synced } -// GetClientType returns client type. + func (d *DataAvailabilityLayerClient) GetClientType() da.Client { return da.Grpc } -// SubmitBatch proxies SubmitBatch request to gRPC server. + func (d *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultSubmitBatch { resp, err := d.client.SubmitBatch(context.TODO(), &dalc.SubmitBatchRequest{Batch: batch.ToProto()}) if err != nil { @@ -111,7 +111,7 @@ func (d *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS } } -// CheckBatchAvailability proxies CheckBatchAvailability request to gRPC server. 
+ func (d *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASubmitMetaData) da.ResultCheckBatch { resp, err := d.client.CheckBatchAvailability(context.TODO(), &dalc.CheckBatchAvailabilityRequest{DataLayerHeight: daMetaData.Height}) if err != nil { @@ -122,12 +122,12 @@ func (d *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASu } } -// GetMaxBlobSizeBytes returns the maximum allowed blob size in the DA, used to check the max batch size configured + func (d *DataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { return maxBlobSize } -// RetrieveBatches proxies RetrieveBlocks request to gRPC server. + func (d *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMetaData) da.ResultRetrieveBatch { resp, err := d.client.RetrieveBatches(context.TODO(), &dalc.RetrieveBatchesRequest{DataLayerHeight: daMetaData.Height}) if err != nil { diff --git a/da/grpc/mockserv/mockserv.go b/da/grpc/mockserv/mockserv.go index a8f21e508..e303e2901 100644 --- a/da/grpc/mockserv/mockserv.go +++ b/da/grpc/mockserv/mockserv.go @@ -17,7 +17,7 @@ import ( "github.com/tendermint/tendermint/libs/pubsub" ) -// GetServer creates and returns gRPC server instance. + func GetServer(kv store.KV, conf grpcda.Config, mockConfig []byte) *grpc.Server { logger := tmlog.NewTMLogger(os.Stdout) diff --git a/da/local/local.go b/da/local/local.go index 3852b2797..009beaab8 100644 --- a/da/local/local.go +++ b/da/local/local.go @@ -1,7 +1,7 @@ package local import ( - "crypto/sha1" //#nosec + "crypto/sha1" "encoding/binary" "math/rand" "sync/atomic" @@ -14,8 +14,8 @@ import ( "github.com/tendermint/tendermint/libs/pubsub" ) -// DataAvailabilityLayerClient is intended only for usage in tests. -// It does actually ensures DA - it stores data in-memory. + + type DataAvailabilityLayerClient struct { logger types.Logger dalcKV store.KV @@ -26,7 +26,7 @@ type DataAvailabilityLayerClient struct { const ( defaultBlockTime = 3 * time.Second - maxBlobSize = 2097152 // 2MB (equivalent to avail or celestia) + maxBlobSize = 2097152 ) type config struct { @@ -38,7 +38,7 @@ var ( _ da.BatchRetriever = &DataAvailabilityLayerClient{} ) -// Init is called once to allow DA client to read configuration and initialize resources. + func (m *DataAvailabilityLayerClient) Init(config []byte, _ *pubsub.Server, dalcKV store.KV, logger types.Logger, options ...da.Option) error { m.logger = logger m.dalcKV = dalcKV @@ -56,7 +56,7 @@ func (m *DataAvailabilityLayerClient) Init(config []byte, _ *pubsub.Server, dalc return nil } -// Start implements DataAvailabilityLayerClient interface. + func (m *DataAvailabilityLayerClient) Start() error { m.logger.Debug("Mock Data Availability Layer Client starting") m.synced <- struct{}{} @@ -70,26 +70,26 @@ func (m *DataAvailabilityLayerClient) Start() error { return nil } -// Stop implements DataAvailabilityLayerClient interface. + func (m *DataAvailabilityLayerClient) Stop() error { m.logger.Debug("Mock Data Availability Layer Client stopped") close(m.synced) return nil } -// WaitForSyncing is used to check when the DA light client finished syncing + func (m *DataAvailabilityLayerClient) WaitForSyncing() { <-m.synced } -// GetClientType returns client type. + func (m *DataAvailabilityLayerClient) GetClientType() da.Client { return da.Mock } -// SubmitBatch submits the passed in batch to the DA layer. -// This should create a transaction which (potentially) -// triggers a state transition in the DA layer. 
+ + + func (m *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultSubmitBatch { daHeight := m.daHeight.Load() @@ -99,7 +99,7 @@ func (m *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS if err != nil { return da.ResultSubmitBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: err.Error(), Error: err}} } - hash := sha1.Sum(uint64ToBinary(batch.EndHeight())) //#nosec + hash := sha1.Sum(uint64ToBinary(batch.EndHeight())) err = m.dalcKV.Set(getKey(daHeight, batch.StartHeight()), hash[:]) if err != nil { return da.ResultSubmitBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: err.Error(), Error: err}} @@ -109,7 +109,7 @@ func (m *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS return da.ResultSubmitBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: err.Error(), Error: err}} } - m.daHeight.Store(daHeight + 1) // guaranteed no ABA problem as submit batch is only called when the object is locked + m.daHeight.Store(daHeight + 1) return da.ResultSubmitBatch{ BaseResult: da.BaseResult{ @@ -123,13 +123,13 @@ func (m *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS } } -// CheckBatchAvailability queries DA layer to check data availability of block corresponding to given header. + func (m *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASubmitMetaData) da.ResultCheckBatch { batchesRes := m.RetrieveBatches(daMetaData) return da.ResultCheckBatch{BaseResult: da.BaseResult{Code: batchesRes.Code, Message: batchesRes.Message, Error: batchesRes.Error}, CheckMetaData: batchesRes.CheckMetaData} } -// RetrieveBatches returns block at given height from data availability layer. + func (m *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMetaData) da.ResultRetrieveBatch { if daMetaData.Height >= m.daHeight.Load() { return da.ResultRetrieveBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: "batch not found", Error: da.ErrBlobNotFound}} @@ -174,11 +174,11 @@ func getKey(daHeight uint64, height uint64) []byte { } func (m *DataAvailabilityLayerClient) updateDAHeight() { - blockStep := rand.Uint64()%10 + 1 //#nosec + blockStep := rand.Uint64()%10 + 1 m.daHeight.Add(blockStep) } -// GetMaxBlobSizeBytes returns the maximum allowed blob size in the DA, used to check the max batch size configured + func (d *DataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { return maxBlobSize } diff --git a/da/registry/registry.go b/da/registry/registry.go index b520c41c9..4779e2ba0 100644 --- a/da/registry/registry.go +++ b/da/registry/registry.go @@ -8,7 +8,7 @@ import ( "github.com/dymensionxyz/dymint/da/local" ) -// this is a central registry for all Data Availability Layer Clients + var clients = map[string]func() da.DataAvailabilityLayerClient{ "mock": func() da.DataAvailabilityLayerClient { return &local.DataAvailabilityLayerClient{} }, "grpc": func() da.DataAvailabilityLayerClient { return &grpc.DataAvailabilityLayerClient{} }, @@ -16,7 +16,7 @@ var clients = map[string]func() da.DataAvailabilityLayerClient{ "avail": func() da.DataAvailabilityLayerClient { return &avail.DataAvailabilityLayerClient{} }, } -// GetClient returns client identified by name. + func GetClient(name string) da.DataAvailabilityLayerClient { f, ok := clients[name] if !ok { @@ -25,7 +25,7 @@ func GetClient(name string) da.DataAvailabilityLayerClient { return f() } -// RegisteredClients returns names of all DA clients in registry. 
+ func RegisteredClients() []string { registered := make([]string, 0, len(clients)) for name := range clients { diff --git a/indexers/blockindexer/block.go b/indexers/blockindexer/block.go index 08d2f6d16..0ac87ba8f 100644 --- a/indexers/blockindexer/block.go +++ b/indexers/blockindexer/block.go @@ -8,19 +8,19 @@ import ( "github.com/tendermint/tendermint/types" ) -// BlockIndexer defines an interface contract for indexing block events. + type BlockIndexer interface { - // Has returns true if the given height has been indexed. An error is returned - // upon database query failure. + + Has(height int64) (bool, error) - // Index indexes BeginBlock and EndBlock events for a given block by its height. + Index(types.EventDataNewBlockHeader) error - // Search performs a query for block heights that match a given BeginBlock - // and Endblock event search criteria. + + Search(ctx context.Context, q *query.Query) ([]int64, error) - // Delete indexed block entries up to (but not including) a height. It returns number of entries pruned. + Prune(from, to uint64, logger log.Logger) (uint64, error) } diff --git a/indexers/blockindexer/kv/kv.go b/indexers/blockindexer/kv/kv.go index bb8ee295c..d2b1b813a 100644 --- a/indexers/blockindexer/kv/kv.go +++ b/indexers/blockindexer/kv/kv.go @@ -27,9 +27,9 @@ import ( var _ indexer.BlockIndexer = (*BlockerIndexer)(nil) -// BlockerIndexer implements a block indexer, indexing BeginBlock and EndBlock -// events with an underlying KV store. Block events are indexed by their height, -// such that matching search criteria returns the respective block height(s). + + + type BlockerIndexer struct { store store.KV } @@ -40,8 +40,8 @@ func New(store store.KV) *BlockerIndexer { } } -// Has returns true if the given height has been indexed. An error is returned -// upon database query failure. + + func (idx *BlockerIndexer) Has(height int64) (bool, error) { key, err := heightKey(height) if err != nil { @@ -55,18 +55,18 @@ func (idx *BlockerIndexer) Has(height int64) (bool, error) { return err == nil, err } -// Index indexes BeginBlock and EndBlock events for a given block by its height. -// The following is indexed: -// -// primary key: encode(block.height | height) => encode(height) -// BeginBlock events: encode(eventType.eventAttr|eventValue|height|begin_block) => encode(height) -// EndBlock events: encode(eventType.eventAttr|eventValue|height|end_block) => encode(height) + + + + + + func (idx *BlockerIndexer) Index(bh tmtypes.EventDataNewBlockHeader) error { batch := idx.store.NewBatch() defer batch.Discard() height := bh.Header.Height - // 1. index by height + key, err := heightKey(height) if err != nil { return fmt.Errorf("create block height index key: %w", err) @@ -75,18 +75,18 @@ func (idx *BlockerIndexer) Index(bh tmtypes.EventDataNewBlockHeader) error { return err } - // 2. index BeginBlock events + beginKeys, err := idx.indexEvents(batch, bh.ResultBeginBlock.Events, "begin_block", height) if err != nil { return fmt.Errorf("index BeginBlock events: %w", err) } - // 3. index EndBlock events + endKeys, err := idx.indexEvents(batch, bh.ResultEndBlock.Events, "end_block", height) if err != nil { return fmt.Errorf("index EndBlock events: %w", err) } - // 4. 
index all eventkeys by height key for easy pruning + err = idx.addEventKeys(height, &beginKeys, &endKeys, batch) if err != nil { return err @@ -94,11 +94,11 @@ func (idx *BlockerIndexer) Index(bh tmtypes.EventDataNewBlockHeader) error { return batch.Commit() } -// Search performs a query for block heights that match a given BeginBlock -// and Endblock event search criteria. The given query can match against zero, -// one or more block heights. In the case of height queries, i.e. block.height=H, -// if the height is indexed, that height alone will be returned. An error and -// nil slice is returned. Otherwise, a non-nil slice and nil error is returned. + + + + + func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, error) { results := make([]int64, 0) select { @@ -113,8 +113,8 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, return nil, fmt.Errorf("parse query conditions: %w", err) } - // If there is an exact height query, return the result immediately - // (if it exists). + + height, ok := lookForHeight(conditions) if ok { ok, err := idx.Has(height) @@ -132,11 +132,11 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, var heightsInitialized bool filteredHeights := make(map[string][]byte) - // conditions to skip because they're handled before "everything else" + skipIndexes := make([]int, 0) - // Extract ranges. If both upper and lower bounds exist, it's better to get - // them in order as to not iterate over kvs that are not within range. + + ranges, rangeIndexes := indexer.LookForRanges(conditions) if len(ranges) > 0 { skipIndexes = append(skipIndexes, rangeIndexes...) @@ -155,8 +155,8 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, heightsInitialized = true - // Ignore any remaining conditions if the first condition resulted in no - // matches (assuming implicit AND operand). + + if len(filteredHeights) == 0 { break } @@ -169,7 +169,7 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, } } - // for all other conditions + for i, c := range conditions { if intInSlice(i, skipIndexes) { continue @@ -188,8 +188,8 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, heightsInitialized = true - // Ignore any remaining conditions if the first condition resulted in no - // matches (assuming implicit AND operand). + + if len(filteredHeights) == 0 { break } @@ -201,7 +201,7 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, } } - // fetch matching heights + results = make([]int64, 0, len(filteredHeights)) for _, hBz := range filteredHeights { cont := true @@ -232,12 +232,12 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, return results, nil } -// matchRange returns all matching block heights that match a given QueryRange -// and start key. An already filtered result (filteredHeights) is provided such -// that any non-intersecting matches are removed. -// -// NOTE: The provided filteredHeights may be empty if no previous condition has -// matched. + + + + + + func (idx *BlockerIndexer) matchRange( ctx context.Context, qr indexer.QueryRange, @@ -245,8 +245,8 @@ func (idx *BlockerIndexer) matchRange( filteredHeights map[string][]byte, firstRun bool, ) (map[string][]byte, error) { - // A previous match was attempted but resulted in no matches, so we return - // no matches (assuming AND operand). 
+ + if !firstRun && len(filteredHeights) == 0 { return filteredHeights, nil } @@ -314,18 +314,18 @@ LOOP: } if len(tmpHeights) == 0 || firstRun { - // Either: - // - // 1. Regardless if a previous match was attempted, which may have had - // results, but no match was found for the current condition, then we - // return no matches (assuming AND operand). - // - // 2. A previous match was not attempted, so we return all results. + + + + + + + return tmpHeights, nil } - // Remove/reduce matches in filteredHashes that were not found in this - // match (tmpHashes). + + for k := range filteredHeights { cont := true @@ -348,12 +348,12 @@ LOOP: return filteredHeights, nil } -// match returns all matching heights that meet a given query condition and start -// key. An already filtered result (filteredHeights) is provided such that any -// non-intersecting matches are removed. -// -// NOTE: The provided filteredHeights may be empty if no previous condition has -// matched. + + + + + + func (idx *BlockerIndexer) match( ctx context.Context, c query.Condition, @@ -361,8 +361,8 @@ func (idx *BlockerIndexer) match( filteredHeights map[string][]byte, firstRun bool, ) (map[string][]byte, error) { - // A previous match was attempted but resulted in no matches, so we return - // no matches (assuming AND operand). + + if !firstRun && len(filteredHeights) == 0 { return filteredHeights, nil } @@ -457,18 +457,18 @@ func (idx *BlockerIndexer) match( } if len(tmpHeights) == 0 || firstRun { - // Either: - // - // 1. Regardless if a previous match was attempted, which may have had - // results, but no match was found for the current condition, then we - // return no matches (assuming AND operand). - // - // 2. A previous match was not attempted, so we return all results. + + + + + + + return tmpHeights, nil } - // Remove/reduce matches in filteredHeights that were not found in this - // match (tmpHeights). 
+ + for k := range filteredHeights { cont := true @@ -495,7 +495,7 @@ func (idx *BlockerIndexer) indexEvents(batch store.KVBatch, events []abci.Event, heightBz := int64ToBytes(height) keys := dmtypes.EventKeys{} for _, event := range events { - // only index events with a non-empty type + if len(event.Type) == 0 { continue } @@ -505,7 +505,7 @@ func (idx *BlockerIndexer) indexEvents(batch store.KVBatch, events []abci.Event, continue } - // index iff the event specified index:true and it's not a reserved event + compositeKey := fmt.Sprintf("%s.%s", event.Type, string(attr.Key)) if compositeKey == tmtypes.BlockHeightKey { return dmtypes.EventKeys{}, fmt.Errorf("event type and attribute key \"%s\" is reserved; please use a different key", compositeKey) @@ -546,9 +546,9 @@ func (idx *BlockerIndexer) pruneBlocks(from, to uint64, logger log.Logger) (uint return nil } - for h := int64(from); h < int64(to); h++ { //nolint:gosec // heights (from and to) are always positive and fall in int64 + for h := int64(from); h < int64(to); h++ { - // flush every 1000 blocks to avoid batches becoming too large + if toFlush > 1000 { err := flush(batch, h) if err != nil { @@ -592,7 +592,7 @@ func (idx *BlockerIndexer) pruneBlocks(from, to uint64, logger log.Logger) (uint } - err := flush(batch, int64(to)) //nolint:gosec // height is non-negative and falls in int64 + err := flush(batch, int64(to)) if err != nil { return 0, err } diff --git a/indexers/blockindexer/null/null.go b/indexers/blockindexer/null/null.go index e6ee3335f..ab80fa5a9 100644 --- a/indexers/blockindexer/null/null.go +++ b/indexers/blockindexer/null/null.go @@ -13,7 +13,7 @@ import ( var _ indexer.BlockIndexer = (*BlockerIndexer)(nil) -// TxIndex implements a no-op block indexer. + type BlockerIndexer struct{} func (idx *BlockerIndexer) Has(height int64) (bool, error) { diff --git a/indexers/blockindexer/query_range.go b/indexers/blockindexer/query_range.go index b4edf53c5..9b2798524 100644 --- a/indexers/blockindexer/query_range.go +++ b/indexers/blockindexer/query_range.go @@ -6,21 +6,21 @@ import ( "github.com/tendermint/tendermint/libs/pubsub/query" ) -// QueryRanges defines a mapping between a composite event key and a QueryRange. -// -// e.g.account.number => queryRange{lowerBound: 1, upperBound: 5} + + + type QueryRanges map[string]QueryRange -// QueryRange defines a range within a query condition. + type QueryRange struct { - LowerBound interface{} // int || time.Time - UpperBound interface{} // int || time.Time + LowerBound interface{} + UpperBound interface{} Key string IncludeLowerBound bool IncludeUpperBound bool } -// AnyBound returns either the lower bound if non-nil, otherwise the upper bound. + func (qr QueryRange) AnyBound() interface{} { if qr.LowerBound != nil { return qr.LowerBound @@ -29,8 +29,8 @@ func (qr QueryRange) AnyBound() interface{} { return qr.UpperBound } -// LowerBoundValue returns the value for the lower bound. If the lower bound is -// nil, nil will be returned. + + func (qr QueryRange) LowerBoundValue() interface{} { if qr.LowerBound == nil { return nil @@ -52,8 +52,8 @@ func (qr QueryRange) LowerBoundValue() interface{} { } } -// UpperBoundValue returns the value for the upper bound. If the upper bound is -// nil, nil will be returned. 
+ + func (qr QueryRange) UpperBoundValue() interface{} { if qr.UpperBound == nil { return nil @@ -75,8 +75,8 @@ func (qr QueryRange) UpperBoundValue() interface{} { } } -// LookForRanges returns a mapping of QueryRanges and the matching indexes in -// the provided query conditions. + + func LookForRanges(conditions []query.Condition) (ranges QueryRanges, indexes []int) { ranges = make(QueryRanges) for i, c := range conditions { @@ -110,8 +110,8 @@ func LookForRanges(conditions []query.Condition) (ranges QueryRanges, indexes [] return ranges, indexes } -// IsRangeOperation returns a boolean signifying if a query Operator is a range -// operation or not. + + func IsRangeOperation(op query.Operator) bool { switch op { case query.OpGreater, query.OpGreaterEqual, query.OpLess, query.OpLessEqual: diff --git a/indexers/txindex/indexer.go b/indexers/txindex/indexer.go index 281c1dccc..6e275a021 100644 --- a/indexers/txindex/indexer.go +++ b/indexers/txindex/indexer.go @@ -10,33 +10,33 @@ import ( "github.com/tendermint/tendermint/libs/pubsub/query" ) -// TxIndexer interface defines methods to index and search transactions. + type TxIndexer interface { - // AddBatch analyzes, indexes and stores a batch of transactions. + AddBatch(b *Batch) error - // Index analyzes, indexes and stores a single transaction. + Index(result *abci.TxResult) error - // Get returns the transaction specified by hash or nil if the transaction is not indexed - // or stored. + + Get(hash []byte) (*abci.TxResult, error) - // Search allows you to query for transactions. + Search(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) - // Delete index entries for the heights between from (included) and to (not included). It returns heights pruned + Prune(from, to uint64, logger log.Logger) (uint64, error) } -// Batch groups together multiple Index operations to be performed at the same time. -// NOTE: Batch is NOT thread-safe and must not be modified after starting its execution. + + type Batch struct { Height int64 Ops []*abci.TxResult } -// NewBatch creates a new Batch. + func NewBatch(n int64, height int64) *Batch { return &Batch{ Height: height, @@ -44,16 +44,16 @@ func NewBatch(n int64, height int64) *Batch { } } -// Add or update an entry for the given result.Index. + func (b *Batch) Add(result *abci.TxResult) error { b.Ops[result.Index] = result return nil } -// Size returns the total number of operations inside the batch. + func (b *Batch) Size() int { return len(b.Ops) } -// ErrorEmptyHash indicates empty hash + var ErrorEmptyHash = errors.New("transaction hash cannot be empty") diff --git a/indexers/txindex/indexer_service.go b/indexers/txindex/indexer_service.go index e5ec76696..16e022f92 100644 --- a/indexers/txindex/indexer_service.go +++ b/indexers/txindex/indexer_service.go @@ -11,14 +11,14 @@ import ( "github.com/tendermint/tendermint/types" ) -// XXX/TODO: These types should be moved to the indexer package. + const ( subscriber = "IndexerService" ) -// IndexerService connects event bus, transaction and block indexers together in -// order to index transactions and blocks coming from the event bus. + + type IndexerService struct { service.BaseService @@ -27,7 +27,7 @@ type IndexerService struct { eventBus *types.EventBus } -// NewIndexerService returns a new service instance. 
+ func NewIndexerService( txIdxr TxIndexer, blockIdxr indexer.BlockIndexer, @@ -38,12 +38,12 @@ func NewIndexerService( return is } -// OnStart implements service.Service by subscribing for all transactions -// and indexing them by events. + + func (is *IndexerService) OnStart() error { - // Use SubscribeUnbuffered here to ensure both subscriptions does not get - // cancelled due to not pulling messages fast enough. Cause this might - // sometimes happen when there are no other subscribers. + + + blockHeadersSub, err := is.eventBus.Subscribe( context.Background(), subscriber, @@ -94,16 +94,16 @@ func (is *IndexerService) OnStart() error { return nil } -// OnStop implements service.Service by unsubscribing from all transactions. + func (is *IndexerService) OnStop() { if is.eventBus.IsRunning() { _ = is.eventBus.UnsubscribeAll(context.Background(), subscriber) } } -// Prune removes tx and blocks indexed up to (but not including) a height. + func (is *IndexerService) Prune(to uint64, s store.Store) (uint64, error) { - // load indexer base height + indexerBaseHeight, err := s.LoadIndexerBaseHeight() if errors.Is(err, gerrc.ErrNotFound) { @@ -112,19 +112,19 @@ func (is *IndexerService) Prune(to uint64, s store.Store) (uint64, error) { return 0, err } - // prune indexed blocks + blockPruned, err := is.blockIdxr.Prune(indexerBaseHeight, to, is.Logger) if err != nil { return blockPruned, err } - // prune indexes txs + txPruned, err := is.txIdxr.Prune(indexerBaseHeight, to, is.Logger) if err != nil { return txPruned, err } - // store indexer base height + err = s.SaveIndexerBaseHeight(to) if err != nil { is.Logger.Error("saving indexer base height", "err", err) diff --git a/indexers/txindex/kv/kv.go b/indexers/txindex/kv/kv.go index e1ea88910..485ba01ea 100644 --- a/indexers/txindex/kv/kv.go +++ b/indexers/txindex/kv/kv.go @@ -29,20 +29,20 @@ const ( var _ txindex.TxIndexer = (*TxIndex)(nil) -// TxIndex is the simplest possible indexer, backed by key-value storage (levelDB). + type TxIndex struct { store store.KV } -// NewTxIndex creates new KV indexer. + func NewTxIndex(store store.KV) *TxIndex { return &TxIndex{ store: store, } } -// Get gets transaction from the TxIndex storage and returns it or nil if the -// transaction is not found. + + func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) { if len(hash) == 0 { return nil, txindex.ErrorEmptyHash @@ -65,10 +65,10 @@ func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) { return txResult, nil } -// AddBatch indexes a batch of transactions using the given list of events. Each -// key that indexed from the tx's events is a composite of the event type and -// the respective attribute's key delimited by a "." (eg. "account.number"). -// Any event with an empty type is not indexed. + + + + func (txi *TxIndex) AddBatch(b *txindex.Batch) error { storeBatch := txi.store.NewBatch() defer storeBatch.Discard() @@ -77,13 +77,13 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { for _, result := range b.Ops { hash := types.Tx(result.Tx).Hash() - // index tx by events + eventKeys, err := txi.indexEvents(result, hash, storeBatch) if err != nil { return err } eventKeysBatch.Keys = append(eventKeysBatch.Keys, eventKeys.Keys...) 
- // index by height (always) + err = storeBatch.Set(keyForHeight(result), hash) if err != nil { return err @@ -93,7 +93,7 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { if err != nil { return err } - // index by hash (always) + err = storeBatch.Set(hash, rawBytes) if err != nil { return err @@ -108,29 +108,29 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { return storeBatch.Commit() } -// Index indexes a single transaction using the given list of events. Each key -// that indexed from the tx's events is a composite of the event type and the -// respective attribute's key delimited by a "." (eg. "account.number"). -// Any event with an empty type is not indexed. + + + + func (txi *TxIndex) Index(result *abci.TxResult) error { b := txi.store.NewBatch() defer b.Discard() hash := types.Tx(result.Tx).Hash() - // index tx by events + eventKeys, err := txi.indexEvents(result, hash, b) if err != nil { return err } - // add event keys height index + err = txi.addEventKeys(result.Height, &eventKeys, b) if err != nil { return nil } - // index by height (always) + err = b.Set(keyForHeight(result), hash) if err != nil { return err @@ -140,7 +140,7 @@ func (txi *TxIndex) Index(result *abci.TxResult) error { if err != nil { return err } - // index by hash (always) + err = b.Set(hash, rawBytes) if err != nil { return err @@ -152,7 +152,7 @@ func (txi *TxIndex) Index(result *abci.TxResult) error { func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store store.KVBatch) (dmtypes.EventKeys, error) { eventKeys := dmtypes.EventKeys{} for _, event := range result.Result.Events { - // only index events with a non-empty type + if len(event.Type) == 0 { continue } @@ -162,7 +162,7 @@ func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store store. continue } - // index if `index: true` is set + compositeTag := fmt.Sprintf("%s.%s", event.Type, string(attr.Key)) if attr.GetIndex() { err := store.Set(keyForEvent(compositeTag, attr.Value, result), hash) @@ -177,17 +177,17 @@ func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store store. return eventKeys, nil } -// Search performs a search using the given query. -// -// It breaks the query into conditions (like "tx.height > 5"). For each -// condition, it queries the DB index. One special use cases here: (1) if -// "tx.hash" is found, it returns tx result for it (2) for range queries it is -// better for the client to provide both lower and upper bounds, so we are not -// performing a full scan. Results from querying indexes are then intersected -// and returned to the caller, in no particular order. -// -// Search will exit early and return any result fetched so far, -// when a message is received on the context chan. 
+ + + + + + + + + + + func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) { select { case <-ctx.Done(): @@ -199,13 +199,13 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul var hashesInitialized bool filteredHashes := make(map[string][]byte) - // get a list of conditions (like "tx.height > 5") + conditions, err := q.Conditions() if err != nil { return nil, fmt.Errorf("during parsing conditions from query: %w", err) } - // if there is a hash condition, return the result immediately + hash, ok, err := lookForHash(conditions) if err != nil { return nil, fmt.Errorf("during searching for a hash in the query: %w", err) @@ -221,12 +221,12 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul } } - // conditions to skip because they're handled before "everything else" + skipIndexes := make([]int, 0) - // extract ranges - // if both upper and lower bounds exist, it's better to get them in order not - // no iterate over kvs that are not within range. + + + ranges, rangeIndexes := indexer.LookForRanges(conditions) if len(ranges) > 0 { skipIndexes = append(skipIndexes, rangeIndexes...) @@ -236,8 +236,8 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul filteredHashes = txi.matchRange(ctx, qr, startKey(qr.Key), filteredHashes, true) hashesInitialized = true - // Ignore any remaining conditions if the first condition resulted - // in no matches (assuming implicit AND operand). + + if len(filteredHashes) == 0 { break } @@ -247,10 +247,10 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul } } - // if there is a height condition ("tx.height=3"), extract it + height := lookForHeight(conditions) - // for all other conditions + for i, c := range conditions { if intInSlice(i, skipIndexes) { continue @@ -260,8 +260,8 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul filteredHashes = txi.match(ctx, c, startKeyForCondition(c, height), filteredHashes, true) hashesInitialized = true - // Ignore any remaining conditions if the first condition resulted - // in no matches (assuming implicit AND operand). + + if len(filteredHashes) == 0 { break } @@ -283,7 +283,7 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul } results = append(results, res) - // Potentially exit early. + select { case <-ctx.Done(): cont = false @@ -308,7 +308,7 @@ func lookForHash(conditions []query.Condition) (hash []byte, ok bool, err error) return } -// lookForHeight returns a height if there is an "height=X" condition. + func lookForHeight(conditions []query.Condition) (height int64) { for _, c := range conditions { if c.CompositeKey == tmtypes.TxHeightKey && c.Op == query.OpEqual { @@ -318,11 +318,11 @@ func lookForHeight(conditions []query.Condition) (height int64) { return 0 } -// match returns all matching txs by hash that meet a given condition and start -// key. An already filtered result (filteredHashes) is provided such that any -// non-intersecting matches are removed. -// -// NOTE: filteredHashes may be empty if no previous condition has matched. + + + + + func (txi *TxIndex) match( ctx context.Context, c query.Condition, @@ -330,8 +330,8 @@ func (txi *TxIndex) match( filteredHashes map[string][]byte, firstRun bool, ) map[string][]byte { - // A previous match was attempted but resulted in no matches, so we return - // no matches (assuming AND operand). 
+ + if !firstRun && len(filteredHashes) == 0 { return filteredHashes } @@ -348,7 +348,7 @@ func (txi *TxIndex) match( tmpHashes[string(it.Value())] = it.Value() - // Potentially exit early. + select { case <-ctx.Done(): cont = false @@ -364,8 +364,8 @@ func (txi *TxIndex) match( } case c.Op == query.OpExists: - // XXX: can't use startKeyBz here because c.Operand is nil - // (e.g. "account.owner//" won't match w/ a single row) + + it := txi.store.PrefixIterator(startKey(c.CompositeKey)) defer it.Discard() @@ -374,7 +374,7 @@ func (txi *TxIndex) match( tmpHashes[string(it.Value())] = it.Value() - // Potentially exit early. + select { case <-ctx.Done(): cont = false @@ -390,9 +390,9 @@ func (txi *TxIndex) match( } case c.Op == query.OpContains: - // XXX: startKey does not apply here. - // For example, if startKey = "account.owner/an/" and search query = "account.owner CONTAINS an" - // we can't iterate with prefix "account.owner/an/" because we might miss keys like "account.owner/Ulan/" + + + it := txi.store.PrefixIterator(startKey(c.CompositeKey)) defer it.Discard() @@ -407,7 +407,7 @@ func (txi *TxIndex) match( tmpHashes[string(it.Value())] = it.Value() } - // Potentially exit early. + select { case <-ctx.Done(): cont = false @@ -426,25 +426,25 @@ func (txi *TxIndex) match( } if len(tmpHashes) == 0 || firstRun { - // Either: - // - // 1. Regardless if a previous match was attempted, which may have had - // results, but no match was found for the current condition, then we - // return no matches (assuming AND operand). - // - // 2. A previous match was not attempted, so we return all results. + + + + + + + return tmpHashes } - // Remove/reduce matches in filteredHashes that were not found in this - // match (tmpHashes). + + for k := range filteredHashes { cont := true if tmpHashes[k] == nil { delete(filteredHashes, k) - // Potentially exit early. + select { case <-ctx.Done(): cont = false @@ -460,11 +460,11 @@ func (txi *TxIndex) match( return filteredHashes } -// matchRange returns all matching txs by hash that meet a given queryRange and -// start key. An already filtered result (filteredHashes) is provided such that -// any non-intersecting matches are removed. -// -// NOTE: filteredHashes may be empty if no previous condition has matched. + + + + + func (txi *TxIndex) matchRange( ctx context.Context, qr indexer.QueryRange, @@ -472,8 +472,8 @@ func (txi *TxIndex) matchRange( filteredHashes map[string][]byte, firstRun bool, ) map[string][]byte { - // A previous match was attempted but resulted in no matches, so we return - // no matches (assuming AND operand). + + if !firstRun && len(filteredHashes) == 0 { return filteredHashes } @@ -512,15 +512,15 @@ LOOP: tmpHashes[string(it.Value())] = it.Value() } - // XXX: passing time in a ABCI Events is not yet implemented - // case time.Time: - // v := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64) - // if v == r.upperBound { - // break - // } + + + + + + } - // Potentially exit early. + select { case <-ctx.Done(): cont = false @@ -536,25 +536,25 @@ LOOP: } if len(tmpHashes) == 0 || firstRun { - // Either: - // - // 1. Regardless if a previous match was attempted, which may have had - // results, but no match was found for the current condition, then we - // return no matches (assuming AND operand). - // - // 2. A previous match was not attempted, so we return all results. + + + + + + + return tmpHashes } - // Remove/reduce matches in filteredHashes that were not found in this - // match (tmpHashes). 
+ + for k := range filteredHashes { cont := true if tmpHashes[k] == nil { delete(filteredHashes, k) - // Potentially exit early. + select { case <-ctx.Done(): cont = false @@ -592,9 +592,9 @@ func (txi *TxIndex) pruneTxsAndEvents(from, to uint64, logger log.Logger) (uint6 return nil } - for h := int64(from); h < int64(to); h++ { //nolint:gosec // heights (from and to) are always positive and fall in int64 + for h := int64(from); h < int64(to); h++ { - // flush every 1000 txs to avoid batches becoming too large + if toFlush > 1000 { err := flush(batch, h) if err != nil { @@ -605,7 +605,7 @@ func (txi *TxIndex) pruneTxsAndEvents(from, to uint64, logger log.Logger) (uint6 toFlush = 0 } - // first all events are pruned associated to the same height + prunedEvents, err := txi.pruneEvents(h, batch) pruned += prunedEvents toFlush += prunedEvents @@ -614,10 +614,10 @@ func (txi *TxIndex) pruneTxsAndEvents(from, to uint64, logger log.Logger) (uint6 continue } - // then all txs indexed are iterated by height + it := txi.store.PrefixIterator(prefixForHeight(h)) - // and deleted all indexed (by hash and by keyheight) + for ; it.Valid(); it.Next() { toFlush++ if err := batch.Delete(it.Key()); err != nil { @@ -635,7 +635,7 @@ func (txi *TxIndex) pruneTxsAndEvents(from, to uint64, logger log.Logger) (uint6 } - err := flush(batch, int64(to)) //nolint:gosec // height is non-negative and falls in int64 + err := flush(batch, int64(to)) if err != nil { return 0, err } @@ -669,7 +669,7 @@ func (txi *TxIndex) pruneEvents(height int64, batch store.KVBatch) (uint64, erro } func (txi *TxIndex) addEventKeys(height int64, eventKeys *dymint.EventKeys, batch store.KVBatch) error { - // index event keys by height + eventKeyHeight, err := eventHeightKey(height) if err != nil { return err @@ -684,7 +684,7 @@ func (txi *TxIndex) addEventKeys(height int64, eventKeys *dymint.EventKeys, batc return nil } -// Keys + func isTagKey(key []byte) bool { return strings.Count(string(key), tagKeySeparator) == 3 diff --git a/indexers/txindex/kv/utils.go b/indexers/txindex/kv/utils.go index 73cb223f2..05cb12c90 100644 --- a/indexers/txindex/kv/utils.go +++ b/indexers/txindex/kv/utils.go @@ -4,7 +4,7 @@ import "github.com/google/orderedcode" const TxEventHeightKey = "txevent.height" -// IntInSlice returns true if a is found in the list. + func intInSlice(a int, list []int) bool { for _, b := range list { if b == a { diff --git a/indexers/txindex/null/null.go b/indexers/txindex/null/null.go index 426b08099..7d2167389 100644 --- a/indexers/txindex/null/null.go +++ b/indexers/txindex/null/null.go @@ -13,20 +13,20 @@ import ( var _ txindex.TxIndexer = (*TxIndex)(nil) -// TxIndex acts as a /dev/null. + type TxIndex struct{} -// Get on a TxIndex is disabled and panics when invoked. + func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) { return nil, errors.New(`indexing is disabled (set 'tx_index = "kv"' in config)`) } -// AddBatch is a noop and always returns nil. + func (txi *TxIndex) AddBatch(batch *txindex.Batch) error { return nil } -// Index is a noop and always returns nil. + func (txi *TxIndex) Index(result *abci.TxResult) error { return nil } diff --git a/mempool/cache.go b/mempool/cache.go index 78aefa3c4..fdb11ea5b 100644 --- a/mempool/cache.go +++ b/mempool/cache.go @@ -7,31 +7,31 @@ import ( "github.com/tendermint/tendermint/types" ) -// TxCache defines an interface for raw transaction caching in a mempool. -// Currently, a TxCache does not allow direct reading or getting of transaction -// values. 
A TxCache is used primarily to push transactions and removing -// transactions. Pushing via Push returns a boolean telling the caller if the -// transaction already exists in the cache or not. + + + + + type TxCache interface { - // Reset resets the cache to an empty state. + Reset() - // Push adds the given raw transaction to the cache and returns true if it was - // newly added. Otherwise, it returns false. + + Push(tx types.Tx) bool - // Remove removes the given raw transaction from the cache. + Remove(tx types.Tx) - // Has reports whether tx is present in the cache. Checking for presence is - // not treated as an access of the value. + + Has(tx types.Tx) bool } var _ TxCache = (*LRUTxCache)(nil) -// LRUTxCache maintains a thread-safe LRU cache of raw transactions. The cache -// only stores the hash of the raw transaction. + + type LRUTxCache struct { mtx sync.Mutex size int @@ -47,8 +47,8 @@ func NewLRUTxCache(cacheSize int) *LRUTxCache { } } -// GetList returns the underlying linked-list that backs the LRU cache. Note, -// this should be used for testing purposes only! + + func (c *LRUTxCache) GetList() *list.List { return c.list } @@ -109,7 +109,7 @@ func (c *LRUTxCache) Has(tx types.Tx) bool { return ok } -// NopTxCache defines a no-op raw transaction cache. + type NopTxCache struct{} var _ TxCache = (*NopTxCache)(nil) diff --git a/mempool/clist/clist.go b/mempool/clist/clist.go index 2e4171b1c..ff94a4b49 100644 --- a/mempool/clist/clist.go +++ b/mempool/clist/clist.go @@ -1,15 +1,6 @@ package clist -/* -The purpose of CList is to provide a goroutine-safe linked-list. -This list can be traversed concurrently by any number of goroutines. -However, removed CElements cannot be added back. -NOTE: Not all methods of container/list are (yet) implemented. -NOTE: Removed elements need to DetachPrev or DetachNext consistently -to ensure garbage collection of removed elements. - -*/ import ( "fmt" @@ -18,29 +9,12 @@ import ( tmsync "github.com/tendermint/tendermint/libs/sync" ) -// MaxLength is the max allowed number of elements a linked list is -// allowed to contain. -// If more elements are pushed to the list it will panic. + + + const MaxLength = int(^uint(0) >> 1) -/* -CElement is an element of a linked-list -Traversal from a CElement is goroutine-safe. - -We can't avoid using WaitGroups or for-loops given the documentation -spec without re-implementing the primitives that already exist in -golang/sync. Notice that WaitGroup allows many go-routines to be -simultaneously released, which is what we want. Mutex doesn't do -this. RWMutex does this, but it's clumsy to use in the way that a -WaitGroup would be used -- and we'd end up having two RWMutex's for -prev/next each, which is doubly confusing. - -sync.Cond would be sort-of useful, but we don't need a write-lock in -the for-loop. Use sync.Cond when you need serial access to the -"condition". In our case our condition is if `next != nil || removed`, -and there's no reason to serialize that condition for goroutines -waiting on NextWait() (since it's just a read operation). -*/ + type CElement struct { mtx tmsync.RWMutex prev *CElement @@ -51,11 +25,11 @@ type CElement struct { nextWaitCh chan struct{} removed bool - Value interface{} // immutable + Value interface{} } -// Blocking implementation of Next(). -// May return nil iff CElement was tail and got removed. 
+ + func (e *CElement) NextWait() *CElement { for { e.mtx.RLock() @@ -69,13 +43,13 @@ func (e *CElement) NextWait() *CElement { } nextWg.Wait() - // e.next doesn't necessarily exist here. - // That's why we need to continue a for-loop. + + } } -// Blocking implementation of Prev(). -// May return nil iff CElement was head and got removed. + + func (e *CElement) PrevWait() *CElement { for { e.mtx.RLock() @@ -92,8 +66,8 @@ func (e *CElement) PrevWait() *CElement { } } -// PrevWaitChan can be used to wait until Prev becomes not nil. Once it does, -// channel will be closed. + + func (e *CElement) PrevWaitChan() <-chan struct{} { e.mtx.RLock() defer e.mtx.RUnlock() @@ -101,8 +75,8 @@ func (e *CElement) PrevWaitChan() <-chan struct{} { return e.prevWaitCh } -// NextWaitChan can be used to wait until Next becomes not nil. Once it does, -// channel will be closed. + + func (e *CElement) NextWaitChan() <-chan struct{} { e.mtx.RLock() defer e.mtx.RUnlock() @@ -110,7 +84,7 @@ func (e *CElement) NextWaitChan() <-chan struct{} { return e.nextWaitCh } -// Nonblocking, may return nil if at the end. + func (e *CElement) Next() *CElement { e.mtx.RLock() val := e.next @@ -118,7 +92,7 @@ func (e *CElement) Next() *CElement { return val } -// Nonblocking, may return nil if at the end. + func (e *CElement) Prev() *CElement { e.mtx.RLock() prev := e.prev @@ -153,20 +127,20 @@ func (e *CElement) DetachPrev() { e.mtx.Unlock() } -// NOTE: This function needs to be safe for -// concurrent goroutines waiting on nextWg. + + func (e *CElement) SetNext(newNext *CElement) { e.mtx.Lock() oldNext := e.next e.next = newNext if oldNext != nil && newNext == nil { - // See https://golang.org/pkg/sync/: - // - // If a WaitGroup is reused to wait for several independent sets of - // events, new Add calls must happen after all previous Wait calls have - // returned. - e.nextWg = waitGroup1() // WaitGroups are difficult to re-use. + + + + + + e.nextWg = waitGroup1() e.nextWaitCh = make(chan struct{}) } if oldNext == nil && newNext != nil { @@ -176,15 +150,15 @@ func (e *CElement) SetNext(newNext *CElement) { e.mtx.Unlock() } -// NOTE: This function needs to be safe for -// concurrent goroutines waiting on prevWg + + func (e *CElement) SetPrev(newPrev *CElement) { e.mtx.Lock() oldPrev := e.prev e.prev = newPrev if oldPrev != nil && newPrev == nil { - e.prevWg = waitGroup1() // WaitGroups are difficult to re-use. + e.prevWg = waitGroup1() e.prevWaitCh = make(chan struct{}) } if oldPrev == nil && newPrev != nil { @@ -199,7 +173,7 @@ func (e *CElement) SetRemoved() { e.removed = true - // This wakes up anyone waiting in either direction. + if e.prev == nil { e.prevWg.Done() close(e.prevWaitCh) @@ -211,20 +185,20 @@ func (e *CElement) SetRemoved() { e.mtx.Unlock() } -//-------------------------------------------------------------------------------- -// CList represents a linked list. -// The zero value for CList is an empty list ready to use. -// Operations are goroutine-safe. -// Panics if length grows beyond the max. + + + + + type CList struct { mtx tmsync.RWMutex wg *sync.WaitGroup waitCh chan struct{} - head *CElement // first element - tail *CElement // last element - len int // list length - maxLen int // max list length + head *CElement + tail *CElement + len int + maxLen int } func (l *CList) Init() *CList { @@ -239,11 +213,11 @@ func (l *CList) Init() *CList { return l } -// Return CList with MaxLength. CList will panic if it goes beyond MaxLength. 
+ func New() *CList { return newWithMax(MaxLength) } -// Return CList with given maxLength. -// Will panic if list exceeds given maxLength. + + func newWithMax(maxLength int) *CList { l := new(CList) l.maxLen = maxLength @@ -265,7 +239,7 @@ func (l *CList) Front() *CElement { } func (l *CList) FrontWait() *CElement { - // Loop until the head is non-nil else wait and try again + for { l.mtx.RLock() head := l.head @@ -276,7 +250,7 @@ func (l *CList) FrontWait() *CElement { return head } wg.Wait() - // NOTE: If you think l.head exists here, think harder. + } } @@ -298,13 +272,13 @@ func (l *CList) BackWait() *CElement { return tail } wg.Wait() - // l.tail doesn't necessarily exist here. - // That's why we need to continue a for-loop. + + } } -// WaitChan can be used to wait until Front or Back becomes not nil. Once it -// does, channel will be closed. + + func (l *CList) WaitChan() <-chan struct{} { l.mtx.Lock() defer l.mtx.Unlock() @@ -312,11 +286,11 @@ func (l *CList) WaitChan() <-chan struct{} { return l.waitCh } -// Panics if list grows beyond its max length. + func (l *CList) PushBack(v interface{}) *CElement { l.mtx.Lock() - // Construct a new element + e := &CElement{ prev: nil, prevWg: waitGroup1(), @@ -328,7 +302,7 @@ func (l *CList) PushBack(v interface{}) *CElement { Value: v, } - // Release waiters on FrontWait/BackWait maybe + if l.len == 0 { l.wg.Done() close(l.waitCh) @@ -338,21 +312,21 @@ func (l *CList) PushBack(v interface{}) *CElement { } l.len++ - // Modify the tail + if l.tail == nil { l.head = e l.tail = e } else { - e.SetPrev(l.tail) // We must init e first. - l.tail.SetNext(e) // This will make e accessible. - l.tail = e // Update the list. + e.SetPrev(l.tail) + l.tail.SetNext(e) + l.tail = e } l.mtx.Unlock() return e } -// CONTRACT: Caller must call e.DetachPrev() and/or e.DetachNext() to avoid memory leaks. -// NOTE: As per the contract of CList, removed elements cannot be added back. + + func (l *CList) Remove(e *CElement) interface{} { l.mtx.Lock() @@ -372,16 +346,16 @@ func (l *CList) Remove(e *CElement) interface{} { panic("Remove(e) with false tail") } - // If we're removing the only item, make CList FrontWait/BackWait wait. + if l.len == 1 { - l.wg = waitGroup1() // WaitGroups are difficult to re-use. + l.wg = waitGroup1() l.waitCh = make(chan struct{}) } - // Update l.len + l.len-- - // Connect next/prev and set head/tail + if prev == nil { l.head = next } else { @@ -393,7 +367,7 @@ func (l *CList) Remove(e *CElement) interface{} { next.SetPrev(prev) } - // Set .Done() on e, otherwise waiters will wait forever. + e.SetRemoved() l.mtx.Unlock() diff --git a/mempool/ids.go b/mempool/ids.go index d64a07bda..5afb3bc92 100644 --- a/mempool/ids.go +++ b/mempool/ids.go @@ -1,3 +1,3 @@ package mempool -// These functions were moved into v0/reactor.go and v1/reactor.go + diff --git a/mempool/mempool.go b/mempool/mempool.go index 48aa380f4..dbbec0e02 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -13,107 +13,107 @@ import ( const ( MempoolChannel = byte(0x30) - // PeerCatchupSleepIntervalMS defines how much time to sleep if a peer is behind + PeerCatchupSleepIntervalMS = 100 - // UnknownPeerID is the peer ID to use when running CheckTx when there is - // no peer (e.g. RPC) + + UnknownPeerID uint16 = 0 MaxActiveIDs = math.MaxUint16 ) -// Mempool defines the mempool interface. -// -// Updates to the mempool need to be synchronized with committing a block so -// applications can reset their transient state on Commit. 
+ + + + type Mempool interface { - // CheckTx executes a new transaction against the application to determine - // its validity and whether it should be added to the mempool. + + CheckTx(tx types.Tx, callback func(*abci.Response), txInfo TxInfo) error - // RemoveTxByKey removes a transaction, identified by its key, - // from the mempool. + + RemoveTxByKey(txKey types.TxKey) error - // ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes - // bytes total with the condition that the total gasWanted must be less than - // maxGas. - // - // If both maxes are negative, there is no cap on the size of all returned - // transactions (~ all available transactions). + + + + + + ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs - // ReapMaxTxs reaps up to max transactions from the mempool. If max is - // negative, there is no cap on the size of all returned transactions - // (~ all available transactions). + + + ReapMaxTxs(max int) types.Txs - // Lock locks the mempool. The consensus must be able to hold lock to safely - // update. + + Lock() - // Unlock unlocks the mempool. + Unlock() - // Update informs the mempool that the given txs were committed and can be - // discarded. - // - // NOTE: - // 1. This should be called *after* block is committed by consensus. - // 2. Lock/Unlock must be managed by the caller. + + + + + + Update( blockHeight int64, blockTxs types.Txs, deliverTxResponses []*abci.ResponseDeliverTx, ) error - // SetPreCheckFn sets the pre-check function. + SetPreCheckFn(fn PreCheckFunc) - // SetPostCheckFn sets the post-check function. + SetPostCheckFn(fn PostCheckFunc) - // FlushAppConn flushes the mempool connection to ensure async callback calls - // are done, e.g. from CheckTx. - // - // NOTE: - // 1. Lock/Unlock must be managed by caller. + + + + + FlushAppConn() error - // Flush removes all transactions from the mempool and caches. + Flush() - // TxsAvailable returns a channel which fires once for every height, and only - // when transactions are available in the mempool. - // - // NOTE: - // 1. The returned channel may be nil if EnableTxsAvailable was not called. + + + + + TxsAvailable() <-chan struct{} - // EnableTxsAvailable initializes the TxsAvailable channel, ensuring it will - // trigger once every height when transactions are available. + + EnableTxsAvailable() - // Size returns the number of transactions in the mempool. + Size() int - // SizeBytes returns the total size of all txs in the mempool. + SizeBytes() int64 } -// PreCheckFunc is an optional filter executed before CheckTx and rejects -// transaction if false is returned. An example would be to ensure that a -// transaction doesn't exceeded the block size. + + + type PreCheckFunc func(types.Tx) error -// PostCheckFunc is an optional filter executed after CheckTx and rejects -// transaction if false is returned. An example would be to ensure a -// transaction doesn't require more gas than available for the block. + + + type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error -// PreCheckMaxBytes checks that the size of the transaction is smaller or equal -// to the expected maxBytes. + + func PreCheckMaxBytes(maxBytes int64) PreCheckFunc { return func(tx types.Tx) error { txSize := types.ComputeProtoSizeForTxs([]types.Tx{tx}) @@ -126,8 +126,8 @@ func PreCheckMaxBytes(maxBytes int64) PreCheckFunc { } } -// PostCheckMaxGas checks that the wanted gas is smaller or equal to the passed -// maxGas. Returns nil if maxGas is -1. 
+ + func PostCheckMaxGas(maxGas int64) PostCheckFunc { return func(tx types.Tx, res *abci.ResponseCheckTx) error { if maxGas == -1 { @@ -146,14 +146,14 @@ func PostCheckMaxGas(maxGas int64) PostCheckFunc { } } -// ErrTxInCache is returned to the client if we saw tx earlier + var ErrTxInCache = errors.New("tx already exists in cache") -// TxKey is the fixed length array key used as an index. + type TxKey [sha256.Size]byte -// ErrTxTooLarge defines an error when a transaction is too big to be sent in a -// message to other peers. + + type ErrTxTooLarge struct { Max int Actual int @@ -163,8 +163,8 @@ func (e ErrTxTooLarge) Error() string { return fmt.Sprintf("Tx too large. Max size is %d, but got %d", e.Max, e.Actual) } -// ErrMempoolIsFull defines an error where Tendermint and the application cannot -// handle that much load. + + type ErrMempoolIsFull struct { NumTxs int MaxTxs int @@ -182,7 +182,7 @@ func (e ErrMempoolIsFull) Error() string { ) } -// ErrPreCheck defines an error where a transaction fails a pre-check. + type ErrPreCheck struct { Reason error } @@ -191,7 +191,7 @@ func (e ErrPreCheck) Error() string { return e.Reason.Error() } -// IsPreCheckError returns true if err is due to pre check failure. + func IsPreCheckError(err error) bool { return errors.As(err, &ErrPreCheck{}) } diff --git a/mempool/metrics.go b/mempool/metrics.go index 5d3022e80..613715038 100644 --- a/mempool/metrics.go +++ b/mempool/metrics.go @@ -8,42 +8,42 @@ import ( ) const ( - // MetricsSubsystem is a subsystem shared by all metrics exposed by this - // package. + + MetricsSubsystem = "mempool" ) -// Metrics contains metrics exposed by this package. -// see MetricsProvider for descriptions. + + type Metrics struct { - // Size of the mempool. + Size metrics.Gauge - // Histogram of transaction sizes, in bytes. + TxSizeBytes metrics.Histogram - // Number of failed transactions. + FailedTxs metrics.Counter - // RejectedTxs defines the number of rejected transactions. These are - // transactions that passed CheckTx but failed to make it into the mempool - // due to resource limits, e.g. mempool is full and no lower priority - // transactions exist in the mempool. + + + + RejectedTxs metrics.Counter - // EvictedTxs defines the number of evicted transactions. These are valid - // transactions that passed CheckTx and existed in the mempool but were later - // evicted to make room for higher priority valid transactions that passed - // CheckTx. + + + + EvictedTxs metrics.Counter - // Number of times transactions are rechecked in the mempool. + RecheckTimes metrics.Counter } -// PrometheusMetrics returns Metrics build using Prometheus client library. -// Optionally, labels can be provided along with their values ("foo", -// "fooValue"). + + + func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { labels := []string{} for i := 0; i < len(labelsAndValues); i += 2 { @@ -95,7 +95,7 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { } } -// NopMetrics returns no-op Metrics. + func NopMetrics() *Metrics { return &Metrics{ Size: discard.NewGauge(), diff --git a/mempool/mock/mempool.go b/mempool/mock/mempool.go index 3f293381f..014816a9e 100644 --- a/mempool/mock/mempool.go +++ b/mempool/mock/mempool.go @@ -7,7 +7,7 @@ import ( "github.com/tendermint/tendermint/types" ) -// Mempool is an empty implementation of a Mempool, useful for testing. 
+ type Mempool struct{} var _ mempool.Mempool = Mempool{} diff --git a/mempool/tx.go b/mempool/tx.go index d13f3d6b8..191f1cbc0 100644 --- a/mempool/tx.go +++ b/mempool/tx.go @@ -4,14 +4,14 @@ import ( "github.com/tendermint/tendermint/p2p" ) -// TxInfo are parameters that get passed when attempting to add a tx to the -// mempool. + + type TxInfo struct { - // SenderID is the internal peer ID used in the mempool to identify the - // sender, storing two bytes with each transaction instead of 20 bytes for - // the types.NodeID. + + + SenderID uint16 - // SenderP2PID is the actual p2p.ID of the sender, used e.g. for logging. + SenderP2PID p2p.ID } diff --git a/mempool/v1/mempool.go b/mempool/v1/mempool.go index 1d0eb3a9b..a543b64ab 100644 --- a/mempool/v1/mempool.go +++ b/mempool/v1/mempool.go @@ -20,45 +20,45 @@ import ( var _ mempool.Mempool = (*TxMempool)(nil) -// TxMempoolOption sets an optional parameter on the TxMempool. + type TxMempoolOption func(*TxMempool) -// TxMempool implemements the Mempool interface and allows the application to -// set priority values on transactions in the CheckTx response. When selecting -// transactions to include in a block, higher-priority transactions are chosen -// first. When evicting transactions from the mempool for size constraints, -// lower-priority transactions are evicted sooner. -// -// Within the mempool, transactions are ordered by time of arrival, and are -// gossiped to the rest of the network based on that order (gossip order does -// not take priority into account). + + + + + + + + + type TxMempool struct { - // Immutable fields + logger log.Logger config *config.MempoolConfig proxyAppConn proxy.AppConnMempool metrics *mempool.Metrics - cache mempool.TxCache // seen transactions + cache mempool.TxCache - // Atomically-updated fields - txsBytes int64 // atomic: the total size of all transactions in the mempool, in bytes - txRecheck int64 // atomic: the number of pending recheck calls + + txsBytes int64 + txRecheck int64 - // Synchronized fields, protected by mtx. + mtx *sync.RWMutex notifiedTxsAvailable bool - txsAvailable chan struct{} // one value sent per height when mempool is not empty + txsAvailable chan struct{} preCheck mempool.PreCheckFunc postCheck mempool.PostCheckFunc - height int64 // the latest height passed to Update + height int64 - txs *clist.CList // valid transactions (passed CheckTx) + txs *clist.CList txByKey map[types.TxKey]*clist.CElement - txBySender map[string]*clist.CElement // for sender != "" + txBySender map[string]*clist.CElement } -// NewTxMempool constructs a new, empty priority mempool at the specified -// initial height and using the given config and options. + + func NewTxMempool( logger log.Logger, cfg *config.MempoolConfig, @@ -91,59 +91,59 @@ func NewTxMempool( return txmp } -// WithPreCheck sets a filter for the mempool to reject a transaction if f(tx) -// returns an error. This is executed before CheckTx. It only applies to the -// first created block. After that, Update() overwrites the existing value. + + + func WithPreCheck(f mempool.PreCheckFunc) TxMempoolOption { return func(txmp *TxMempool) { txmp.preCheck = f } } -// WithPostCheck sets a filter for the mempool to reject a transaction if -// f(tx, resp) returns an error. This is executed after CheckTx. It only applies -// to the first created block. After that, Update overwrites the existing value. 
+ + + func WithPostCheck(f mempool.PostCheckFunc) TxMempoolOption { return func(txmp *TxMempool) { txmp.postCheck = f } } -// WithMetrics sets the mempool's metrics collector. + func WithMetrics(metrics *mempool.Metrics) TxMempoolOption { return func(txmp *TxMempool) { txmp.metrics = metrics } } -// Lock obtains a write-lock on the mempool. A caller must be sure to explicitly -// release the lock when finished. + + func (txmp *TxMempool) Lock() { txmp.mtx.Lock() } -// Unlock releases a write-lock on the mempool. + func (txmp *TxMempool) Unlock() { txmp.mtx.Unlock() } -// Size returns the number of valid transactions in the mempool. It is -// thread-safe. + + func (txmp *TxMempool) Size() int { return txmp.txs.Len() } -// SizeBytes return the total sum in bytes of all the valid transactions in the -// mempool. It is thread-safe. + + func (txmp *TxMempool) SizeBytes() int64 { return atomic.LoadInt64(&txmp.txsBytes) } -// FlushAppConn executes FlushSync on the mempool's proxyAppConn. -// -// The caller must hold an exclusive mempool lock (by calling txmp.Lock) before -// calling FlushAppConn. + + + + func (txmp *TxMempool) FlushAppConn() error { - // N.B.: We have to issue the call outside the lock so that its callback can - // fire. It's safe to do this, the flush will block until complete. - // - // We could just not require the caller to hold the lock at all, but the - // semantics of the Mempool interface require the caller to hold it, and we - // can't change that without disrupting existing use. + + + + + + txmp.mtx.Unlock() defer txmp.mtx.Lock() return txmp.proxyAppConn.FlushSync() } -// EnableTxsAvailable enables the mempool to trigger events when transactions -// are available on a block by block basis. + + func (txmp *TxMempool) EnableTxsAvailable() { txmp.mtx.Lock() defer txmp.mtx.Unlock() @@ -151,60 +151,60 @@ func (txmp *TxMempool) EnableTxsAvailable() { txmp.txsAvailable = make(chan struct{}, 1) } -// TxsAvailable returns a channel which fires once for every height, and only -// when transactions are available in the mempool. It is thread-safe. + + func (txmp *TxMempool) TxsAvailable() <-chan struct{} { return txmp.txsAvailable } -// CheckTx adds the given transaction to the mempool if it fits and passes the -// application's ABCI CheckTx method. -// -// CheckTx reports an error without adding tx if: -// -// - The size of tx exceeds the configured maximum transaction size. -// - The pre-check hook is defined and reports an error for tx. -// - The transaction already exists in the cache. -// - The proxy connection to the application fails. -// -// If tx passes all of the above conditions, it is passed (asynchronously) to -// the application's ABCI CheckTx method and this CheckTx method returns nil. -// If cb != nil, it is called when the ABCI request completes to report the -// application response. -// -// If the application accepts the transaction and the mempool is full, the -// mempool evicts one or more of the lowest-priority transaction whose priority -// is (strictly) lower than the priority of tx and whose size together exceeds -// the size of tx, and adds tx instead. If no such transactions exist, tx is -// discarded. + + + + + + + + + + + + + + + + + + + + func (txmp *TxMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo mempool.TxInfo) error { - // During the initial phase of CheckTx, we do not need to modify any state. 
- // A transaction will not actually be added to the mempool until it survives - // a call to the ABCI CheckTx method and size constraint checks. + + + height, err := func() (int64, error) { txmp.mtx.RLock() defer txmp.mtx.RUnlock() - // Reject transactions in excess of the configured maximum transaction size. + if len(tx) > txmp.config.MaxTxBytes { return 0, mempool.ErrTxTooLarge{Max: txmp.config.MaxTxBytes, Actual: len(tx)} } - // If a precheck hook is defined, call it before invoking the application. + if txmp.preCheck != nil { if err := txmp.preCheck(tx); err != nil { return 0, mempool.ErrPreCheck{Reason: err} } } - // Early exit if the proxy connection has an error. + if err := txmp.proxyAppConn.Error(); err != nil { return 0, err } txKey := tx.Key() - // Check for the transaction in the cache. + if !txmp.cache.Push(tx) { - // If the cached transaction is also in the pool, record its sender. + if elt, ok := txmp.txByKey[txKey]; ok { w, _ := elt.Value.(*WrappedTx) w.SetPeer(txInfo.SenderID) @@ -217,13 +217,13 @@ func (txmp *TxMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo memp return err } - // Initiate an ABCI CheckTx for this transaction. The callback is - // responsible for adding the transaction to the pool if it survives. - // - // N.B.: We have to issue the call outside the lock. In a local client, - // even an "async" call invokes its callback immediately which will make - // the callback deadlock trying to acquire the same lock. This isn't a - // problem with out-of-process calls, but this has to work for both. + + + + + + + reqRes := txmp.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{Tx: tx}) if err := txmp.proxyAppConn.FlushSync(); err != nil { return err @@ -244,17 +244,17 @@ func (txmp *TxMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo memp return nil } -// RemoveTxByKey removes the transaction with the specified key from the -// mempool. It reports an error if no such transaction exists. This operation -// does not remove the transaction from the cache. + + + func (txmp *TxMempool) RemoveTxByKey(txKey types.TxKey) error { txmp.mtx.Lock() defer txmp.mtx.Unlock() return txmp.removeTxByKey(txKey) } -// removeTxByKey removes the specified transaction key from the mempool. -// The caller must hold txmp.mtx excluxively. + + func (txmp *TxMempool) removeTxByKey(key types.TxKey) error { if elt, ok := txmp.txByKey[key]; ok { w, _ := elt.Value.(*WrappedTx) @@ -269,8 +269,8 @@ func (txmp *TxMempool) removeTxByKey(key types.TxKey) error { return fmt.Errorf("transaction %x not found", key) } -// removeTxByElement removes the specified transaction element from the mempool. -// The caller must hold txmp.mtx exclusively. + + func (txmp *TxMempool) removeTxByElement(elt *clist.CElement) { w, _ := elt.Value.(*WrappedTx) delete(txmp.txByKey, w.tx.Key()) @@ -281,14 +281,14 @@ func (txmp *TxMempool) removeTxByElement(elt *clist.CElement) { atomic.AddInt64(&txmp.txsBytes, -w.Size()) } -// Flush purges the contents of the mempool and the cache, leaving both empty. -// The current height is not modified by this operation. + + func (txmp *TxMempool) Flush() { txmp.mtx.Lock() defer txmp.mtx.Unlock() - // Remove all the transactions in the list explicitly, so that the sizes - // and indexes get updated properly. + + cur := txmp.txs.Front() for cur != nil { next := cur.Next() @@ -297,14 +297,14 @@ func (txmp *TxMempool) Flush() { } txmp.cache.Reset() - // Discard any pending recheck calls that may be in flight. 
The calls will - // still complete, but will have no effect on the mempool. + + atomic.StoreInt64(&txmp.txRecheck, 0) } -// allEntriesSorted returns a slice of all the transactions currently in the -// mempool, sorted in nonincreasing order by priority with ties broken by -// increasing order of arrival time. + + + func (txmp *TxMempool) allEntriesSorted() []*WrappedTx { txmp.mtx.RLock() defer txmp.mtx.RUnlock() @@ -317,28 +317,28 @@ func (txmp *TxMempool) allEntriesSorted() []*WrappedTx { if all[i].priority == all[j].priority { return all[i].timestamp.Before(all[j].timestamp) } - return all[i].priority > all[j].priority // N.B. higher priorities first + return all[i].priority > all[j].priority }) return all } -// ReapMaxBytesMaxGas returns a slice of valid transactions that fit within the -// size and gas constraints. The results are ordered by nonincreasing priority, -// with ties broken by increasing order of arrival. Reaping transactions does -// not remove them from the mempool. -// -// If maxBytes < 0, no limit is set on the total size in bytes. -// If maxGas < 0, no limit is set on the total gas cost. -// -// If the mempool is empty or has no transactions fitting within the given -// constraints, the result will also be empty. + + + + + + + + + + func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { var totalGas, totalBytes int64 - var keep []types.Tx //nolint:prealloc + var keep []types.Tx for _, w := range txmp.allEntriesSorted() { - // N.B. When computing byte size, we need to include the overhead for - // encoding as protobuf to send to the application. + + totalGas += w.gasWanted totalBytes += types.ComputeProtoSizeForTxs([]types.Tx{w.tx}) if (maxGas >= 0 && totalGas > maxGas) || (maxBytes >= 0 && totalBytes > maxBytes) { @@ -349,24 +349,24 @@ func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { return keep } -// TxsWaitChan returns a channel that is closed when there is at least one -// transaction available to be gossiped. + + func (txmp *TxMempool) TxsWaitChan() <-chan struct{} { return txmp.txs.WaitChan() } -// TxsFront returns the frontmost element of the pending transaction list. -// It will be nil if the mempool is empty. + + func (txmp *TxMempool) TxsFront() *clist.CElement { return txmp.txs.Front() } -// ReapMaxTxs returns up to max transactions from the mempool. The results are -// ordered by nonincreasing priority with ties broken by increasing order of -// arrival. Reaping transactions does not remove them from the mempool. -// -// If max < 0, all transactions in the mempool are reaped. -// -// The result may have fewer than max elements (possibly zero) if the mempool -// does not have that many transactions available. + + + + + + + + func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs { - var keep []types.Tx //nolint:prealloc + var keep []types.Tx for _, w := range txmp.allEntriesSorted() { if max >= 0 && len(keep) >= max { @@ -377,28 +377,28 @@ func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs { return keep } -// Update removes all the given transactions from the mempool and the cache, -// and updates the current block height. The blockTxs and deliverTxResponses -// must have the same length with each response corresponding to the tx at the -// same offset. -// -// If the configuration enables recheck, Update sends each remaining -// transaction after removing blockTxs to the ABCI CheckTx method. Any -// transactions marked as invalid during recheck are also removed. 
-// -// The caller must hold an exclusive mempool lock (by calling txmp.Lock) before -// calling Update. + + + + + + + + + + + func (txmp *TxMempool) Update( blockHeight int64, blockTxs types.Txs, deliverTxResponses []*abci.ResponseDeliverTx, ) error { - // Safety sanity check: The caller is required to hold the lock. + if txmp.mtx.TryLock() { txmp.mtx.Unlock() panic("mempool: Update caller does not hold the lock") } - // Safety check: Transactions and responses must match in number. + if len(blockTxs) != len(deliverTxResponses) { panic(fmt.Sprintf("mempool: got %d transactions but %d DeliverTx responses", len(blockTxs), len(deliverTxResponses))) @@ -408,24 +408,24 @@ func (txmp *TxMempool) Update( txmp.notifiedTxsAvailable = false for i, tx := range blockTxs { - // Add successful committed transactions to the cache (if they are not - // already present). Transactions that failed to commit are removed from - // the cache unless the operator has explicitly requested we keep them. + + + if deliverTxResponses[i].Code == abci.CodeTypeOK { _ = txmp.cache.Push(tx) } else if !txmp.config.KeepInvalidTxsInCache { txmp.cache.Remove(tx) } - // Regardless of success, remove the transaction from the mempool. + _ = txmp.removeTxByKey(tx.Key()) } txmp.purgeExpiredTxs(blockHeight) - // If there are any uncommitted transactions left in the mempool, we either - // initiate re-CheckTx per remaining transaction or notify that remaining - // transactions are left. + + + size := txmp.Size() txmp.metrics.Size.Set(float64(size)) if size > 0 { @@ -446,19 +446,19 @@ func (txmp *TxMempool) SetPostCheckFn(fn mempool.PostCheckFunc) { txmp.postCheck = fn } -// initialTxCallback handles the ABCI CheckTx response for the first time a -// transaction is added to the mempool. A recheck after a block is committed -// goes to the default callback (see recheckTxCallback). -// -// If either the application rejected the transaction or a post-check hook is -// defined and rejects the transaction, it is discarded. -// -// Otherwise, if the mempool is full, check for lower-priority transactions -// that can be evicted to make room for the new one. If no such transactions -// exist, this transaction is logged and dropped; otherwise the selected -// transactions are evicted. -// -// Finally, the new transaction is added and size stats updated. + + + + + + + + + + + + + func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { checkTxRes, ok := res.Value.(*abci.Response_CheckTx) if !ok { @@ -490,14 +490,14 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { txmp.metrics.FailedTxs.Add(1) - // Remove the invalid transaction from the cache, unless the operator has - // instructed us to keep invalid transactions. + + if !txmp.config.KeepInvalidTxsInCache { txmp.cache.Remove(wtx.tx) } - // If there was a post-check error, record its text in the result for - // debugging purposes. + + if err != nil { checkTxRes.CheckTx.MempoolError = err.Error() } @@ -507,9 +507,9 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { priority := checkTxRes.CheckTx.Priority sender := checkTxRes.CheckTx.Sender - // Disallow multiple concurrent transactions from the same sender assigned - // by the ABCI application. As a special case, an empty sender is not - // restricted. 
+ + + if sender != "" { elt, ok := txmp.txBySender[sender] if ok { @@ -526,15 +526,15 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { } } - // At this point the application has ruled the transaction valid, but the - // mempool might be full. If so, find the lowest-priority items with lower - // priority than the application assigned to this new one, and evict as many - // of them as necessary to make room for tx. If no such items exist, we - // discard tx. + + + + + if err := txmp.canAddTx(wtx); err != nil { - var victims []*clist.CElement // eligible transactions for eviction - var victimBytes int64 // total size of victims + var victims []*clist.CElement + var victimBytes int64 for cur := txmp.txs.Front(); cur != nil; cur = cur.Next() { cw := cur.Value.(*WrappedTx) if cw.priority < priority { @@ -543,9 +543,9 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { } } - // If there are no suitable eviction candidates, or the total size of - // those candidates is not enough to make room for the new transaction, - // drop the new one. + + + if len(victims) == 0 || victimBytes < wtx.Size() { txmp.cache.Remove(wtx.tx) txmp.logger.Error( @@ -564,8 +564,8 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { "new_priority", priority, ) - // Sort lowest priority items first so they will be evicted first. Break - // ties in favor of newer items (to maintain FIFO semantics in a group). + + sort.Slice(victims, func(i, j int) bool { iw := victims[i].Value.(*WrappedTx) jw := victims[j].Value.(*WrappedTx) @@ -575,7 +575,7 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { return iw.Priority() < jw.Priority() }) - // Evict as many of the victims as necessary to make room. + var evictedBytes int64 for _, vic := range victims { w := vic.Value.(*WrappedTx) @@ -589,8 +589,8 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { txmp.cache.Remove(w.tx) txmp.metrics.EvictedTxs.Add(1) - // We may not need to evict all the eligible transactions. Bail out - // early if we have made enough room. + + evictedBytes += w.Size() if evictedBytes >= wtx.Size() { break @@ -625,26 +625,26 @@ func (txmp *TxMempool) insertTx(wtx *WrappedTx) { atomic.AddInt64(&txmp.txsBytes, wtx.Size()) } -// recheckTxCallback handles the responses from ABCI CheckTx calls issued -// during the recheck phase of a block Update. It updates the recheck counter -// and removes any transactions invalidated by the application. -// -// This callback is NOT executed for the initial CheckTx on a new transaction; -// that case is handled by initialTxCallback instead. + + + + + + func (txmp *TxMempool) recheckTxCallback(req *abci.Request, res *abci.Response) { checkTxRes, ok := res.Value.(*abci.Response_CheckTx) if !ok { - // Don't log this; this is the default callback and other response types - // can safely be ignored. + + return } - // Check whether we are expecting recheck responses at this point. - // If not, we will ignore the response, this usually means the mempool was Flushed. - // If this is the "last" pending recheck, trigger a notification when it's been processed. 
+ + + numLeft := atomic.AddInt64(&txmp.txRecheck, -1) if numLeft == 0 { - defer txmp.notifyTxsAvailable() // notify waiters on return, if mempool is non-empty + defer txmp.notifyTxsAvailable() } else if numLeft < 0 { return } @@ -655,16 +655,16 @@ func (txmp *TxMempool) recheckTxCallback(req *abci.Request, res *abci.Response) txmp.mtx.Lock() defer txmp.mtx.Unlock() - // Find the transaction reported by the ABCI callback. It is possible the - // transaction was evicted during the recheck, in which case the transaction - // will be gone. + + + elt, ok := txmp.txByKey[tx.Key()] if !ok { return } wtx := elt.Value.(*WrappedTx) - // If a postcheck hook is defined, call it before checking the result. + var err error if txmp.postCheck != nil { err = txmp.postCheck(tx, checkTxRes.CheckTx) @@ -672,7 +672,7 @@ func (txmp *TxMempool) recheckTxCallback(req *abci.Request, res *abci.Response) if checkTxRes.CheckTx.Code == abci.CodeTypeOK && err == nil { wtx.SetPriority(checkTxRes.CheckTx.Priority) - return // N.B. Size of mempool did not change + return } txmp.logger.Debug( @@ -690,12 +690,12 @@ func (txmp *TxMempool) recheckTxCallback(req *abci.Request, res *abci.Response) txmp.metrics.Size.Set(float64(txmp.Size())) } -// recheckTransactions initiates re-CheckTx ABCI calls for all the transactions -// currently in the mempool. It reports the number of recheck calls that were -// successfully initiated. -// -// Precondition: The mempool is not empty. -// The caller must hold txmp.mtx exclusively. + + + + + + func (txmp *TxMempool) recheckTransactions() { if txmp.Size() == 0 { panic("mempool: cannot run recheck on an empty mempool") @@ -705,10 +705,10 @@ func (txmp *TxMempool) recheckTransactions() { "num_txs", txmp.Size(), "height", txmp.height, ) - // N.B.: We have to issue the calls outside the lock. In a local client, - // even an "async" call invokes its callback immediately which will make the - // callback deadlock trying to acquire the same lock. This isn't a problem - // with out-of-process calls, but this has to work for both. + + + + txmp.mtx.Unlock() defer txmp.mtx.Lock() @@ -716,7 +716,7 @@ func (txmp *TxMempool) recheckTransactions() { for e := txmp.txs.Front(); e != nil; e = e.Next() { wtx := e.Value.(*WrappedTx) - // The response for this CheckTx is handled by the default recheckTxCallback. + _ = txmp.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{ Tx: wtx.tx, Type: abci.CheckTxType_Recheck, @@ -730,9 +730,9 @@ func (txmp *TxMempool) recheckTransactions() { txmp.proxyAppConn.FlushAsync() } -// canAddTx returns an error if we cannot insert the provided *WrappedTx into -// the mempool due to mempool configured constraints. Otherwise, nil is -// returned and the transaction can be inserted into the mempool. + + + func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error { numTxs := txmp.Size() txBytes := txmp.SizeBytes() @@ -749,21 +749,21 @@ func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error { return nil } -// purgeExpiredTxs removes all transactions from the mempool that have exceeded -// their respective height or time-based limits as of the given blockHeight. -// Transactions removed by this operation are not removed from the cache. -// -// The caller must hold txmp.mtx exclusively. + + + + + func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) { if txmp.config.TTLNumBlocks == 0 && txmp.config.TTLDuration == 0 { - return // nothing to do + return } now := time.Now() cur := txmp.txs.Front() for cur != nil { - // N.B. 
Grab the next element first, since if we remove cur its successor - // will be invalidated. + + next := cur.Next() w := cur.Value.(*WrappedTx) @@ -782,11 +782,11 @@ func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) { func (txmp *TxMempool) notifyTxsAvailable() { if txmp.Size() == 0 { - return // nothing to do + return } if txmp.txsAvailable != nil && !txmp.notifiedTxsAvailable { - // channel cap is 1, so this will send once + txmp.notifiedTxsAvailable = true select { diff --git a/mempool/v1/tx.go b/mempool/v1/tx.go index 88522a8a7..88134c052 100644 --- a/mempool/v1/tx.go +++ b/mempool/v1/tx.go @@ -7,25 +7,25 @@ import ( "github.com/tendermint/tendermint/types" ) -// WrappedTx defines a wrapper around a raw transaction with additional metadata -// that is used for indexing. + + type WrappedTx struct { - tx types.Tx // the original transaction data - hash types.TxKey // the transaction hash - height int64 // height when this transaction was initially checked (for expiry) - timestamp time.Time // time when transaction was entered (for TTL) + tx types.Tx + hash types.TxKey + height int64 + timestamp time.Time mtx sync.Mutex - gasWanted int64 // app: gas required to execute this transaction - priority int64 // app: priority value for this transaction - sender string // app: assigned sender label - peers map[uint16]bool // peer IDs who have sent us this transaction + gasWanted int64 + priority int64 + sender string + peers map[uint16]bool } -// Size reports the size of the raw transaction in bytes. + func (w *WrappedTx) Size() int64 { return int64(len(w.tx)) } -// SetPeer adds the specified peer ID as a sender of w. + func (w *WrappedTx) SetPeer(id uint16) { w.mtx.Lock() defer w.mtx.Unlock() @@ -36,7 +36,7 @@ func (w *WrappedTx) SetPeer(id uint16) { } } -// HasPeer reports whether the specified peer ID is a sender of w. + func (w *WrappedTx) HasPeer(id uint16) bool { w.mtx.Lock() defer w.mtx.Unlock() @@ -44,42 +44,42 @@ func (w *WrappedTx) HasPeer(id uint16) bool { return ok } -// SetGasWanted sets the application-assigned gas requirement of w. + func (w *WrappedTx) SetGasWanted(gas int64) { w.mtx.Lock() defer w.mtx.Unlock() w.gasWanted = gas } -// GasWanted reports the application-assigned gas requirement of w. + func (w *WrappedTx) GasWanted() int64 { w.mtx.Lock() defer w.mtx.Unlock() return w.gasWanted } -// SetSender sets the application-assigned sender of w. + func (w *WrappedTx) SetSender(sender string) { w.mtx.Lock() defer w.mtx.Unlock() w.sender = sender } -// Sender reports the application-assigned sender of w. + func (w *WrappedTx) Sender() string { w.mtx.Lock() defer w.mtx.Unlock() return w.sender } -// SetPriority sets the application-assigned priority of w. + func (w *WrappedTx) SetPriority(p int64) { w.mtx.Lock() defer w.mtx.Unlock() w.priority = p } -// Priority reports the application-assigned priority of w. + func (w *WrappedTx) Priority() int64 { w.mtx.Lock() defer w.mtx.Unlock() diff --git a/mocks/github.com/dymensionxyz/dymint/block/mock_ExecutorI.go b/mocks/github.com/dymensionxyz/dymint/block/mock_ExecutorI.go index 2ba9eee27..6098f6c98 100644 --- a/mocks/github.com/dymensionxyz/dymint/block/mock_ExecutorI.go +++ b/mocks/github.com/dymensionxyz/dymint/block/mock_ExecutorI.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. 
+ package block @@ -16,7 +16,7 @@ import ( types "github.com/dymensionxyz/dymint/types" ) -// MockExecutorI is an autogenerated mock type for the ExecutorI type + type MockExecutorI struct { mock.Mock } @@ -29,7 +29,7 @@ func (_m *MockExecutorI) EXPECT() *MockExecutorI_Expecter { return &MockExecutorI_Expecter{mock: &_m.Mock} } -// AddConsensusMsgs provides a mock function with given fields: _a0 + func (_m *MockExecutorI) AddConsensusMsgs(_a0 ...proto.Message) { _va := make([]interface{}, len(_a0)) for _i := range _a0 { @@ -40,13 +40,13 @@ func (_m *MockExecutorI) AddConsensusMsgs(_a0 ...proto.Message) { _m.Called(_ca...) } -// MockExecutorI_AddConsensusMsgs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddConsensusMsgs' + type MockExecutorI_AddConsensusMsgs_Call struct { *mock.Call } -// AddConsensusMsgs is a helper method to define mock.On call -// - _a0 ...proto.Message + + func (_e *MockExecutorI_Expecter) AddConsensusMsgs(_a0 ...interface{}) *MockExecutorI_AddConsensusMsgs_Call { return &MockExecutorI_AddConsensusMsgs_Call{Call: _e.mock.On("AddConsensusMsgs", append([]interface{}{}, _a0...)...)} @@ -75,7 +75,7 @@ func (_c *MockExecutorI_AddConsensusMsgs_Call) RunAndReturn(run func(...proto.Me return _c } -// Commit provides a mock function with given fields: _a0, _a1, resp + func (_m *MockExecutorI) Commit(_a0 *types.State, _a1 *types.Block, resp *state.ABCIResponses) ([]byte, int64, error) { ret := _m.Called(_a0, _a1, resp) @@ -112,15 +112,15 @@ func (_m *MockExecutorI) Commit(_a0 *types.State, _a1 *types.Block, resp *state. return r0, r1, r2 } -// MockExecutorI_Commit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Commit' + type MockExecutorI_Commit_Call struct { *mock.Call } -// Commit is a helper method to define mock.On call -// - _a0 *types.State -// - _a1 *types.Block -// - resp *state.ABCIResponses + + + + func (_e *MockExecutorI_Expecter) Commit(_a0 interface{}, _a1 interface{}, resp interface{}) *MockExecutorI_Commit_Call { return &MockExecutorI_Commit_Call{Call: _e.mock.On("Commit", _a0, _a1, resp)} } @@ -142,7 +142,7 @@ func (_c *MockExecutorI_Commit_Call) RunAndReturn(run func(*types.State, *types. 
return _c } -// CreateBlock provides a mock function with given fields: height, lastCommit, lastHeaderHash, nextSeqHash, _a4, maxBlockDataSizeBytes + func (_m *MockExecutorI) CreateBlock(height uint64, lastCommit *types.Commit, lastHeaderHash [32]byte, nextSeqHash [32]byte, _a4 *types.State, maxBlockDataSizeBytes uint64) *types.Block { ret := _m.Called(height, lastCommit, lastHeaderHash, nextSeqHash, _a4, maxBlockDataSizeBytes) @@ -162,18 +162,18 @@ func (_m *MockExecutorI) CreateBlock(height uint64, lastCommit *types.Commit, la return r0 } -// MockExecutorI_CreateBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateBlock' + type MockExecutorI_CreateBlock_Call struct { *mock.Call } -// CreateBlock is a helper method to define mock.On call -// - height uint64 -// - lastCommit *types.Commit -// - lastHeaderHash [32]byte -// - nextSeqHash [32]byte -// - _a4 *types.State -// - maxBlockDataSizeBytes uint64 + + + + + + + func (_e *MockExecutorI_Expecter) CreateBlock(height interface{}, lastCommit interface{}, lastHeaderHash interface{}, nextSeqHash interface{}, _a4 interface{}, maxBlockDataSizeBytes interface{}) *MockExecutorI_CreateBlock_Call { return &MockExecutorI_CreateBlock_Call{Call: _e.mock.On("CreateBlock", height, lastCommit, lastHeaderHash, nextSeqHash, _a4, maxBlockDataSizeBytes)} } @@ -195,7 +195,7 @@ func (_c *MockExecutorI_CreateBlock_Call) RunAndReturn(run func(uint64, *types.C return _c } -// ExecuteBlock provides a mock function with given fields: _a0 + func (_m *MockExecutorI) ExecuteBlock(_a0 *types.Block) (*state.ABCIResponses, error) { ret := _m.Called(_a0) @@ -225,13 +225,13 @@ func (_m *MockExecutorI) ExecuteBlock(_a0 *types.Block) (*state.ABCIResponses, e return r0, r1 } -// MockExecutorI_ExecuteBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExecuteBlock' + type MockExecutorI_ExecuteBlock_Call struct { *mock.Call } -// ExecuteBlock is a helper method to define mock.On call -// - _a0 *types.Block + + func (_e *MockExecutorI_Expecter) ExecuteBlock(_a0 interface{}) *MockExecutorI_ExecuteBlock_Call { return &MockExecutorI_ExecuteBlock_Call{Call: _e.mock.On("ExecuteBlock", _a0)} } @@ -253,7 +253,7 @@ func (_c *MockExecutorI_ExecuteBlock_Call) RunAndReturn(run func(*types.Block) ( return _c } -// GetAppInfo provides a mock function with given fields: + func (_m *MockExecutorI) GetAppInfo() (*abcitypes.ResponseInfo, error) { ret := _m.Called() @@ -283,12 +283,12 @@ func (_m *MockExecutorI) GetAppInfo() (*abcitypes.ResponseInfo, error) { return r0, r1 } -// MockExecutorI_GetAppInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAppInfo' + type MockExecutorI_GetAppInfo_Call struct { *mock.Call } -// GetAppInfo is a helper method to define mock.On call + func (_e *MockExecutorI_Expecter) GetAppInfo() *MockExecutorI_GetAppInfo_Call { return &MockExecutorI_GetAppInfo_Call{Call: _e.mock.On("GetAppInfo")} } @@ -310,7 +310,7 @@ func (_c *MockExecutorI_GetAppInfo_Call) RunAndReturn(run func() (*abcitypes.Res return _c } -// GetConsensusMsgs provides a mock function with given fields: + func (_m *MockExecutorI) GetConsensusMsgs() []proto.Message { ret := _m.Called() @@ -330,12 +330,12 @@ func (_m *MockExecutorI) GetConsensusMsgs() []proto.Message { return r0 } -// MockExecutorI_GetConsensusMsgs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetConsensusMsgs' + type 
MockExecutorI_GetConsensusMsgs_Call struct { *mock.Call } -// GetConsensusMsgs is a helper method to define mock.On call + func (_e *MockExecutorI_Expecter) GetConsensusMsgs() *MockExecutorI_GetConsensusMsgs_Call { return &MockExecutorI_GetConsensusMsgs_Call{Call: _e.mock.On("GetConsensusMsgs")} } @@ -357,7 +357,7 @@ func (_c *MockExecutorI_GetConsensusMsgs_Call) RunAndReturn(run func() []proto.M return _c } -// InitChain provides a mock function with given fields: genesis, genesisChecksum, valset + func (_m *MockExecutorI) InitChain(genesis *tenderminttypes.GenesisDoc, genesisChecksum string, valset []*tenderminttypes.Validator) (*abcitypes.ResponseInitChain, error) { ret := _m.Called(genesis, genesisChecksum, valset) @@ -387,15 +387,15 @@ func (_m *MockExecutorI) InitChain(genesis *tenderminttypes.GenesisDoc, genesisC return r0, r1 } -// MockExecutorI_InitChain_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InitChain' + type MockExecutorI_InitChain_Call struct { *mock.Call } -// InitChain is a helper method to define mock.On call -// - genesis *tenderminttypes.GenesisDoc -// - genesisChecksum string -// - valset []*tenderminttypes.Validator + + + + func (_e *MockExecutorI_Expecter) InitChain(genesis interface{}, genesisChecksum interface{}, valset interface{}) *MockExecutorI_InitChain_Call { return &MockExecutorI_InitChain_Call{Call: _e.mock.On("InitChain", genesis, genesisChecksum, valset)} } @@ -417,18 +417,18 @@ func (_c *MockExecutorI_InitChain_Call) RunAndReturn(run func(*tenderminttypes.G return _c } -// UpdateMempoolAfterInitChain provides a mock function with given fields: s + func (_m *MockExecutorI) UpdateMempoolAfterInitChain(s *types.State) { _m.Called(s) } -// MockExecutorI_UpdateMempoolAfterInitChain_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateMempoolAfterInitChain' + type MockExecutorI_UpdateMempoolAfterInitChain_Call struct { *mock.Call } -// UpdateMempoolAfterInitChain is a helper method to define mock.On call -// - s *types.State + + func (_e *MockExecutorI_Expecter) UpdateMempoolAfterInitChain(s interface{}) *MockExecutorI_UpdateMempoolAfterInitChain_Call { return &MockExecutorI_UpdateMempoolAfterInitChain_Call{Call: _e.mock.On("UpdateMempoolAfterInitChain", s)} } @@ -450,7 +450,7 @@ func (_c *MockExecutorI_UpdateMempoolAfterInitChain_Call) RunAndReturn(run func( return _c } -// UpdateProposerFromBlock provides a mock function with given fields: s, seqSet, _a2 + func (_m *MockExecutorI) UpdateProposerFromBlock(s *types.State, seqSet *types.SequencerSet, _a2 *types.Block) bool { ret := _m.Called(s, seqSet, _a2) @@ -468,15 +468,15 @@ func (_m *MockExecutorI) UpdateProposerFromBlock(s *types.State, seqSet *types.S return r0 } -// MockExecutorI_UpdateProposerFromBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateProposerFromBlock' + type MockExecutorI_UpdateProposerFromBlock_Call struct { *mock.Call } -// UpdateProposerFromBlock is a helper method to define mock.On call -// - s *types.State -// - seqSet *types.SequencerSet -// - _a2 *types.Block + + + + func (_e *MockExecutorI_Expecter) UpdateProposerFromBlock(s interface{}, seqSet interface{}, _a2 interface{}) *MockExecutorI_UpdateProposerFromBlock_Call { return &MockExecutorI_UpdateProposerFromBlock_Call{Call: _e.mock.On("UpdateProposerFromBlock", s, seqSet, _a2)} } @@ -498,22 +498,22 @@ func (_c *MockExecutorI_UpdateProposerFromBlock_Call) RunAndReturn(run func(*typ 
return _c } -// UpdateStateAfterCommit provides a mock function with given fields: s, resp, appHash, height, lastHeaderHash + func (_m *MockExecutorI) UpdateStateAfterCommit(s *types.State, resp *state.ABCIResponses, appHash []byte, height uint64, lastHeaderHash [32]byte) { _m.Called(s, resp, appHash, height, lastHeaderHash) } -// MockExecutorI_UpdateStateAfterCommit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateStateAfterCommit' + type MockExecutorI_UpdateStateAfterCommit_Call struct { *mock.Call } -// UpdateStateAfterCommit is a helper method to define mock.On call -// - s *types.State -// - resp *state.ABCIResponses -// - appHash []byte -// - height uint64 -// - lastHeaderHash [32]byte + + + + + + func (_e *MockExecutorI_Expecter) UpdateStateAfterCommit(s interface{}, resp interface{}, appHash interface{}, height interface{}, lastHeaderHash interface{}) *MockExecutorI_UpdateStateAfterCommit_Call { return &MockExecutorI_UpdateStateAfterCommit_Call{Call: _e.mock.On("UpdateStateAfterCommit", s, resp, appHash, height, lastHeaderHash)} } @@ -535,19 +535,19 @@ func (_c *MockExecutorI_UpdateStateAfterCommit_Call) RunAndReturn(run func(*type return _c } -// UpdateStateAfterInitChain provides a mock function with given fields: s, res + func (_m *MockExecutorI) UpdateStateAfterInitChain(s *types.State, res *abcitypes.ResponseInitChain) { _m.Called(s, res) } -// MockExecutorI_UpdateStateAfterInitChain_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateStateAfterInitChain' + type MockExecutorI_UpdateStateAfterInitChain_Call struct { *mock.Call } -// UpdateStateAfterInitChain is a helper method to define mock.On call -// - s *types.State -// - res *abcitypes.ResponseInitChain + + + func (_e *MockExecutorI_Expecter) UpdateStateAfterInitChain(s interface{}, res interface{}) *MockExecutorI_UpdateStateAfterInitChain_Call { return &MockExecutorI_UpdateStateAfterInitChain_Call{Call: _e.mock.On("UpdateStateAfterInitChain", s, res)} } @@ -569,8 +569,8 @@ func (_c *MockExecutorI_UpdateStateAfterInitChain_Call) RunAndReturn(run func(*t return _c } -// NewMockExecutorI creates a new instance of MockExecutorI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockExecutorI(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/block/mock_FraudHandler.go b/mocks/github.com/dymensionxyz/dymint/block/mock_FraudHandler.go index 932c51a2e..54b9098d2 100644 --- a/mocks/github.com/dymensionxyz/dymint/block/mock_FraudHandler.go +++ b/mocks/github.com/dymensionxyz/dymint/block/mock_FraudHandler.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. 
+ package block @@ -8,7 +8,7 @@ import ( mock "github.com/stretchr/testify/mock" ) -// MockFraudHandler is an autogenerated mock type for the FraudHandler type + type MockFraudHandler struct { mock.Mock } @@ -21,19 +21,19 @@ func (_m *MockFraudHandler) EXPECT() *MockFraudHandler_Expecter { return &MockFraudHandler_Expecter{mock: &_m.Mock} } -// HandleFault provides a mock function with given fields: ctx, fault + func (_m *MockFraudHandler) HandleFault(ctx context.Context, fault error) { _m.Called(ctx, fault) } -// MockFraudHandler_HandleFault_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HandleFault' + type MockFraudHandler_HandleFault_Call struct { *mock.Call } -// HandleFault is a helper method to define mock.On call -// - ctx context.Context -// - fault error + + + func (_e *MockFraudHandler_Expecter) HandleFault(ctx interface{}, fault interface{}) *MockFraudHandler_HandleFault_Call { return &MockFraudHandler_HandleFault_Call{Call: _e.mock.On("HandleFault", ctx, fault)} } @@ -55,8 +55,8 @@ func (_c *MockFraudHandler_HandleFault_Call) RunAndReturn(run func(context.Conte return _c } -// NewMockFraudHandler creates a new instance of MockFraudHandler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockFraudHandler(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/da/avail/mock_SubstrateApiI.go b/mocks/github.com/dymensionxyz/dymint/da/avail/mock_SubstrateApiI.go index 6a52c1df8..b591d3572 100644 --- a/mocks/github.com/dymensionxyz/dymint/da/avail/mock_SubstrateApiI.go +++ b/mocks/github.com/dymensionxyz/dymint/da/avail/mock_SubstrateApiI.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. 
+ package avail @@ -14,7 +14,7 @@ import ( types "github.com/centrifuge/go-substrate-rpc-client/v4/types" ) -// MockSubstrateApiI is an autogenerated mock type for the SubstrateApiI type + type MockSubstrateApiI struct { mock.Mock } @@ -27,7 +27,7 @@ func (_m *MockSubstrateApiI) EXPECT() *MockSubstrateApiI_Expecter { return &MockSubstrateApiI_Expecter{mock: &_m.Mock} } -// GetBlock provides a mock function with given fields: blockHash + func (_m *MockSubstrateApiI) GetBlock(blockHash types.Hash) (*types.SignedBlock, error) { ret := _m.Called(blockHash) @@ -57,13 +57,13 @@ func (_m *MockSubstrateApiI) GetBlock(blockHash types.Hash) (*types.SignedBlock, return r0, r1 } -// MockSubstrateApiI_GetBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlock' + type MockSubstrateApiI_GetBlock_Call struct { *mock.Call } -// GetBlock is a helper method to define mock.On call -// - blockHash types.Hash + + func (_e *MockSubstrateApiI_Expecter) GetBlock(blockHash interface{}) *MockSubstrateApiI_GetBlock_Call { return &MockSubstrateApiI_GetBlock_Call{Call: _e.mock.On("GetBlock", blockHash)} } @@ -85,7 +85,7 @@ func (_c *MockSubstrateApiI_GetBlock_Call) RunAndReturn(run func(types.Hash) (*t return _c } -// GetBlockHash provides a mock function with given fields: blockNumber + func (_m *MockSubstrateApiI) GetBlockHash(blockNumber uint64) (types.Hash, error) { ret := _m.Called(blockNumber) @@ -115,13 +115,13 @@ func (_m *MockSubstrateApiI) GetBlockHash(blockNumber uint64) (types.Hash, error return r0, r1 } -// MockSubstrateApiI_GetBlockHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockHash' + type MockSubstrateApiI_GetBlockHash_Call struct { *mock.Call } -// GetBlockHash is a helper method to define mock.On call -// - blockNumber uint64 + + func (_e *MockSubstrateApiI_Expecter) GetBlockHash(blockNumber interface{}) *MockSubstrateApiI_GetBlockHash_Call { return &MockSubstrateApiI_GetBlockHash_Call{Call: _e.mock.On("GetBlockHash", blockNumber)} } @@ -143,7 +143,7 @@ func (_c *MockSubstrateApiI_GetBlockHash_Call) RunAndReturn(run func(uint64) (ty return _c } -// GetBlockHashLatest provides a mock function with given fields: + func (_m *MockSubstrateApiI) GetBlockHashLatest() (types.Hash, error) { ret := _m.Called() @@ -173,12 +173,12 @@ func (_m *MockSubstrateApiI) GetBlockHashLatest() (types.Hash, error) { return r0, r1 } -// MockSubstrateApiI_GetBlockHashLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockHashLatest' + type MockSubstrateApiI_GetBlockHashLatest_Call struct { *mock.Call } -// GetBlockHashLatest is a helper method to define mock.On call + func (_e *MockSubstrateApiI_Expecter) GetBlockHashLatest() *MockSubstrateApiI_GetBlockHashLatest_Call { return &MockSubstrateApiI_GetBlockHashLatest_Call{Call: _e.mock.On("GetBlockHashLatest")} } @@ -200,7 +200,7 @@ func (_c *MockSubstrateApiI_GetBlockHashLatest_Call) RunAndReturn(run func() (ty return _c } -// GetBlockLatest provides a mock function with given fields: + func (_m *MockSubstrateApiI) GetBlockLatest() (*types.SignedBlock, error) { ret := _m.Called() @@ -230,12 +230,12 @@ func (_m *MockSubstrateApiI) GetBlockLatest() (*types.SignedBlock, error) { return r0, r1 } -// MockSubstrateApiI_GetBlockLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockLatest' + type MockSubstrateApiI_GetBlockLatest_Call struct { *mock.Call } -// GetBlockLatest 
is a helper method to define mock.On call + func (_e *MockSubstrateApiI_Expecter) GetBlockLatest() *MockSubstrateApiI_GetBlockLatest_Call { return &MockSubstrateApiI_GetBlockLatest_Call{Call: _e.mock.On("GetBlockLatest")} } @@ -257,7 +257,7 @@ func (_c *MockSubstrateApiI_GetBlockLatest_Call) RunAndReturn(run func() (*types return _c } -// GetChildKeys provides a mock function with given fields: childStorageKey, prefix, blockHash + func (_m *MockSubstrateApiI) GetChildKeys(childStorageKey types.StorageKey, prefix types.StorageKey, blockHash types.Hash) ([]types.StorageKey, error) { ret := _m.Called(childStorageKey, prefix, blockHash) @@ -287,15 +287,15 @@ func (_m *MockSubstrateApiI) GetChildKeys(childStorageKey types.StorageKey, pref return r0, r1 } -// MockSubstrateApiI_GetChildKeys_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildKeys' + type MockSubstrateApiI_GetChildKeys_Call struct { *mock.Call } -// GetChildKeys is a helper method to define mock.On call -// - childStorageKey types.StorageKey -// - prefix types.StorageKey -// - blockHash types.Hash + + + + func (_e *MockSubstrateApiI_Expecter) GetChildKeys(childStorageKey interface{}, prefix interface{}, blockHash interface{}) *MockSubstrateApiI_GetChildKeys_Call { return &MockSubstrateApiI_GetChildKeys_Call{Call: _e.mock.On("GetChildKeys", childStorageKey, prefix, blockHash)} } @@ -317,7 +317,7 @@ func (_c *MockSubstrateApiI_GetChildKeys_Call) RunAndReturn(run func(types.Stora return _c } -// GetChildKeysLatest provides a mock function with given fields: childStorageKey, prefix + func (_m *MockSubstrateApiI) GetChildKeysLatest(childStorageKey types.StorageKey, prefix types.StorageKey) ([]types.StorageKey, error) { ret := _m.Called(childStorageKey, prefix) @@ -347,14 +347,14 @@ func (_m *MockSubstrateApiI) GetChildKeysLatest(childStorageKey types.StorageKey return r0, r1 } -// MockSubstrateApiI_GetChildKeysLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildKeysLatest' + type MockSubstrateApiI_GetChildKeysLatest_Call struct { *mock.Call } -// GetChildKeysLatest is a helper method to define mock.On call -// - childStorageKey types.StorageKey -// - prefix types.StorageKey + + + func (_e *MockSubstrateApiI_Expecter) GetChildKeysLatest(childStorageKey interface{}, prefix interface{}) *MockSubstrateApiI_GetChildKeysLatest_Call { return &MockSubstrateApiI_GetChildKeysLatest_Call{Call: _e.mock.On("GetChildKeysLatest", childStorageKey, prefix)} } @@ -376,7 +376,7 @@ func (_c *MockSubstrateApiI_GetChildKeysLatest_Call) RunAndReturn(run func(types return _c } -// GetChildStorage provides a mock function with given fields: childStorageKey, key, target, blockHash + func (_m *MockSubstrateApiI) GetChildStorage(childStorageKey types.StorageKey, key types.StorageKey, target interface{}, blockHash types.Hash) (bool, error) { ret := _m.Called(childStorageKey, key, target, blockHash) @@ -404,16 +404,16 @@ func (_m *MockSubstrateApiI) GetChildStorage(childStorageKey types.StorageKey, k return r0, r1 } -// MockSubstrateApiI_GetChildStorage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorage' + type MockSubstrateApiI_GetChildStorage_Call struct { *mock.Call } -// GetChildStorage is a helper method to define mock.On call -// - childStorageKey types.StorageKey -// - key types.StorageKey -// - target interface{} -// - blockHash types.Hash + + + + + func (_e *MockSubstrateApiI_Expecter) 
GetChildStorage(childStorageKey interface{}, key interface{}, target interface{}, blockHash interface{}) *MockSubstrateApiI_GetChildStorage_Call { return &MockSubstrateApiI_GetChildStorage_Call{Call: _e.mock.On("GetChildStorage", childStorageKey, key, target, blockHash)} } @@ -435,7 +435,7 @@ func (_c *MockSubstrateApiI_GetChildStorage_Call) RunAndReturn(run func(types.St return _c } -// GetChildStorageHash provides a mock function with given fields: childStorageKey, key, blockHash + func (_m *MockSubstrateApiI) GetChildStorageHash(childStorageKey types.StorageKey, key types.StorageKey, blockHash types.Hash) (types.Hash, error) { ret := _m.Called(childStorageKey, key, blockHash) @@ -465,15 +465,15 @@ func (_m *MockSubstrateApiI) GetChildStorageHash(childStorageKey types.StorageKe return r0, r1 } -// MockSubstrateApiI_GetChildStorageHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorageHash' + type MockSubstrateApiI_GetChildStorageHash_Call struct { *mock.Call } -// GetChildStorageHash is a helper method to define mock.On call -// - childStorageKey types.StorageKey -// - key types.StorageKey -// - blockHash types.Hash + + + + func (_e *MockSubstrateApiI_Expecter) GetChildStorageHash(childStorageKey interface{}, key interface{}, blockHash interface{}) *MockSubstrateApiI_GetChildStorageHash_Call { return &MockSubstrateApiI_GetChildStorageHash_Call{Call: _e.mock.On("GetChildStorageHash", childStorageKey, key, blockHash)} } @@ -495,7 +495,7 @@ func (_c *MockSubstrateApiI_GetChildStorageHash_Call) RunAndReturn(run func(type return _c } -// GetChildStorageHashLatest provides a mock function with given fields: childStorageKey, key + func (_m *MockSubstrateApiI) GetChildStorageHashLatest(childStorageKey types.StorageKey, key types.StorageKey) (types.Hash, error) { ret := _m.Called(childStorageKey, key) @@ -525,14 +525,14 @@ func (_m *MockSubstrateApiI) GetChildStorageHashLatest(childStorageKey types.Sto return r0, r1 } -// MockSubstrateApiI_GetChildStorageHashLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorageHashLatest' + type MockSubstrateApiI_GetChildStorageHashLatest_Call struct { *mock.Call } -// GetChildStorageHashLatest is a helper method to define mock.On call -// - childStorageKey types.StorageKey -// - key types.StorageKey + + + func (_e *MockSubstrateApiI_Expecter) GetChildStorageHashLatest(childStorageKey interface{}, key interface{}) *MockSubstrateApiI_GetChildStorageHashLatest_Call { return &MockSubstrateApiI_GetChildStorageHashLatest_Call{Call: _e.mock.On("GetChildStorageHashLatest", childStorageKey, key)} } @@ -554,7 +554,7 @@ func (_c *MockSubstrateApiI_GetChildStorageHashLatest_Call) RunAndReturn(run fun return _c } -// GetChildStorageLatest provides a mock function with given fields: childStorageKey, key, target + func (_m *MockSubstrateApiI) GetChildStorageLatest(childStorageKey types.StorageKey, key types.StorageKey, target interface{}) (bool, error) { ret := _m.Called(childStorageKey, key, target) @@ -582,15 +582,15 @@ func (_m *MockSubstrateApiI) GetChildStorageLatest(childStorageKey types.Storage return r0, r1 } -// MockSubstrateApiI_GetChildStorageLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorageLatest' + type MockSubstrateApiI_GetChildStorageLatest_Call struct { *mock.Call } -// GetChildStorageLatest is a helper method to define mock.On call -// - childStorageKey types.StorageKey -// - key 
types.StorageKey -// - target interface{} + + + + func (_e *MockSubstrateApiI_Expecter) GetChildStorageLatest(childStorageKey interface{}, key interface{}, target interface{}) *MockSubstrateApiI_GetChildStorageLatest_Call { return &MockSubstrateApiI_GetChildStorageLatest_Call{Call: _e.mock.On("GetChildStorageLatest", childStorageKey, key, target)} } @@ -612,7 +612,7 @@ func (_c *MockSubstrateApiI_GetChildStorageLatest_Call) RunAndReturn(run func(ty return _c } -// GetChildStorageRaw provides a mock function with given fields: childStorageKey, key, blockHash + func (_m *MockSubstrateApiI) GetChildStorageRaw(childStorageKey types.StorageKey, key types.StorageKey, blockHash types.Hash) (*types.StorageDataRaw, error) { ret := _m.Called(childStorageKey, key, blockHash) @@ -642,15 +642,15 @@ func (_m *MockSubstrateApiI) GetChildStorageRaw(childStorageKey types.StorageKey return r0, r1 } -// MockSubstrateApiI_GetChildStorageRaw_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorageRaw' + type MockSubstrateApiI_GetChildStorageRaw_Call struct { *mock.Call } -// GetChildStorageRaw is a helper method to define mock.On call -// - childStorageKey types.StorageKey -// - key types.StorageKey -// - blockHash types.Hash + + + + func (_e *MockSubstrateApiI_Expecter) GetChildStorageRaw(childStorageKey interface{}, key interface{}, blockHash interface{}) *MockSubstrateApiI_GetChildStorageRaw_Call { return &MockSubstrateApiI_GetChildStorageRaw_Call{Call: _e.mock.On("GetChildStorageRaw", childStorageKey, key, blockHash)} } @@ -672,7 +672,7 @@ func (_c *MockSubstrateApiI_GetChildStorageRaw_Call) RunAndReturn(run func(types return _c } -// GetChildStorageRawLatest provides a mock function with given fields: childStorageKey, key + func (_m *MockSubstrateApiI) GetChildStorageRawLatest(childStorageKey types.StorageKey, key types.StorageKey) (*types.StorageDataRaw, error) { ret := _m.Called(childStorageKey, key) @@ -702,14 +702,14 @@ func (_m *MockSubstrateApiI) GetChildStorageRawLatest(childStorageKey types.Stor return r0, r1 } -// MockSubstrateApiI_GetChildStorageRawLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorageRawLatest' + type MockSubstrateApiI_GetChildStorageRawLatest_Call struct { *mock.Call } -// GetChildStorageRawLatest is a helper method to define mock.On call -// - childStorageKey types.StorageKey -// - key types.StorageKey + + + func (_e *MockSubstrateApiI_Expecter) GetChildStorageRawLatest(childStorageKey interface{}, key interface{}) *MockSubstrateApiI_GetChildStorageRawLatest_Call { return &MockSubstrateApiI_GetChildStorageRawLatest_Call{Call: _e.mock.On("GetChildStorageRawLatest", childStorageKey, key)} } @@ -731,7 +731,7 @@ func (_c *MockSubstrateApiI_GetChildStorageRawLatest_Call) RunAndReturn(run func return _c } -// GetChildStorageSize provides a mock function with given fields: childStorageKey, key, blockHash + func (_m *MockSubstrateApiI) GetChildStorageSize(childStorageKey types.StorageKey, key types.StorageKey, blockHash types.Hash) (types.U64, error) { ret := _m.Called(childStorageKey, key, blockHash) @@ -759,15 +759,15 @@ func (_m *MockSubstrateApiI) GetChildStorageSize(childStorageKey types.StorageKe return r0, r1 } -// MockSubstrateApiI_GetChildStorageSize_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorageSize' + type MockSubstrateApiI_GetChildStorageSize_Call struct { *mock.Call } -// GetChildStorageSize is a 
helper method to define mock.On call -// - childStorageKey types.StorageKey -// - key types.StorageKey -// - blockHash types.Hash + + + + func (_e *MockSubstrateApiI_Expecter) GetChildStorageSize(childStorageKey interface{}, key interface{}, blockHash interface{}) *MockSubstrateApiI_GetChildStorageSize_Call { return &MockSubstrateApiI_GetChildStorageSize_Call{Call: _e.mock.On("GetChildStorageSize", childStorageKey, key, blockHash)} } @@ -789,7 +789,7 @@ func (_c *MockSubstrateApiI_GetChildStorageSize_Call) RunAndReturn(run func(type return _c } -// GetChildStorageSizeLatest provides a mock function with given fields: childStorageKey, key + func (_m *MockSubstrateApiI) GetChildStorageSizeLatest(childStorageKey types.StorageKey, key types.StorageKey) (types.U64, error) { ret := _m.Called(childStorageKey, key) @@ -817,14 +817,14 @@ func (_m *MockSubstrateApiI) GetChildStorageSizeLatest(childStorageKey types.Sto return r0, r1 } -// MockSubstrateApiI_GetChildStorageSizeLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetChildStorageSizeLatest' + type MockSubstrateApiI_GetChildStorageSizeLatest_Call struct { *mock.Call } -// GetChildStorageSizeLatest is a helper method to define mock.On call -// - childStorageKey types.StorageKey -// - key types.StorageKey + + + func (_e *MockSubstrateApiI_Expecter) GetChildStorageSizeLatest(childStorageKey interface{}, key interface{}) *MockSubstrateApiI_GetChildStorageSizeLatest_Call { return &MockSubstrateApiI_GetChildStorageSizeLatest_Call{Call: _e.mock.On("GetChildStorageSizeLatest", childStorageKey, key)} } @@ -846,7 +846,7 @@ func (_c *MockSubstrateApiI_GetChildStorageSizeLatest_Call) RunAndReturn(run fun return _c } -// GetFinalizedHead provides a mock function with given fields: + func (_m *MockSubstrateApiI) GetFinalizedHead() (types.Hash, error) { ret := _m.Called() @@ -876,12 +876,12 @@ func (_m *MockSubstrateApiI) GetFinalizedHead() (types.Hash, error) { return r0, r1 } -// MockSubstrateApiI_GetFinalizedHead_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFinalizedHead' + type MockSubstrateApiI_GetFinalizedHead_Call struct { *mock.Call } -// GetFinalizedHead is a helper method to define mock.On call + func (_e *MockSubstrateApiI_Expecter) GetFinalizedHead() *MockSubstrateApiI_GetFinalizedHead_Call { return &MockSubstrateApiI_GetFinalizedHead_Call{Call: _e.mock.On("GetFinalizedHead")} } @@ -903,7 +903,7 @@ func (_c *MockSubstrateApiI_GetFinalizedHead_Call) RunAndReturn(run func() (type return _c } -// GetHeader provides a mock function with given fields: blockHash + func (_m *MockSubstrateApiI) GetHeader(blockHash types.Hash) (*types.Header, error) { ret := _m.Called(blockHash) @@ -933,13 +933,13 @@ func (_m *MockSubstrateApiI) GetHeader(blockHash types.Hash) (*types.Header, err return r0, r1 } -// MockSubstrateApiI_GetHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetHeader' + type MockSubstrateApiI_GetHeader_Call struct { *mock.Call } -// GetHeader is a helper method to define mock.On call -// - blockHash types.Hash + + func (_e *MockSubstrateApiI_Expecter) GetHeader(blockHash interface{}) *MockSubstrateApiI_GetHeader_Call { return &MockSubstrateApiI_GetHeader_Call{Call: _e.mock.On("GetHeader", blockHash)} } @@ -961,7 +961,7 @@ func (_c *MockSubstrateApiI_GetHeader_Call) RunAndReturn(run func(types.Hash) (* return _c } -// GetHeaderLatest provides a mock function with given fields: + func (_m 
*MockSubstrateApiI) GetHeaderLatest() (*types.Header, error) { ret := _m.Called() @@ -991,12 +991,12 @@ func (_m *MockSubstrateApiI) GetHeaderLatest() (*types.Header, error) { return r0, r1 } -// MockSubstrateApiI_GetHeaderLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetHeaderLatest' + type MockSubstrateApiI_GetHeaderLatest_Call struct { *mock.Call } -// GetHeaderLatest is a helper method to define mock.On call + func (_e *MockSubstrateApiI_Expecter) GetHeaderLatest() *MockSubstrateApiI_GetHeaderLatest_Call { return &MockSubstrateApiI_GetHeaderLatest_Call{Call: _e.mock.On("GetHeaderLatest")} } @@ -1018,7 +1018,7 @@ func (_c *MockSubstrateApiI_GetHeaderLatest_Call) RunAndReturn(run func() (*type return _c } -// GetKeys provides a mock function with given fields: prefix, blockHash + func (_m *MockSubstrateApiI) GetKeys(prefix types.StorageKey, blockHash types.Hash) ([]types.StorageKey, error) { ret := _m.Called(prefix, blockHash) @@ -1048,14 +1048,14 @@ func (_m *MockSubstrateApiI) GetKeys(prefix types.StorageKey, blockHash types.Ha return r0, r1 } -// MockSubstrateApiI_GetKeys_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetKeys' + type MockSubstrateApiI_GetKeys_Call struct { *mock.Call } -// GetKeys is a helper method to define mock.On call -// - prefix types.StorageKey -// - blockHash types.Hash + + + func (_e *MockSubstrateApiI_Expecter) GetKeys(prefix interface{}, blockHash interface{}) *MockSubstrateApiI_GetKeys_Call { return &MockSubstrateApiI_GetKeys_Call{Call: _e.mock.On("GetKeys", prefix, blockHash)} } @@ -1077,7 +1077,7 @@ func (_c *MockSubstrateApiI_GetKeys_Call) RunAndReturn(run func(types.StorageKey return _c } -// GetKeysLatest provides a mock function with given fields: prefix + func (_m *MockSubstrateApiI) GetKeysLatest(prefix types.StorageKey) ([]types.StorageKey, error) { ret := _m.Called(prefix) @@ -1107,13 +1107,13 @@ func (_m *MockSubstrateApiI) GetKeysLatest(prefix types.StorageKey) ([]types.Sto return r0, r1 } -// MockSubstrateApiI_GetKeysLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetKeysLatest' + type MockSubstrateApiI_GetKeysLatest_Call struct { *mock.Call } -// GetKeysLatest is a helper method to define mock.On call -// - prefix types.StorageKey + + func (_e *MockSubstrateApiI_Expecter) GetKeysLatest(prefix interface{}) *MockSubstrateApiI_GetKeysLatest_Call { return &MockSubstrateApiI_GetKeysLatest_Call{Call: _e.mock.On("GetKeysLatest", prefix)} } @@ -1135,7 +1135,7 @@ func (_c *MockSubstrateApiI_GetKeysLatest_Call) RunAndReturn(run func(types.Stor return _c } -// GetMetadata provides a mock function with given fields: blockHash + func (_m *MockSubstrateApiI) GetMetadata(blockHash types.Hash) (*types.Metadata, error) { ret := _m.Called(blockHash) @@ -1165,13 +1165,13 @@ func (_m *MockSubstrateApiI) GetMetadata(blockHash types.Hash) (*types.Metadata, return r0, r1 } -// MockSubstrateApiI_GetMetadata_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetMetadata' + type MockSubstrateApiI_GetMetadata_Call struct { *mock.Call } -// GetMetadata is a helper method to define mock.On call -// - blockHash types.Hash + + func (_e *MockSubstrateApiI_Expecter) GetMetadata(blockHash interface{}) *MockSubstrateApiI_GetMetadata_Call { return &MockSubstrateApiI_GetMetadata_Call{Call: _e.mock.On("GetMetadata", blockHash)} } @@ -1193,7 +1193,7 @@ func (_c 
*MockSubstrateApiI_GetMetadata_Call) RunAndReturn(run func(types.Hash) return _c } -// GetMetadataLatest provides a mock function with given fields: + func (_m *MockSubstrateApiI) GetMetadataLatest() (*types.Metadata, error) { ret := _m.Called() @@ -1223,12 +1223,12 @@ func (_m *MockSubstrateApiI) GetMetadataLatest() (*types.Metadata, error) { return r0, r1 } -// MockSubstrateApiI_GetMetadataLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetMetadataLatest' + type MockSubstrateApiI_GetMetadataLatest_Call struct { *mock.Call } -// GetMetadataLatest is a helper method to define mock.On call + func (_e *MockSubstrateApiI_Expecter) GetMetadataLatest() *MockSubstrateApiI_GetMetadataLatest_Call { return &MockSubstrateApiI_GetMetadataLatest_Call{Call: _e.mock.On("GetMetadataLatest")} } @@ -1250,7 +1250,7 @@ func (_c *MockSubstrateApiI_GetMetadataLatest_Call) RunAndReturn(run func() (*ty return _c } -// GetRuntimeVersion provides a mock function with given fields: blockHash + func (_m *MockSubstrateApiI) GetRuntimeVersion(blockHash types.Hash) (*types.RuntimeVersion, error) { ret := _m.Called(blockHash) @@ -1280,13 +1280,13 @@ func (_m *MockSubstrateApiI) GetRuntimeVersion(blockHash types.Hash) (*types.Run return r0, r1 } -// MockSubstrateApiI_GetRuntimeVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRuntimeVersion' + type MockSubstrateApiI_GetRuntimeVersion_Call struct { *mock.Call } -// GetRuntimeVersion is a helper method to define mock.On call -// - blockHash types.Hash + + func (_e *MockSubstrateApiI_Expecter) GetRuntimeVersion(blockHash interface{}) *MockSubstrateApiI_GetRuntimeVersion_Call { return &MockSubstrateApiI_GetRuntimeVersion_Call{Call: _e.mock.On("GetRuntimeVersion", blockHash)} } @@ -1308,7 +1308,7 @@ func (_c *MockSubstrateApiI_GetRuntimeVersion_Call) RunAndReturn(run func(types. 
return _c } -// GetRuntimeVersionLatest provides a mock function with given fields: + func (_m *MockSubstrateApiI) GetRuntimeVersionLatest() (*types.RuntimeVersion, error) { ret := _m.Called() @@ -1338,12 +1338,12 @@ func (_m *MockSubstrateApiI) GetRuntimeVersionLatest() (*types.RuntimeVersion, e return r0, r1 } -// MockSubstrateApiI_GetRuntimeVersionLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRuntimeVersionLatest' + type MockSubstrateApiI_GetRuntimeVersionLatest_Call struct { *mock.Call } -// GetRuntimeVersionLatest is a helper method to define mock.On call + func (_e *MockSubstrateApiI_Expecter) GetRuntimeVersionLatest() *MockSubstrateApiI_GetRuntimeVersionLatest_Call { return &MockSubstrateApiI_GetRuntimeVersionLatest_Call{Call: _e.mock.On("GetRuntimeVersionLatest")} } @@ -1365,7 +1365,7 @@ func (_c *MockSubstrateApiI_GetRuntimeVersionLatest_Call) RunAndReturn(run func( return _c } -// GetStorage provides a mock function with given fields: key, target, blockHash + func (_m *MockSubstrateApiI) GetStorage(key types.StorageKey, target interface{}, blockHash types.Hash) (bool, error) { ret := _m.Called(key, target, blockHash) @@ -1393,15 +1393,15 @@ func (_m *MockSubstrateApiI) GetStorage(key types.StorageKey, target interface{} return r0, r1 } -// MockSubstrateApiI_GetStorage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorage' + type MockSubstrateApiI_GetStorage_Call struct { *mock.Call } -// GetStorage is a helper method to define mock.On call -// - key types.StorageKey -// - target interface{} -// - blockHash types.Hash + + + + func (_e *MockSubstrateApiI_Expecter) GetStorage(key interface{}, target interface{}, blockHash interface{}) *MockSubstrateApiI_GetStorage_Call { return &MockSubstrateApiI_GetStorage_Call{Call: _e.mock.On("GetStorage", key, target, blockHash)} } @@ -1423,7 +1423,7 @@ func (_c *MockSubstrateApiI_GetStorage_Call) RunAndReturn(run func(types.Storage return _c } -// GetStorageHash provides a mock function with given fields: key, blockHash + func (_m *MockSubstrateApiI) GetStorageHash(key types.StorageKey, blockHash types.Hash) (types.Hash, error) { ret := _m.Called(key, blockHash) @@ -1453,14 +1453,14 @@ func (_m *MockSubstrateApiI) GetStorageHash(key types.StorageKey, blockHash type return r0, r1 } -// MockSubstrateApiI_GetStorageHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorageHash' + type MockSubstrateApiI_GetStorageHash_Call struct { *mock.Call } -// GetStorageHash is a helper method to define mock.On call -// - key types.StorageKey -// - blockHash types.Hash + + + func (_e *MockSubstrateApiI_Expecter) GetStorageHash(key interface{}, blockHash interface{}) *MockSubstrateApiI_GetStorageHash_Call { return &MockSubstrateApiI_GetStorageHash_Call{Call: _e.mock.On("GetStorageHash", key, blockHash)} } @@ -1482,7 +1482,7 @@ func (_c *MockSubstrateApiI_GetStorageHash_Call) RunAndReturn(run func(types.Sto return _c } -// GetStorageHashLatest provides a mock function with given fields: key + func (_m *MockSubstrateApiI) GetStorageHashLatest(key types.StorageKey) (types.Hash, error) { ret := _m.Called(key) @@ -1512,13 +1512,13 @@ func (_m *MockSubstrateApiI) GetStorageHashLatest(key types.StorageKey) (types.H return r0, r1 } -// MockSubstrateApiI_GetStorageHashLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorageHashLatest' + type 
MockSubstrateApiI_GetStorageHashLatest_Call struct { *mock.Call } -// GetStorageHashLatest is a helper method to define mock.On call -// - key types.StorageKey + + func (_e *MockSubstrateApiI_Expecter) GetStorageHashLatest(key interface{}) *MockSubstrateApiI_GetStorageHashLatest_Call { return &MockSubstrateApiI_GetStorageHashLatest_Call{Call: _e.mock.On("GetStorageHashLatest", key)} } @@ -1540,7 +1540,7 @@ func (_c *MockSubstrateApiI_GetStorageHashLatest_Call) RunAndReturn(run func(typ return _c } -// GetStorageLatest provides a mock function with given fields: key, target + func (_m *MockSubstrateApiI) GetStorageLatest(key types.StorageKey, target interface{}) (bool, error) { ret := _m.Called(key, target) @@ -1568,14 +1568,14 @@ func (_m *MockSubstrateApiI) GetStorageLatest(key types.StorageKey, target inter return r0, r1 } -// MockSubstrateApiI_GetStorageLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorageLatest' + type MockSubstrateApiI_GetStorageLatest_Call struct { *mock.Call } -// GetStorageLatest is a helper method to define mock.On call -// - key types.StorageKey -// - target interface{} + + + func (_e *MockSubstrateApiI_Expecter) GetStorageLatest(key interface{}, target interface{}) *MockSubstrateApiI_GetStorageLatest_Call { return &MockSubstrateApiI_GetStorageLatest_Call{Call: _e.mock.On("GetStorageLatest", key, target)} } @@ -1597,7 +1597,7 @@ func (_c *MockSubstrateApiI_GetStorageLatest_Call) RunAndReturn(run func(types.S return _c } -// GetStorageRaw provides a mock function with given fields: key, blockHash + func (_m *MockSubstrateApiI) GetStorageRaw(key types.StorageKey, blockHash types.Hash) (*types.StorageDataRaw, error) { ret := _m.Called(key, blockHash) @@ -1627,14 +1627,14 @@ func (_m *MockSubstrateApiI) GetStorageRaw(key types.StorageKey, blockHash types return r0, r1 } -// MockSubstrateApiI_GetStorageRaw_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorageRaw' + type MockSubstrateApiI_GetStorageRaw_Call struct { *mock.Call } -// GetStorageRaw is a helper method to define mock.On call -// - key types.StorageKey -// - blockHash types.Hash + + + func (_e *MockSubstrateApiI_Expecter) GetStorageRaw(key interface{}, blockHash interface{}) *MockSubstrateApiI_GetStorageRaw_Call { return &MockSubstrateApiI_GetStorageRaw_Call{Call: _e.mock.On("GetStorageRaw", key, blockHash)} } @@ -1656,7 +1656,7 @@ func (_c *MockSubstrateApiI_GetStorageRaw_Call) RunAndReturn(run func(types.Stor return _c } -// GetStorageRawLatest provides a mock function with given fields: key + func (_m *MockSubstrateApiI) GetStorageRawLatest(key types.StorageKey) (*types.StorageDataRaw, error) { ret := _m.Called(key) @@ -1686,13 +1686,13 @@ func (_m *MockSubstrateApiI) GetStorageRawLatest(key types.StorageKey) (*types.S return r0, r1 } -// MockSubstrateApiI_GetStorageRawLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorageRawLatest' + type MockSubstrateApiI_GetStorageRawLatest_Call struct { *mock.Call } -// GetStorageRawLatest is a helper method to define mock.On call -// - key types.StorageKey + + func (_e *MockSubstrateApiI_Expecter) GetStorageRawLatest(key interface{}) *MockSubstrateApiI_GetStorageRawLatest_Call { return &MockSubstrateApiI_GetStorageRawLatest_Call{Call: _e.mock.On("GetStorageRawLatest", key)} } @@ -1714,7 +1714,7 @@ func (_c *MockSubstrateApiI_GetStorageRawLatest_Call) RunAndReturn(run func(type return _c } -// GetStorageSize 
provides a mock function with given fields: key, blockHash + func (_m *MockSubstrateApiI) GetStorageSize(key types.StorageKey, blockHash types.Hash) (types.U64, error) { ret := _m.Called(key, blockHash) @@ -1742,14 +1742,14 @@ func (_m *MockSubstrateApiI) GetStorageSize(key types.StorageKey, blockHash type return r0, r1 } -// MockSubstrateApiI_GetStorageSize_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorageSize' + type MockSubstrateApiI_GetStorageSize_Call struct { *mock.Call } -// GetStorageSize is a helper method to define mock.On call -// - key types.StorageKey -// - blockHash types.Hash + + + func (_e *MockSubstrateApiI_Expecter) GetStorageSize(key interface{}, blockHash interface{}) *MockSubstrateApiI_GetStorageSize_Call { return &MockSubstrateApiI_GetStorageSize_Call{Call: _e.mock.On("GetStorageSize", key, blockHash)} } @@ -1771,7 +1771,7 @@ func (_c *MockSubstrateApiI_GetStorageSize_Call) RunAndReturn(run func(types.Sto return _c } -// GetStorageSizeLatest provides a mock function with given fields: key + func (_m *MockSubstrateApiI) GetStorageSizeLatest(key types.StorageKey) (types.U64, error) { ret := _m.Called(key) @@ -1799,13 +1799,13 @@ func (_m *MockSubstrateApiI) GetStorageSizeLatest(key types.StorageKey) (types.U return r0, r1 } -// MockSubstrateApiI_GetStorageSizeLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetStorageSizeLatest' + type MockSubstrateApiI_GetStorageSizeLatest_Call struct { *mock.Call } -// GetStorageSizeLatest is a helper method to define mock.On call -// - key types.StorageKey + + func (_e *MockSubstrateApiI_Expecter) GetStorageSizeLatest(key interface{}) *MockSubstrateApiI_GetStorageSizeLatest_Call { return &MockSubstrateApiI_GetStorageSizeLatest_Call{Call: _e.mock.On("GetStorageSizeLatest", key)} } @@ -1827,7 +1827,7 @@ func (_c *MockSubstrateApiI_GetStorageSizeLatest_Call) RunAndReturn(run func(typ return _c } -// PendingExtrinsics provides a mock function with given fields: + func (_m *MockSubstrateApiI) PendingExtrinsics() ([]types.Extrinsic, error) { ret := _m.Called() @@ -1857,12 +1857,12 @@ func (_m *MockSubstrateApiI) PendingExtrinsics() ([]types.Extrinsic, error) { return r0, r1 } -// MockSubstrateApiI_PendingExtrinsics_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PendingExtrinsics' + type MockSubstrateApiI_PendingExtrinsics_Call struct { *mock.Call } -// PendingExtrinsics is a helper method to define mock.On call + func (_e *MockSubstrateApiI_Expecter) PendingExtrinsics() *MockSubstrateApiI_PendingExtrinsics_Call { return &MockSubstrateApiI_PendingExtrinsics_Call{Call: _e.mock.On("PendingExtrinsics")} } @@ -1884,7 +1884,7 @@ func (_c *MockSubstrateApiI_PendingExtrinsics_Call) RunAndReturn(run func() ([]t return _c } -// QueryStorage provides a mock function with given fields: keys, startBlock, block + func (_m *MockSubstrateApiI) QueryStorage(keys []types.StorageKey, startBlock types.Hash, block types.Hash) ([]types.StorageChangeSet, error) { ret := _m.Called(keys, startBlock, block) @@ -1914,15 +1914,15 @@ func (_m *MockSubstrateApiI) QueryStorage(keys []types.StorageKey, startBlock ty return r0, r1 } -// MockSubstrateApiI_QueryStorage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'QueryStorage' + type MockSubstrateApiI_QueryStorage_Call struct { *mock.Call } -// QueryStorage is a helper method to define mock.On call -// - keys []types.StorageKey -// 
- startBlock types.Hash -// - block types.Hash + + + + func (_e *MockSubstrateApiI_Expecter) QueryStorage(keys interface{}, startBlock interface{}, block interface{}) *MockSubstrateApiI_QueryStorage_Call { return &MockSubstrateApiI_QueryStorage_Call{Call: _e.mock.On("QueryStorage", keys, startBlock, block)} } @@ -1944,7 +1944,7 @@ func (_c *MockSubstrateApiI_QueryStorage_Call) RunAndReturn(run func([]types.Sto return _c } -// QueryStorageAt provides a mock function with given fields: keys, block + func (_m *MockSubstrateApiI) QueryStorageAt(keys []types.StorageKey, block types.Hash) ([]types.StorageChangeSet, error) { ret := _m.Called(keys, block) @@ -1974,14 +1974,14 @@ func (_m *MockSubstrateApiI) QueryStorageAt(keys []types.StorageKey, block types return r0, r1 } -// MockSubstrateApiI_QueryStorageAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'QueryStorageAt' + type MockSubstrateApiI_QueryStorageAt_Call struct { *mock.Call } -// QueryStorageAt is a helper method to define mock.On call -// - keys []types.StorageKey -// - block types.Hash + + + func (_e *MockSubstrateApiI_Expecter) QueryStorageAt(keys interface{}, block interface{}) *MockSubstrateApiI_QueryStorageAt_Call { return &MockSubstrateApiI_QueryStorageAt_Call{Call: _e.mock.On("QueryStorageAt", keys, block)} } @@ -2003,7 +2003,7 @@ func (_c *MockSubstrateApiI_QueryStorageAt_Call) RunAndReturn(run func([]types.S return _c } -// QueryStorageAtLatest provides a mock function with given fields: keys + func (_m *MockSubstrateApiI) QueryStorageAtLatest(keys []types.StorageKey) ([]types.StorageChangeSet, error) { ret := _m.Called(keys) @@ -2033,13 +2033,13 @@ func (_m *MockSubstrateApiI) QueryStorageAtLatest(keys []types.StorageKey) ([]ty return r0, r1 } -// MockSubstrateApiI_QueryStorageAtLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'QueryStorageAtLatest' + type MockSubstrateApiI_QueryStorageAtLatest_Call struct { *mock.Call } -// QueryStorageAtLatest is a helper method to define mock.On call -// - keys []types.StorageKey + + func (_e *MockSubstrateApiI_Expecter) QueryStorageAtLatest(keys interface{}) *MockSubstrateApiI_QueryStorageAtLatest_Call { return &MockSubstrateApiI_QueryStorageAtLatest_Call{Call: _e.mock.On("QueryStorageAtLatest", keys)} } @@ -2061,7 +2061,7 @@ func (_c *MockSubstrateApiI_QueryStorageAtLatest_Call) RunAndReturn(run func([]t return _c } -// QueryStorageLatest provides a mock function with given fields: keys, startBlock + func (_m *MockSubstrateApiI) QueryStorageLatest(keys []types.StorageKey, startBlock types.Hash) ([]types.StorageChangeSet, error) { ret := _m.Called(keys, startBlock) @@ -2091,14 +2091,14 @@ func (_m *MockSubstrateApiI) QueryStorageLatest(keys []types.StorageKey, startBl return r0, r1 } -// MockSubstrateApiI_QueryStorageLatest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'QueryStorageLatest' + type MockSubstrateApiI_QueryStorageLatest_Call struct { *mock.Call } -// QueryStorageLatest is a helper method to define mock.On call -// - keys []types.StorageKey -// - startBlock types.Hash + + + func (_e *MockSubstrateApiI_Expecter) QueryStorageLatest(keys interface{}, startBlock interface{}) *MockSubstrateApiI_QueryStorageLatest_Call { return &MockSubstrateApiI_QueryStorageLatest_Call{Call: _e.mock.On("QueryStorageLatest", keys, startBlock)} } @@ -2120,7 +2120,7 @@ func (_c *MockSubstrateApiI_QueryStorageLatest_Call) RunAndReturn(run func([]typ return _c } -// 
SubmitAndWatchExtrinsic provides a mock function with given fields: xt + func (_m *MockSubstrateApiI) SubmitAndWatchExtrinsic(xt types.Extrinsic) (*author.ExtrinsicStatusSubscription, error) { ret := _m.Called(xt) @@ -2150,13 +2150,13 @@ func (_m *MockSubstrateApiI) SubmitAndWatchExtrinsic(xt types.Extrinsic) (*autho return r0, r1 } -// MockSubstrateApiI_SubmitAndWatchExtrinsic_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubmitAndWatchExtrinsic' + type MockSubstrateApiI_SubmitAndWatchExtrinsic_Call struct { *mock.Call } -// SubmitAndWatchExtrinsic is a helper method to define mock.On call -// - xt types.Extrinsic + + func (_e *MockSubstrateApiI_Expecter) SubmitAndWatchExtrinsic(xt interface{}) *MockSubstrateApiI_SubmitAndWatchExtrinsic_Call { return &MockSubstrateApiI_SubmitAndWatchExtrinsic_Call{Call: _e.mock.On("SubmitAndWatchExtrinsic", xt)} } @@ -2178,7 +2178,7 @@ func (_c *MockSubstrateApiI_SubmitAndWatchExtrinsic_Call) RunAndReturn(run func( return _c } -// SubmitExtrinsic provides a mock function with given fields: xt + func (_m *MockSubstrateApiI) SubmitExtrinsic(xt types.Extrinsic) (types.Hash, error) { ret := _m.Called(xt) @@ -2208,13 +2208,13 @@ func (_m *MockSubstrateApiI) SubmitExtrinsic(xt types.Extrinsic) (types.Hash, er return r0, r1 } -// MockSubstrateApiI_SubmitExtrinsic_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubmitExtrinsic' + type MockSubstrateApiI_SubmitExtrinsic_Call struct { *mock.Call } -// SubmitExtrinsic is a helper method to define mock.On call -// - xt types.Extrinsic + + func (_e *MockSubstrateApiI_Expecter) SubmitExtrinsic(xt interface{}) *MockSubstrateApiI_SubmitExtrinsic_Call { return &MockSubstrateApiI_SubmitExtrinsic_Call{Call: _e.mock.On("SubmitExtrinsic", xt)} } @@ -2236,7 +2236,7 @@ func (_c *MockSubstrateApiI_SubmitExtrinsic_Call) RunAndReturn(run func(types.Ex return _c } -// SubscribeFinalizedHeads provides a mock function with given fields: + func (_m *MockSubstrateApiI) SubscribeFinalizedHeads() (*chain.FinalizedHeadsSubscription, error) { ret := _m.Called() @@ -2266,12 +2266,12 @@ func (_m *MockSubstrateApiI) SubscribeFinalizedHeads() (*chain.FinalizedHeadsSub return r0, r1 } -// MockSubstrateApiI_SubscribeFinalizedHeads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeFinalizedHeads' + type MockSubstrateApiI_SubscribeFinalizedHeads_Call struct { *mock.Call } -// SubscribeFinalizedHeads is a helper method to define mock.On call + func (_e *MockSubstrateApiI_Expecter) SubscribeFinalizedHeads() *MockSubstrateApiI_SubscribeFinalizedHeads_Call { return &MockSubstrateApiI_SubscribeFinalizedHeads_Call{Call: _e.mock.On("SubscribeFinalizedHeads")} } @@ -2293,7 +2293,7 @@ func (_c *MockSubstrateApiI_SubscribeFinalizedHeads_Call) RunAndReturn(run func( return _c } -// SubscribeNewHeads provides a mock function with given fields: + func (_m *MockSubstrateApiI) SubscribeNewHeads() (*chain.NewHeadsSubscription, error) { ret := _m.Called() @@ -2323,12 +2323,12 @@ func (_m *MockSubstrateApiI) SubscribeNewHeads() (*chain.NewHeadsSubscription, e return r0, r1 } -// MockSubstrateApiI_SubscribeNewHeads_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeNewHeads' + type MockSubstrateApiI_SubscribeNewHeads_Call struct { *mock.Call } -// SubscribeNewHeads is a helper method to define mock.On call + func (_e *MockSubstrateApiI_Expecter) SubscribeNewHeads() 
*MockSubstrateApiI_SubscribeNewHeads_Call { return &MockSubstrateApiI_SubscribeNewHeads_Call{Call: _e.mock.On("SubscribeNewHeads")} } @@ -2350,7 +2350,7 @@ func (_c *MockSubstrateApiI_SubscribeNewHeads_Call) RunAndReturn(run func() (*ch return _c } -// SubscribeRuntimeVersion provides a mock function with given fields: + func (_m *MockSubstrateApiI) SubscribeRuntimeVersion() (*state.RuntimeVersionSubscription, error) { ret := _m.Called() @@ -2380,12 +2380,12 @@ func (_m *MockSubstrateApiI) SubscribeRuntimeVersion() (*state.RuntimeVersionSub return r0, r1 } -// MockSubstrateApiI_SubscribeRuntimeVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeRuntimeVersion' + type MockSubstrateApiI_SubscribeRuntimeVersion_Call struct { *mock.Call } -// SubscribeRuntimeVersion is a helper method to define mock.On call + func (_e *MockSubstrateApiI_Expecter) SubscribeRuntimeVersion() *MockSubstrateApiI_SubscribeRuntimeVersion_Call { return &MockSubstrateApiI_SubscribeRuntimeVersion_Call{Call: _e.mock.On("SubscribeRuntimeVersion")} } @@ -2407,7 +2407,7 @@ func (_c *MockSubstrateApiI_SubscribeRuntimeVersion_Call) RunAndReturn(run func( return _c } -// SubscribeStorageRaw provides a mock function with given fields: keys + func (_m *MockSubstrateApiI) SubscribeStorageRaw(keys []types.StorageKey) (*state.StorageSubscription, error) { ret := _m.Called(keys) @@ -2437,13 +2437,13 @@ func (_m *MockSubstrateApiI) SubscribeStorageRaw(keys []types.StorageKey) (*stat return r0, r1 } -// MockSubstrateApiI_SubscribeStorageRaw_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeStorageRaw' + type MockSubstrateApiI_SubscribeStorageRaw_Call struct { *mock.Call } -// SubscribeStorageRaw is a helper method to define mock.On call -// - keys []types.StorageKey + + func (_e *MockSubstrateApiI_Expecter) SubscribeStorageRaw(keys interface{}) *MockSubstrateApiI_SubscribeStorageRaw_Call { return &MockSubstrateApiI_SubscribeStorageRaw_Call{Call: _e.mock.On("SubscribeStorageRaw", keys)} } @@ -2465,8 +2465,8 @@ func (_c *MockSubstrateApiI_SubscribeStorageRaw_Call) RunAndReturn(run func([]ty return _c } -// NewMockSubstrateApiI creates a new instance of MockSubstrateApiI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockSubstrateApiI(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/da/celestia/types/mock_CelestiaRPCClient.go b/mocks/github.com/dymensionxyz/dymint/da/celestia/types/mock_CelestiaRPCClient.go index f80184e4f..cb248d62a 100644 --- a/mocks/github.com/dymensionxyz/dymint/da/celestia/types/mock_CelestiaRPCClient.go +++ b/mocks/github.com/dymensionxyz/dymint/da/celestia/types/mock_CelestiaRPCClient.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. 
+ package types @@ -16,7 +16,7 @@ import ( share "github.com/celestiaorg/celestia-openrpc/types/share" ) -// MockCelestiaRPCClient is an autogenerated mock type for the CelestiaRPCClient type + type MockCelestiaRPCClient struct { mock.Mock } @@ -29,7 +29,7 @@ func (_m *MockCelestiaRPCClient) EXPECT() *MockCelestiaRPCClient_Expecter { return &MockCelestiaRPCClient_Expecter{mock: &_m.Mock} } -// Get provides a mock function with given fields: ctx, height, namespace, commitment + func (_m *MockCelestiaRPCClient) Get(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Blob, error) { ret := _m.Called(ctx, height, namespace, commitment) @@ -59,16 +59,16 @@ func (_m *MockCelestiaRPCClient) Get(ctx context.Context, height uint64, namespa return r0, r1 } -// MockCelestiaRPCClient_Get_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Get' + type MockCelestiaRPCClient_Get_Call struct { *mock.Call } -// Get is a helper method to define mock.On call -// - ctx context.Context -// - height uint64 -// - namespace share.Namespace -// - commitment blob.Commitment + + + + + func (_e *MockCelestiaRPCClient_Expecter) Get(ctx interface{}, height interface{}, namespace interface{}, commitment interface{}) *MockCelestiaRPCClient_Get_Call { return &MockCelestiaRPCClient_Get_Call{Call: _e.mock.On("Get", ctx, height, namespace, commitment)} } @@ -90,7 +90,7 @@ func (_c *MockCelestiaRPCClient_Get_Call) RunAndReturn(run func(context.Context, return _c } -// GetAll provides a mock function with given fields: _a0, _a1, _a2 + func (_m *MockCelestiaRPCClient) GetAll(_a0 context.Context, _a1 uint64, _a2 []share.Namespace) ([]*blob.Blob, error) { ret := _m.Called(_a0, _a1, _a2) @@ -120,15 +120,15 @@ func (_m *MockCelestiaRPCClient) GetAll(_a0 context.Context, _a1 uint64, _a2 []s return r0, r1 } -// MockCelestiaRPCClient_GetAll_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAll' + type MockCelestiaRPCClient_GetAll_Call struct { *mock.Call } -// GetAll is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 uint64 -// - _a2 []share.Namespace + + + + func (_e *MockCelestiaRPCClient_Expecter) GetAll(_a0 interface{}, _a1 interface{}, _a2 interface{}) *MockCelestiaRPCClient_GetAll_Call { return &MockCelestiaRPCClient_GetAll_Call{Call: _e.mock.On("GetAll", _a0, _a1, _a2)} } @@ -150,7 +150,7 @@ func (_c *MockCelestiaRPCClient_GetAll_Call) RunAndReturn(run func(context.Conte return _c } -// GetByHeight provides a mock function with given fields: ctx, height + func (_m *MockCelestiaRPCClient) GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { ret := _m.Called(ctx, height) @@ -180,14 +180,14 @@ func (_m *MockCelestiaRPCClient) GetByHeight(ctx context.Context, height uint64) return r0, r1 } -// MockCelestiaRPCClient_GetByHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetByHeight' + type MockCelestiaRPCClient_GetByHeight_Call struct { *mock.Call } -// GetByHeight is a helper method to define mock.On call -// - ctx context.Context -// - height uint64 + + + func (_e *MockCelestiaRPCClient_Expecter) GetByHeight(ctx interface{}, height interface{}) *MockCelestiaRPCClient_GetByHeight_Call { return &MockCelestiaRPCClient_GetByHeight_Call{Call: _e.mock.On("GetByHeight", ctx, height)} } @@ -209,7 +209,7 @@ func (_c *MockCelestiaRPCClient_GetByHeight_Call) RunAndReturn(run func(context. 
return _c } -// GetProof provides a mock function with given fields: ctx, height, namespace, commitment + func (_m *MockCelestiaRPCClient) GetProof(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Proof, error) { ret := _m.Called(ctx, height, namespace, commitment) @@ -239,16 +239,16 @@ func (_m *MockCelestiaRPCClient) GetProof(ctx context.Context, height uint64, na return r0, r1 } -// MockCelestiaRPCClient_GetProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProof' + type MockCelestiaRPCClient_GetProof_Call struct { *mock.Call } -// GetProof is a helper method to define mock.On call -// - ctx context.Context -// - height uint64 -// - namespace share.Namespace -// - commitment blob.Commitment + + + + + func (_e *MockCelestiaRPCClient_Expecter) GetProof(ctx interface{}, height interface{}, namespace interface{}, commitment interface{}) *MockCelestiaRPCClient_GetProof_Call { return &MockCelestiaRPCClient_GetProof_Call{Call: _e.mock.On("GetProof", ctx, height, namespace, commitment)} } @@ -270,7 +270,7 @@ func (_c *MockCelestiaRPCClient_GetProof_Call) RunAndReturn(run func(context.Con return _c } -// GetSignerBalance provides a mock function with given fields: ctx + func (_m *MockCelestiaRPCClient) GetSignerBalance(ctx context.Context) (*sdk.Coin, error) { ret := _m.Called(ctx) @@ -300,13 +300,13 @@ func (_m *MockCelestiaRPCClient) GetSignerBalance(ctx context.Context) (*sdk.Coi return r0, r1 } -// MockCelestiaRPCClient_GetSignerBalance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSignerBalance' + type MockCelestiaRPCClient_GetSignerBalance_Call struct { *mock.Call } -// GetSignerBalance is a helper method to define mock.On call -// - ctx context.Context + + func (_e *MockCelestiaRPCClient_Expecter) GetSignerBalance(ctx interface{}) *MockCelestiaRPCClient_GetSignerBalance_Call { return &MockCelestiaRPCClient_GetSignerBalance_Call{Call: _e.mock.On("GetSignerBalance", ctx)} } @@ -328,7 +328,7 @@ func (_c *MockCelestiaRPCClient_GetSignerBalance_Call) RunAndReturn(run func(con return _c } -// Included provides a mock function with given fields: ctx, height, namespace, proof, commitment + func (_m *MockCelestiaRPCClient) Included(ctx context.Context, height uint64, namespace share.Namespace, proof *blob.Proof, commitment blob.Commitment) (bool, error) { ret := _m.Called(ctx, height, namespace, proof, commitment) @@ -356,17 +356,17 @@ func (_m *MockCelestiaRPCClient) Included(ctx context.Context, height uint64, na return r0, r1 } -// MockCelestiaRPCClient_Included_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Included' + type MockCelestiaRPCClient_Included_Call struct { *mock.Call } -// Included is a helper method to define mock.On call -// - ctx context.Context -// - height uint64 -// - namespace share.Namespace -// - proof *blob.Proof -// - commitment blob.Commitment + + + + + + func (_e *MockCelestiaRPCClient_Expecter) Included(ctx interface{}, height interface{}, namespace interface{}, proof interface{}, commitment interface{}) *MockCelestiaRPCClient_Included_Call { return &MockCelestiaRPCClient_Included_Call{Call: _e.mock.On("Included", ctx, height, namespace, proof, commitment)} } @@ -388,7 +388,7 @@ func (_c *MockCelestiaRPCClient_Included_Call) RunAndReturn(run func(context.Con return _c } -// Submit provides a mock function with given fields: ctx, blobs, options + func (_m *MockCelestiaRPCClient) 
Submit(ctx context.Context, blobs []*blob.Blob, options *blob.SubmitOptions) (uint64, error) { ret := _m.Called(ctx, blobs, options) @@ -416,15 +416,15 @@ func (_m *MockCelestiaRPCClient) Submit(ctx context.Context, blobs []*blob.Blob, return r0, r1 } -// MockCelestiaRPCClient_Submit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Submit' + type MockCelestiaRPCClient_Submit_Call struct { *mock.Call } -// Submit is a helper method to define mock.On call -// - ctx context.Context -// - blobs []*blob.Blob -// - options *blob.SubmitOptions + + + + func (_e *MockCelestiaRPCClient_Expecter) Submit(ctx interface{}, blobs interface{}, options interface{}) *MockCelestiaRPCClient_Submit_Call { return &MockCelestiaRPCClient_Submit_Call{Call: _e.mock.On("Submit", ctx, blobs, options)} } @@ -446,8 +446,8 @@ func (_c *MockCelestiaRPCClient_Submit_Call) RunAndReturn(run func(context.Conte return _c } -// NewMockCelestiaRPCClient creates a new instance of MockCelestiaRPCClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockCelestiaRPCClient(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/da/mock_DataAvailabilityLayerClient.go b/mocks/github.com/dymensionxyz/dymint/da/mock_DataAvailabilityLayerClient.go index 9c20b8b5c..c116222ed 100644 --- a/mocks/github.com/dymensionxyz/dymint/da/mock_DataAvailabilityLayerClient.go +++ b/mocks/github.com/dymensionxyz/dymint/da/mock_DataAvailabilityLayerClient.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. + package da @@ -13,7 +13,7 @@ import ( types "github.com/dymensionxyz/dymint/types" ) -// MockDataAvailabilityLayerClient is an autogenerated mock type for the DataAvailabilityLayerClient type + type MockDataAvailabilityLayerClient struct { mock.Mock } @@ -26,7 +26,7 @@ func (_m *MockDataAvailabilityLayerClient) EXPECT() *MockDataAvailabilityLayerCl return &MockDataAvailabilityLayerClient_Expecter{mock: &_m.Mock} } -// CheckBatchAvailability provides a mock function with given fields: daMetaData + func (_m *MockDataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASubmitMetaData) da.ResultCheckBatch { ret := _m.Called(daMetaData) @@ -44,13 +44,13 @@ func (_m *MockDataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da return r0 } -// MockDataAvailabilityLayerClient_CheckBatchAvailability_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckBatchAvailability' + type MockDataAvailabilityLayerClient_CheckBatchAvailability_Call struct { *mock.Call } -// CheckBatchAvailability is a helper method to define mock.On call -// - daMetaData *da.DASubmitMetaData + + func (_e *MockDataAvailabilityLayerClient_Expecter) CheckBatchAvailability(daMetaData interface{}) *MockDataAvailabilityLayerClient_CheckBatchAvailability_Call { return &MockDataAvailabilityLayerClient_CheckBatchAvailability_Call{Call: _e.mock.On("CheckBatchAvailability", daMetaData)} } @@ -72,7 +72,7 @@ func (_c *MockDataAvailabilityLayerClient_CheckBatchAvailability_Call) RunAndRet return _c } -// GetClientType provides a mock function with given fields: + func (_m *MockDataAvailabilityLayerClient) GetClientType() da.Client { ret := _m.Called() @@ -90,12 +90,12 @@ func (_m *MockDataAvailabilityLayerClient) GetClientType() da.Client { return r0 } -// MockDataAvailabilityLayerClient_GetClientType_Call is a 
*mock.Call that shadows Run/Return methods with type explicit version for method 'GetClientType' + type MockDataAvailabilityLayerClient_GetClientType_Call struct { *mock.Call } -// GetClientType is a helper method to define mock.On call + func (_e *MockDataAvailabilityLayerClient_Expecter) GetClientType() *MockDataAvailabilityLayerClient_GetClientType_Call { return &MockDataAvailabilityLayerClient_GetClientType_Call{Call: _e.mock.On("GetClientType")} } @@ -117,7 +117,7 @@ func (_c *MockDataAvailabilityLayerClient_GetClientType_Call) RunAndReturn(run f return _c } -// GetMaxBlobSizeBytes provides a mock function with given fields: + func (_m *MockDataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { ret := _m.Called() @@ -135,12 +135,12 @@ func (_m *MockDataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { return r0 } -// MockDataAvailabilityLayerClient_GetMaxBlobSizeBytes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetMaxBlobSizeBytes' + type MockDataAvailabilityLayerClient_GetMaxBlobSizeBytes_Call struct { *mock.Call } -// GetMaxBlobSizeBytes is a helper method to define mock.On call + func (_e *MockDataAvailabilityLayerClient_Expecter) GetMaxBlobSizeBytes() *MockDataAvailabilityLayerClient_GetMaxBlobSizeBytes_Call { return &MockDataAvailabilityLayerClient_GetMaxBlobSizeBytes_Call{Call: _e.mock.On("GetMaxBlobSizeBytes")} } @@ -162,7 +162,7 @@ func (_c *MockDataAvailabilityLayerClient_GetMaxBlobSizeBytes_Call) RunAndReturn return _c } -// GetSignerBalance provides a mock function with given fields: + func (_m *MockDataAvailabilityLayerClient) GetSignerBalance() (da.Balance, error) { ret := _m.Called() @@ -190,12 +190,12 @@ func (_m *MockDataAvailabilityLayerClient) GetSignerBalance() (da.Balance, error return r0, r1 } -// MockDataAvailabilityLayerClient_GetSignerBalance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSignerBalance' + type MockDataAvailabilityLayerClient_GetSignerBalance_Call struct { *mock.Call } -// GetSignerBalance is a helper method to define mock.On call + func (_e *MockDataAvailabilityLayerClient_Expecter) GetSignerBalance() *MockDataAvailabilityLayerClient_GetSignerBalance_Call { return &MockDataAvailabilityLayerClient_GetSignerBalance_Call{Call: _e.mock.On("GetSignerBalance")} } @@ -217,7 +217,7 @@ func (_c *MockDataAvailabilityLayerClient_GetSignerBalance_Call) RunAndReturn(ru return _c } -// Init provides a mock function with given fields: config, pubsubServer, kvStore, logger, options + func (_m *MockDataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.Server, kvStore store.KV, logger types.Logger, options ...da.Option) error { _va := make([]interface{}, len(options)) for _i := range options { @@ -242,17 +242,17 @@ func (_m *MockDataAvailabilityLayerClient) Init(config []byte, pubsubServer *pub return r0 } -// MockDataAvailabilityLayerClient_Init_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Init' + type MockDataAvailabilityLayerClient_Init_Call struct { *mock.Call } -// Init is a helper method to define mock.On call -// - config []byte -// - pubsubServer *pubsub.Server -// - kvStore store.KV -// - logger types.Logger -// - options ...da.Option + + + + + + func (_e *MockDataAvailabilityLayerClient_Expecter) Init(config interface{}, pubsubServer interface{}, kvStore interface{}, logger interface{}, options ...interface{}) *MockDataAvailabilityLayerClient_Init_Call { return 
&MockDataAvailabilityLayerClient_Init_Call{Call: _e.mock.On("Init", append([]interface{}{config, pubsubServer, kvStore, logger}, options...)...)} @@ -281,7 +281,7 @@ func (_c *MockDataAvailabilityLayerClient_Init_Call) RunAndReturn(run func([]byt return _c } -// Start provides a mock function with given fields: + func (_m *MockDataAvailabilityLayerClient) Start() error { ret := _m.Called() @@ -299,12 +299,12 @@ func (_m *MockDataAvailabilityLayerClient) Start() error { return r0 } -// MockDataAvailabilityLayerClient_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' + type MockDataAvailabilityLayerClient_Start_Call struct { *mock.Call } -// Start is a helper method to define mock.On call + func (_e *MockDataAvailabilityLayerClient_Expecter) Start() *MockDataAvailabilityLayerClient_Start_Call { return &MockDataAvailabilityLayerClient_Start_Call{Call: _e.mock.On("Start")} } @@ -326,7 +326,7 @@ func (_c *MockDataAvailabilityLayerClient_Start_Call) RunAndReturn(run func() er return _c } -// Stop provides a mock function with given fields: + func (_m *MockDataAvailabilityLayerClient) Stop() error { ret := _m.Called() @@ -344,12 +344,12 @@ func (_m *MockDataAvailabilityLayerClient) Stop() error { return r0 } -// MockDataAvailabilityLayerClient_Stop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stop' + type MockDataAvailabilityLayerClient_Stop_Call struct { *mock.Call } -// Stop is a helper method to define mock.On call + func (_e *MockDataAvailabilityLayerClient_Expecter) Stop() *MockDataAvailabilityLayerClient_Stop_Call { return &MockDataAvailabilityLayerClient_Stop_Call{Call: _e.mock.On("Stop")} } @@ -371,7 +371,7 @@ func (_c *MockDataAvailabilityLayerClient_Stop_Call) RunAndReturn(run func() err return _c } -// SubmitBatch provides a mock function with given fields: batch + func (_m *MockDataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultSubmitBatch { ret := _m.Called(batch) @@ -389,13 +389,13 @@ func (_m *MockDataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.Re return r0 } -// MockDataAvailabilityLayerClient_SubmitBatch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubmitBatch' + type MockDataAvailabilityLayerClient_SubmitBatch_Call struct { *mock.Call } -// SubmitBatch is a helper method to define mock.On call -// - batch *types.Batch + + func (_e *MockDataAvailabilityLayerClient_Expecter) SubmitBatch(batch interface{}) *MockDataAvailabilityLayerClient_SubmitBatch_Call { return &MockDataAvailabilityLayerClient_SubmitBatch_Call{Call: _e.mock.On("SubmitBatch", batch)} } @@ -417,17 +417,17 @@ func (_c *MockDataAvailabilityLayerClient_SubmitBatch_Call) RunAndReturn(run fun return _c } -// WaitForSyncing provides a mock function with given fields: + func (_m *MockDataAvailabilityLayerClient) WaitForSyncing() { _m.Called() } -// MockDataAvailabilityLayerClient_WaitForSyncing_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WaitForSyncing' + type MockDataAvailabilityLayerClient_WaitForSyncing_Call struct { *mock.Call } -// WaitForSyncing is a helper method to define mock.On call + func (_e *MockDataAvailabilityLayerClient_Expecter) WaitForSyncing() *MockDataAvailabilityLayerClient_WaitForSyncing_Call { return &MockDataAvailabilityLayerClient_WaitForSyncing_Call{Call: _e.mock.On("WaitForSyncing")} } @@ -449,8 +449,8 @@ func (_c *MockDataAvailabilityLayerClient_WaitForSyncing_Call) 
RunAndReturn(run return _c } -// NewMockDataAvailabilityLayerClient creates a new instance of MockDataAvailabilityLayerClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockDataAvailabilityLayerClient(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/p2p/mock_ProposerGetter.go b/mocks/github.com/dymensionxyz/dymint/p2p/mock_ProposerGetter.go index 5396f942b..de07e1a71 100644 --- a/mocks/github.com/dymensionxyz/dymint/p2p/mock_ProposerGetter.go +++ b/mocks/github.com/dymensionxyz/dymint/p2p/mock_ProposerGetter.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. + package p2p @@ -7,7 +7,7 @@ import ( crypto "github.com/tendermint/tendermint/crypto" ) -// MockProposerGetter is an autogenerated mock type for the ProposerGetter type + type MockProposerGetter struct { mock.Mock } @@ -20,7 +20,7 @@ func (_m *MockProposerGetter) EXPECT() *MockProposerGetter_Expecter { return &MockProposerGetter_Expecter{mock: &_m.Mock} } -// GetProposerPubKey provides a mock function with given fields: + func (_m *MockProposerGetter) GetProposerPubKey() crypto.PubKey { ret := _m.Called() @@ -40,12 +40,12 @@ func (_m *MockProposerGetter) GetProposerPubKey() crypto.PubKey { return r0 } -// MockProposerGetter_GetProposerPubKey_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProposerPubKey' + type MockProposerGetter_GetProposerPubKey_Call struct { *mock.Call } -// GetProposerPubKey is a helper method to define mock.On call + func (_e *MockProposerGetter_Expecter) GetProposerPubKey() *MockProposerGetter_GetProposerPubKey_Call { return &MockProposerGetter_GetProposerPubKey_Call{Call: _e.mock.On("GetProposerPubKey")} } @@ -67,7 +67,7 @@ func (_c *MockProposerGetter_GetProposerPubKey_Call) RunAndReturn(run func() cry return _c } -// GetRevision provides a mock function with given fields: + func (_m *MockProposerGetter) GetRevision() uint64 { ret := _m.Called() @@ -85,12 +85,12 @@ func (_m *MockProposerGetter) GetRevision() uint64 { return r0 } -// MockProposerGetter_GetRevision_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRevision' + type MockProposerGetter_GetRevision_Call struct { *mock.Call } -// GetRevision is a helper method to define mock.On call + func (_e *MockProposerGetter_Expecter) GetRevision() *MockProposerGetter_GetRevision_Call { return &MockProposerGetter_GetRevision_Call{Call: _e.mock.On("GetRevision")} } @@ -112,8 +112,8 @@ func (_c *MockProposerGetter_GetRevision_Call) RunAndReturn(run func() uint64) * return _c } -// NewMockProposerGetter creates a new instance of MockProposerGetter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockProposerGetter(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/p2p/mock_StateGetter.go b/mocks/github.com/dymensionxyz/dymint/p2p/mock_StateGetter.go index 4377638cb..477be16f8 100644 --- a/mocks/github.com/dymensionxyz/dymint/p2p/mock_StateGetter.go +++ b/mocks/github.com/dymensionxyz/dymint/p2p/mock_StateGetter.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. 
+ package p2p @@ -7,7 +7,7 @@ import ( crypto "github.com/tendermint/tendermint/crypto" ) -// MockStateGetter is an autogenerated mock type for the StateGetter type + type MockStateGetter struct { mock.Mock } @@ -20,7 +20,7 @@ func (_m *MockStateGetter) EXPECT() *MockStateGetter_Expecter { return &MockStateGetter_Expecter{mock: &_m.Mock} } -// GetProposerPubKey provides a mock function with given fields: + func (_m *MockStateGetter) GetProposerPubKey() crypto.PubKey { ret := _m.Called() @@ -40,12 +40,12 @@ func (_m *MockStateGetter) GetProposerPubKey() crypto.PubKey { return r0 } -// MockStateGetter_GetProposerPubKey_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProposerPubKey' + type MockStateGetter_GetProposerPubKey_Call struct { *mock.Call } -// GetProposerPubKey is a helper method to define mock.On call + func (_e *MockStateGetter_Expecter) GetProposerPubKey() *MockStateGetter_GetProposerPubKey_Call { return &MockStateGetter_GetProposerPubKey_Call{Call: _e.mock.On("GetProposerPubKey")} } @@ -67,7 +67,7 @@ func (_c *MockStateGetter_GetProposerPubKey_Call) RunAndReturn(run func() crypto return _c } -// GetRevision provides a mock function with given fields: + func (_m *MockStateGetter) GetRevision() uint64 { ret := _m.Called() @@ -85,12 +85,12 @@ func (_m *MockStateGetter) GetRevision() uint64 { return r0 } -// MockStateGetter_GetRevision_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRevision' + type MockStateGetter_GetRevision_Call struct { *mock.Call } -// GetRevision is a helper method to define mock.On call + func (_e *MockStateGetter_Expecter) GetRevision() *MockStateGetter_GetRevision_Call { return &MockStateGetter_GetRevision_Call{Call: _e.mock.On("GetRevision")} } @@ -112,8 +112,8 @@ func (_c *MockStateGetter_GetRevision_Call) RunAndReturn(run func() uint64) *Moc return _c } -// NewMockStateGetter creates a new instance of MockStateGetter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockStateGetter(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/settlement/dymension/mock_CosmosClient.go b/mocks/github.com/dymensionxyz/dymint/settlement/dymension/mock_CosmosClient.go index ade8efe9b..f79c856c1 100644 --- a/mocks/github.com/dymensionxyz/dymint/settlement/dymension/mock_CosmosClient.go +++ b/mocks/github.com/dymensionxyz/dymint/settlement/dymension/mock_CosmosClient.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. 
+ package dymension @@ -22,7 +22,7 @@ import ( types "github.com/cosmos/cosmos-sdk/types" ) -// MockCosmosClient is an autogenerated mock type for the CosmosClient type + type MockCosmosClient struct { mock.Mock } @@ -35,7 +35,7 @@ func (_m *MockCosmosClient) EXPECT() *MockCosmosClient_Expecter { return &MockCosmosClient_Expecter{mock: &_m.Mock} } -// BroadcastTx provides a mock function with given fields: accountName, msgs + func (_m *MockCosmosClient) BroadcastTx(accountName string, msgs ...types.Msg) (cosmosclient.Response, error) { _va := make([]interface{}, len(msgs)) for _i := range msgs { @@ -70,14 +70,14 @@ func (_m *MockCosmosClient) BroadcastTx(accountName string, msgs ...types.Msg) ( return r0, r1 } -// MockCosmosClient_BroadcastTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BroadcastTx' + type MockCosmosClient_BroadcastTx_Call struct { *mock.Call } -// BroadcastTx is a helper method to define mock.On call -// - accountName string -// - msgs ...types.Msg + + + func (_e *MockCosmosClient_Expecter) BroadcastTx(accountName interface{}, msgs ...interface{}) *MockCosmosClient_BroadcastTx_Call { return &MockCosmosClient_BroadcastTx_Call{Call: _e.mock.On("BroadcastTx", append([]interface{}{accountName}, msgs...)...)} @@ -106,7 +106,7 @@ func (_c *MockCosmosClient_BroadcastTx_Call) RunAndReturn(run func(string, ...ty return _c } -// Context provides a mock function with given fields: + func (_m *MockCosmosClient) Context() client.Context { ret := _m.Called() @@ -124,12 +124,12 @@ func (_m *MockCosmosClient) Context() client.Context { return r0 } -// MockCosmosClient_Context_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Context' + type MockCosmosClient_Context_Call struct { *mock.Call } -// Context is a helper method to define mock.On call + func (_e *MockCosmosClient_Expecter) Context() *MockCosmosClient_Context_Call { return &MockCosmosClient_Context_Call{Call: _e.mock.On("Context")} } @@ -151,7 +151,7 @@ func (_c *MockCosmosClient_Context_Call) RunAndReturn(run func() client.Context) return _c } -// EventListenerQuit provides a mock function with given fields: + func (_m *MockCosmosClient) EventListenerQuit() <-chan struct{} { ret := _m.Called() @@ -171,12 +171,12 @@ func (_m *MockCosmosClient) EventListenerQuit() <-chan struct{} { return r0 } -// MockCosmosClient_EventListenerQuit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EventListenerQuit' + type MockCosmosClient_EventListenerQuit_Call struct { *mock.Call } -// EventListenerQuit is a helper method to define mock.On call + func (_e *MockCosmosClient_Expecter) EventListenerQuit() *MockCosmosClient_EventListenerQuit_Call { return &MockCosmosClient_EventListenerQuit_Call{Call: _e.mock.On("EventListenerQuit")} } @@ -198,7 +198,7 @@ func (_c *MockCosmosClient_EventListenerQuit_Call) RunAndReturn(run func() <-cha return _c } -// GetAccount provides a mock function with given fields: accountName + func (_m *MockCosmosClient) GetAccount(accountName string) (cosmosaccount.Account, error) { ret := _m.Called(accountName) @@ -226,13 +226,13 @@ func (_m *MockCosmosClient) GetAccount(accountName string) (cosmosaccount.Accoun return r0, r1 } -// MockCosmosClient_GetAccount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAccount' + type MockCosmosClient_GetAccount_Call struct { *mock.Call } -// GetAccount is a helper method to define mock.On call -// - 
accountName string + + func (_e *MockCosmosClient_Expecter) GetAccount(accountName interface{}) *MockCosmosClient_GetAccount_Call { return &MockCosmosClient_GetAccount_Call{Call: _e.mock.On("GetAccount", accountName)} } @@ -254,7 +254,7 @@ func (_c *MockCosmosClient_GetAccount_Call) RunAndReturn(run func(string) (cosmo return _c } -// GetBalance provides a mock function with given fields: ctx, accountName, denom + func (_m *MockCosmosClient) GetBalance(ctx context.Context, accountName string, denom string) (*types.Coin, error) { ret := _m.Called(ctx, accountName, denom) @@ -284,15 +284,15 @@ func (_m *MockCosmosClient) GetBalance(ctx context.Context, accountName string, return r0, r1 } -// MockCosmosClient_GetBalance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBalance' + type MockCosmosClient_GetBalance_Call struct { *mock.Call } -// GetBalance is a helper method to define mock.On call -// - ctx context.Context -// - accountName string -// - denom string + + + + func (_e *MockCosmosClient_Expecter) GetBalance(ctx interface{}, accountName interface{}, denom interface{}) *MockCosmosClient_GetBalance_Call { return &MockCosmosClient_GetBalance_Call{Call: _e.mock.On("GetBalance", ctx, accountName, denom)} } @@ -314,7 +314,7 @@ func (_c *MockCosmosClient_GetBalance_Call) RunAndReturn(run func(context.Contex return _c } -// GetRollappClient provides a mock function with given fields: + func (_m *MockCosmosClient) GetRollappClient() rollapp.QueryClient { ret := _m.Called() @@ -334,12 +334,12 @@ func (_m *MockCosmosClient) GetRollappClient() rollapp.QueryClient { return r0 } -// MockCosmosClient_GetRollappClient_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRollappClient' + type MockCosmosClient_GetRollappClient_Call struct { *mock.Call } -// GetRollappClient is a helper method to define mock.On call + func (_e *MockCosmosClient_Expecter) GetRollappClient() *MockCosmosClient_GetRollappClient_Call { return &MockCosmosClient_GetRollappClient_Call{Call: _e.mock.On("GetRollappClient")} } @@ -361,7 +361,7 @@ func (_c *MockCosmosClient_GetRollappClient_Call) RunAndReturn(run func() rollap return _c } -// GetSequencerClient provides a mock function with given fields: + func (_m *MockCosmosClient) GetSequencerClient() sequencer.QueryClient { ret := _m.Called() @@ -381,12 +381,12 @@ func (_m *MockCosmosClient) GetSequencerClient() sequencer.QueryClient { return r0 } -// MockCosmosClient_GetSequencerClient_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSequencerClient' + type MockCosmosClient_GetSequencerClient_Call struct { *mock.Call } -// GetSequencerClient is a helper method to define mock.On call + func (_e *MockCosmosClient_Expecter) GetSequencerClient() *MockCosmosClient_GetSequencerClient_Call { return &MockCosmosClient_GetSequencerClient_Call{Call: _e.mock.On("GetSequencerClient")} } @@ -408,7 +408,7 @@ func (_c *MockCosmosClient_GetSequencerClient_Call) RunAndReturn(run func() sequ return _c } -// StartEventListener provides a mock function with given fields: + func (_m *MockCosmosClient) StartEventListener() error { ret := _m.Called() @@ -426,12 +426,12 @@ func (_m *MockCosmosClient) StartEventListener() error { return r0 } -// MockCosmosClient_StartEventListener_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StartEventListener' + type MockCosmosClient_StartEventListener_Call struct { *mock.Call } -// 
StartEventListener is a helper method to define mock.On call + func (_e *MockCosmosClient_Expecter) StartEventListener() *MockCosmosClient_StartEventListener_Call { return &MockCosmosClient_StartEventListener_Call{Call: _e.mock.On("StartEventListener")} } @@ -453,7 +453,7 @@ func (_c *MockCosmosClient_StartEventListener_Call) RunAndReturn(run func() erro return _c } -// StopEventListener provides a mock function with given fields: + func (_m *MockCosmosClient) StopEventListener() error { ret := _m.Called() @@ -471,12 +471,12 @@ func (_m *MockCosmosClient) StopEventListener() error { return r0 } -// MockCosmosClient_StopEventListener_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StopEventListener' + type MockCosmosClient_StopEventListener_Call struct { *mock.Call } -// StopEventListener is a helper method to define mock.On call + func (_e *MockCosmosClient_Expecter) StopEventListener() *MockCosmosClient_StopEventListener_Call { return &MockCosmosClient_StopEventListener_Call{Call: _e.mock.On("StopEventListener")} } @@ -498,7 +498,7 @@ func (_c *MockCosmosClient_StopEventListener_Call) RunAndReturn(run func() error return _c } -// SubscribeToEvents provides a mock function with given fields: ctx, subscriber, query, outCapacity + func (_m *MockCosmosClient) SubscribeToEvents(ctx context.Context, subscriber string, query string, outCapacity ...int) (<-chan coretypes.ResultEvent, error) { _va := make([]interface{}, len(outCapacity)) for _i := range outCapacity { @@ -535,16 +535,16 @@ func (_m *MockCosmosClient) SubscribeToEvents(ctx context.Context, subscriber st return r0, r1 } -// MockCosmosClient_SubscribeToEvents_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeToEvents' + type MockCosmosClient_SubscribeToEvents_Call struct { *mock.Call } -// SubscribeToEvents is a helper method to define mock.On call -// - ctx context.Context -// - subscriber string -// - query string -// - outCapacity ...int + + + + + func (_e *MockCosmosClient_Expecter) SubscribeToEvents(ctx interface{}, subscriber interface{}, query interface{}, outCapacity ...interface{}) *MockCosmosClient_SubscribeToEvents_Call { return &MockCosmosClient_SubscribeToEvents_Call{Call: _e.mock.On("SubscribeToEvents", append([]interface{}{ctx, subscriber, query}, outCapacity...)...)} @@ -573,7 +573,7 @@ func (_c *MockCosmosClient_SubscribeToEvents_Call) RunAndReturn(run func(context return _c } -// UnsubscribeAll provides a mock function with given fields: ctx, subscriber + func (_m *MockCosmosClient) UnsubscribeAll(ctx context.Context, subscriber string) error { ret := _m.Called(ctx, subscriber) @@ -591,14 +591,14 @@ func (_m *MockCosmosClient) UnsubscribeAll(ctx context.Context, subscriber strin return r0 } -// MockCosmosClient_UnsubscribeAll_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UnsubscribeAll' + type MockCosmosClient_UnsubscribeAll_Call struct { *mock.Call } -// UnsubscribeAll is a helper method to define mock.On call -// - ctx context.Context -// - subscriber string + + + func (_e *MockCosmosClient_Expecter) UnsubscribeAll(ctx interface{}, subscriber interface{}) *MockCosmosClient_UnsubscribeAll_Call { return &MockCosmosClient_UnsubscribeAll_Call{Call: _e.mock.On("UnsubscribeAll", ctx, subscriber)} } @@ -620,8 +620,8 @@ func (_c *MockCosmosClient_UnsubscribeAll_Call) RunAndReturn(run func(context.Co return _c } -// NewMockCosmosClient creates a new instance of MockCosmosClient. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockCosmosClient(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/settlement/mock_ClientI.go b/mocks/github.com/dymensionxyz/dymint/settlement/mock_ClientI.go index a609b4d42..c41be7a74 100644 --- a/mocks/github.com/dymensionxyz/dymint/settlement/mock_ClientI.go +++ b/mocks/github.com/dymensionxyz/dymint/settlement/mock_ClientI.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. + package settlement @@ -15,7 +15,7 @@ import ( types "github.com/dymensionxyz/dymint/types" ) -// MockClientI is an autogenerated mock type for the ClientI type + type MockClientI struct { mock.Mock } @@ -28,7 +28,7 @@ func (_m *MockClientI) EXPECT() *MockClientI_Expecter { return &MockClientI_Expecter{mock: &_m.Mock} } -// GetAllSequencers provides a mock function with given fields: + func (_m *MockClientI) GetAllSequencers() ([]types.Sequencer, error) { ret := _m.Called() @@ -58,12 +58,12 @@ func (_m *MockClientI) GetAllSequencers() ([]types.Sequencer, error) { return r0, r1 } -// MockClientI_GetAllSequencers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllSequencers' + type MockClientI_GetAllSequencers_Call struct { *mock.Call } -// GetAllSequencers is a helper method to define mock.On call + func (_e *MockClientI_Expecter) GetAllSequencers() *MockClientI_GetAllSequencers_Call { return &MockClientI_GetAllSequencers_Call{Call: _e.mock.On("GetAllSequencers")} } @@ -85,7 +85,7 @@ func (_c *MockClientI_GetAllSequencers_Call) RunAndReturn(run func() ([]types.Se return _c } -// GetBatchAtHeight provides a mock function with given fields: index + func (_m *MockClientI) GetBatchAtHeight(index uint64) (*settlement.ResultRetrieveBatch, error) { ret := _m.Called(index) @@ -115,13 +115,13 @@ func (_m *MockClientI) GetBatchAtHeight(index uint64) (*settlement.ResultRetriev return r0, r1 } -// MockClientI_GetBatchAtHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBatchAtHeight' + type MockClientI_GetBatchAtHeight_Call struct { *mock.Call } -// GetBatchAtHeight is a helper method to define mock.On call -// - index uint64 + + func (_e *MockClientI_Expecter) GetBatchAtHeight(index interface{}) *MockClientI_GetBatchAtHeight_Call { return &MockClientI_GetBatchAtHeight_Call{Call: _e.mock.On("GetBatchAtHeight", index)} } @@ -143,7 +143,7 @@ func (_c *MockClientI_GetBatchAtHeight_Call) RunAndReturn(run func(uint64) (*set return _c } -// GetBatchAtIndex provides a mock function with given fields: index + func (_m *MockClientI) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, error) { ret := _m.Called(index) @@ -173,13 +173,13 @@ func (_m *MockClientI) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieve return r0, r1 } -// MockClientI_GetBatchAtIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBatchAtIndex' + type MockClientI_GetBatchAtIndex_Call struct { *mock.Call } -// GetBatchAtIndex is a helper method to define mock.On call -// - index uint64 + + func (_e *MockClientI_Expecter) GetBatchAtIndex(index interface{}) *MockClientI_GetBatchAtIndex_Call { return &MockClientI_GetBatchAtIndex_Call{Call: _e.mock.On("GetBatchAtIndex", index)} } @@ -201,7 +201,7 @@ func (_c *MockClientI_GetBatchAtIndex_Call) RunAndReturn(run func(uint64) 
(*sett return _c } -// GetBondedSequencers provides a mock function with given fields: + func (_m *MockClientI) GetBondedSequencers() ([]types.Sequencer, error) { ret := _m.Called() @@ -231,12 +231,12 @@ func (_m *MockClientI) GetBondedSequencers() ([]types.Sequencer, error) { return r0, r1 } -// MockClientI_GetBondedSequencers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBondedSequencers' + type MockClientI_GetBondedSequencers_Call struct { *mock.Call } -// GetBondedSequencers is a helper method to define mock.On call + func (_e *MockClientI_Expecter) GetBondedSequencers() *MockClientI_GetBondedSequencers_Call { return &MockClientI_GetBondedSequencers_Call{Call: _e.mock.On("GetBondedSequencers")} } @@ -258,7 +258,7 @@ func (_c *MockClientI_GetBondedSequencers_Call) RunAndReturn(run func() ([]types return _c } -// GetLatestBatch provides a mock function with given fields: + func (_m *MockClientI) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { ret := _m.Called() @@ -288,12 +288,12 @@ func (_m *MockClientI) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) return r0, r1 } -// MockClientI_GetLatestBatch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestBatch' + type MockClientI_GetLatestBatch_Call struct { *mock.Call } -// GetLatestBatch is a helper method to define mock.On call + func (_e *MockClientI_Expecter) GetLatestBatch() *MockClientI_GetLatestBatch_Call { return &MockClientI_GetLatestBatch_Call{Call: _e.mock.On("GetLatestBatch")} } @@ -315,7 +315,7 @@ func (_c *MockClientI_GetLatestBatch_Call) RunAndReturn(run func() (*settlement. return _c } -// GetLatestFinalizedHeight provides a mock function with given fields: + func (_m *MockClientI) GetLatestFinalizedHeight() (uint64, error) { ret := _m.Called() @@ -343,12 +343,12 @@ func (_m *MockClientI) GetLatestFinalizedHeight() (uint64, error) { return r0, r1 } -// MockClientI_GetLatestFinalizedHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestFinalizedHeight' + type MockClientI_GetLatestFinalizedHeight_Call struct { *mock.Call } -// GetLatestFinalizedHeight is a helper method to define mock.On call + func (_e *MockClientI_Expecter) GetLatestFinalizedHeight() *MockClientI_GetLatestFinalizedHeight_Call { return &MockClientI_GetLatestFinalizedHeight_Call{Call: _e.mock.On("GetLatestFinalizedHeight")} } @@ -370,7 +370,7 @@ func (_c *MockClientI_GetLatestFinalizedHeight_Call) RunAndReturn(run func() (ui return _c } -// GetLatestHeight provides a mock function with given fields: + func (_m *MockClientI) GetLatestHeight() (uint64, error) { ret := _m.Called() @@ -398,12 +398,12 @@ func (_m *MockClientI) GetLatestHeight() (uint64, error) { return r0, r1 } -// MockClientI_GetLatestHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestHeight' + type MockClientI_GetLatestHeight_Call struct { *mock.Call } -// GetLatestHeight is a helper method to define mock.On call + func (_e *MockClientI_Expecter) GetLatestHeight() *MockClientI_GetLatestHeight_Call { return &MockClientI_GetLatestHeight_Call{Call: _e.mock.On("GetLatestHeight")} } @@ -425,7 +425,7 @@ func (_c *MockClientI_GetLatestHeight_Call) RunAndReturn(run func() (uint64, err return _c } -// GetNextProposer provides a mock function with given fields: + func (_m *MockClientI) GetNextProposer() (*types.Sequencer, error) { ret := _m.Called() @@ -455,12 +455,12 @@ func (_m 
*MockClientI) GetNextProposer() (*types.Sequencer, error) { return r0, r1 } -// MockClientI_GetNextProposer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNextProposer' + type MockClientI_GetNextProposer_Call struct { *mock.Call } -// GetNextProposer is a helper method to define mock.On call + func (_e *MockClientI_Expecter) GetNextProposer() *MockClientI_GetNextProposer_Call { return &MockClientI_GetNextProposer_Call{Call: _e.mock.On("GetNextProposer")} } @@ -482,7 +482,7 @@ func (_c *MockClientI_GetNextProposer_Call) RunAndReturn(run func() (*types.Sequ return _c } -// GetObsoleteDrs provides a mock function with given fields: + func (_m *MockClientI) GetObsoleteDrs() ([]uint32, error) { ret := _m.Called() @@ -512,12 +512,12 @@ func (_m *MockClientI) GetObsoleteDrs() ([]uint32, error) { return r0, r1 } -// MockClientI_GetObsoleteDrs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetObsoleteDrs' + type MockClientI_GetObsoleteDrs_Call struct { *mock.Call } -// GetObsoleteDrs is a helper method to define mock.On call + func (_e *MockClientI_Expecter) GetObsoleteDrs() *MockClientI_GetObsoleteDrs_Call { return &MockClientI_GetObsoleteDrs_Call{Call: _e.mock.On("GetObsoleteDrs")} } @@ -539,7 +539,7 @@ func (_c *MockClientI_GetObsoleteDrs_Call) RunAndReturn(run func() ([]uint32, er return _c } -// GetProposerAtHeight provides a mock function with given fields: height + func (_m *MockClientI) GetProposerAtHeight(height int64) (*types.Sequencer, error) { ret := _m.Called(height) @@ -569,13 +569,13 @@ func (_m *MockClientI) GetProposerAtHeight(height int64) (*types.Sequencer, erro return r0, r1 } -// MockClientI_GetProposerAtHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProposerAtHeight' + type MockClientI_GetProposerAtHeight_Call struct { *mock.Call } -// GetProposerAtHeight is a helper method to define mock.On call -// - height int64 + + func (_e *MockClientI_Expecter) GetProposerAtHeight(height interface{}) *MockClientI_GetProposerAtHeight_Call { return &MockClientI_GetProposerAtHeight_Call{Call: _e.mock.On("GetProposerAtHeight", height)} } @@ -597,7 +597,7 @@ func (_c *MockClientI_GetProposerAtHeight_Call) RunAndReturn(run func(int64) (*t return _c } -// GetRollapp provides a mock function with given fields: + func (_m *MockClientI) GetRollapp() (*types.Rollapp, error) { ret := _m.Called() @@ -627,12 +627,12 @@ func (_m *MockClientI) GetRollapp() (*types.Rollapp, error) { return r0, r1 } -// MockClientI_GetRollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRollapp' + type MockClientI_GetRollapp_Call struct { *mock.Call } -// GetRollapp is a helper method to define mock.On call + func (_e *MockClientI_Expecter) GetRollapp() *MockClientI_GetRollapp_Call { return &MockClientI_GetRollapp_Call{Call: _e.mock.On("GetRollapp")} } @@ -654,7 +654,7 @@ func (_c *MockClientI_GetRollapp_Call) RunAndReturn(run func() (*types.Rollapp, return _c } -// GetSequencerByAddress provides a mock function with given fields: address + func (_m *MockClientI) GetSequencerByAddress(address string) (types.Sequencer, error) { ret := _m.Called(address) @@ -682,13 +682,13 @@ func (_m *MockClientI) GetSequencerByAddress(address string) (types.Sequencer, e return r0, r1 } -// MockClientI_GetSequencerByAddress_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSequencerByAddress' + type 
MockClientI_GetSequencerByAddress_Call struct { *mock.Call } -// GetSequencerByAddress is a helper method to define mock.On call -// - address string + + func (_e *MockClientI_Expecter) GetSequencerByAddress(address interface{}) *MockClientI_GetSequencerByAddress_Call { return &MockClientI_GetSequencerByAddress_Call{Call: _e.mock.On("GetSequencerByAddress", address)} } @@ -710,7 +710,7 @@ func (_c *MockClientI_GetSequencerByAddress_Call) RunAndReturn(run func(string) return _c } -// GetSignerBalance provides a mock function with given fields: + func (_m *MockClientI) GetSignerBalance() (types.Balance, error) { ret := _m.Called() @@ -738,12 +738,12 @@ func (_m *MockClientI) GetSignerBalance() (types.Balance, error) { return r0, r1 } -// MockClientI_GetSignerBalance_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSignerBalance' + type MockClientI_GetSignerBalance_Call struct { *mock.Call } -// GetSignerBalance is a helper method to define mock.On call + func (_e *MockClientI_Expecter) GetSignerBalance() *MockClientI_GetSignerBalance_Call { return &MockClientI_GetSignerBalance_Call{Call: _e.mock.On("GetSignerBalance")} } @@ -765,7 +765,7 @@ func (_c *MockClientI_GetSignerBalance_Call) RunAndReturn(run func() (types.Bala return _c } -// Init provides a mock function with given fields: config, rollappId, _a2, logger, options + func (_m *MockClientI) Init(config settlement.Config, rollappId string, _a2 *pubsub.Server, logger types.Logger, options ...settlement.Option) error { _va := make([]interface{}, len(options)) for _i := range options { @@ -790,17 +790,17 @@ func (_m *MockClientI) Init(config settlement.Config, rollappId string, _a2 *pub return r0 } -// MockClientI_Init_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Init' + type MockClientI_Init_Call struct { *mock.Call } -// Init is a helper method to define mock.On call -// - config settlement.Config -// - rollappId string -// - _a2 *pubsub.Server -// - logger types.Logger -// - options ...settlement.Option + + + + + + func (_e *MockClientI_Expecter) Init(config interface{}, rollappId interface{}, _a2 interface{}, logger interface{}, options ...interface{}) *MockClientI_Init_Call { return &MockClientI_Init_Call{Call: _e.mock.On("Init", append([]interface{}{config, rollappId, _a2, logger}, options...)...)} @@ -829,7 +829,7 @@ func (_c *MockClientI_Init_Call) RunAndReturn(run func(settlement.Config, string return _c } -// Start provides a mock function with given fields: + func (_m *MockClientI) Start() error { ret := _m.Called() @@ -847,12 +847,12 @@ func (_m *MockClientI) Start() error { return r0 } -// MockClientI_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' + type MockClientI_Start_Call struct { *mock.Call } -// Start is a helper method to define mock.On call + func (_e *MockClientI_Expecter) Start() *MockClientI_Start_Call { return &MockClientI_Start_Call{Call: _e.mock.On("Start")} } @@ -874,7 +874,7 @@ func (_c *MockClientI_Start_Call) RunAndReturn(run func() error) *MockClientI_St return _c } -// Stop provides a mock function with given fields: + func (_m *MockClientI) Stop() error { ret := _m.Called() @@ -892,12 +892,12 @@ func (_m *MockClientI) Stop() error { return r0 } -// MockClientI_Stop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stop' + type MockClientI_Stop_Call struct { *mock.Call } -// Stop is a helper method to define mock.On 
call + func (_e *MockClientI_Expecter) Stop() *MockClientI_Stop_Call { return &MockClientI_Stop_Call{Call: _e.mock.On("Stop")} } @@ -919,7 +919,7 @@ func (_c *MockClientI_Stop_Call) RunAndReturn(run func() error) *MockClientI_Sto return _c } -// SubmitBatch provides a mock function with given fields: batch, daClient, daResult + func (_m *MockClientI) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *da.ResultSubmitBatch) error { ret := _m.Called(batch, daClient, daResult) @@ -937,15 +937,15 @@ func (_m *MockClientI) SubmitBatch(batch *types.Batch, daClient da.Client, daRes return r0 } -// MockClientI_SubmitBatch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubmitBatch' + type MockClientI_SubmitBatch_Call struct { *mock.Call } -// SubmitBatch is a helper method to define mock.On call -// - batch *types.Batch -// - daClient da.Client -// - daResult *da.ResultSubmitBatch + + + + func (_e *MockClientI_Expecter) SubmitBatch(batch interface{}, daClient interface{}, daResult interface{}) *MockClientI_SubmitBatch_Call { return &MockClientI_SubmitBatch_Call{Call: _e.mock.On("SubmitBatch", batch, daClient, daResult)} } @@ -967,7 +967,7 @@ func (_c *MockClientI_SubmitBatch_Call) RunAndReturn(run func(*types.Batch, da.C return _c } -// ValidateGenesisBridgeData provides a mock function with given fields: data + func (_m *MockClientI) ValidateGenesisBridgeData(data rollapp.GenesisBridgeData) error { ret := _m.Called(data) @@ -985,13 +985,13 @@ func (_m *MockClientI) ValidateGenesisBridgeData(data rollapp.GenesisBridgeData) return r0 } -// MockClientI_ValidateGenesisBridgeData_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ValidateGenesisBridgeData' + type MockClientI_ValidateGenesisBridgeData_Call struct { *mock.Call } -// ValidateGenesisBridgeData is a helper method to define mock.On call -// - data rollapp.GenesisBridgeData + + func (_e *MockClientI_Expecter) ValidateGenesisBridgeData(data interface{}) *MockClientI_ValidateGenesisBridgeData_Call { return &MockClientI_ValidateGenesisBridgeData_Call{Call: _e.mock.On("ValidateGenesisBridgeData", data)} } @@ -1013,8 +1013,8 @@ func (_c *MockClientI_ValidateGenesisBridgeData_Call) RunAndReturn(run func(roll return _c } -// NewMockClientI creates a new instance of MockClientI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockClientI(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/store/mock_Store.go b/mocks/github.com/dymensionxyz/dymint/store/mock_Store.go index 5035e135f..8ee0e6d75 100644 --- a/mocks/github.com/dymensionxyz/dymint/store/mock_Store.go +++ b/mocks/github.com/dymensionxyz/dymint/store/mock_Store.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. 
+ package store @@ -13,7 +13,7 @@ import ( types "github.com/dymensionxyz/dymint/types" ) -// MockStore is an autogenerated mock type for the Store type + type MockStore struct { mock.Mock } @@ -26,7 +26,7 @@ func (_m *MockStore) EXPECT() *MockStore_Expecter { return &MockStore_Expecter{mock: &_m.Mock} } -// Close provides a mock function with given fields: + func (_m *MockStore) Close() error { ret := _m.Called() @@ -44,12 +44,12 @@ func (_m *MockStore) Close() error { return r0 } -// MockStore_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close' + type MockStore_Close_Call struct { *mock.Call } -// Close is a helper method to define mock.On call + func (_e *MockStore_Expecter) Close() *MockStore_Close_Call { return &MockStore_Close_Call{Call: _e.mock.On("Close")} } @@ -71,7 +71,7 @@ func (_c *MockStore_Close_Call) RunAndReturn(run func() error) *MockStore_Close_ return _c } -// LoadBaseHeight provides a mock function with given fields: + func (_m *MockStore) LoadBaseHeight() (uint64, error) { ret := _m.Called() @@ -99,12 +99,12 @@ func (_m *MockStore) LoadBaseHeight() (uint64, error) { return r0, r1 } -// MockStore_LoadBaseHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadBaseHeight' + type MockStore_LoadBaseHeight_Call struct { *mock.Call } -// LoadBaseHeight is a helper method to define mock.On call + func (_e *MockStore_Expecter) LoadBaseHeight() *MockStore_LoadBaseHeight_Call { return &MockStore_LoadBaseHeight_Call{Call: _e.mock.On("LoadBaseHeight")} } @@ -126,7 +126,7 @@ func (_c *MockStore_LoadBaseHeight_Call) RunAndReturn(run func() (uint64, error) return _c } -// LoadBlock provides a mock function with given fields: height + func (_m *MockStore) LoadBlock(height uint64) (*types.Block, error) { ret := _m.Called(height) @@ -156,13 +156,13 @@ func (_m *MockStore) LoadBlock(height uint64) (*types.Block, error) { return r0, r1 } -// MockStore_LoadBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadBlock' + type MockStore_LoadBlock_Call struct { *mock.Call } -// LoadBlock is a helper method to define mock.On call -// - height uint64 + + func (_e *MockStore_Expecter) LoadBlock(height interface{}) *MockStore_LoadBlock_Call { return &MockStore_LoadBlock_Call{Call: _e.mock.On("LoadBlock", height)} } @@ -184,7 +184,7 @@ func (_c *MockStore_LoadBlock_Call) RunAndReturn(run func(uint64) (*types.Block, return _c } -// LoadBlockByHash provides a mock function with given fields: hash + func (_m *MockStore) LoadBlockByHash(hash [32]byte) (*types.Block, error) { ret := _m.Called(hash) @@ -214,13 +214,13 @@ func (_m *MockStore) LoadBlockByHash(hash [32]byte) (*types.Block, error) { return r0, r1 } -// MockStore_LoadBlockByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadBlockByHash' + type MockStore_LoadBlockByHash_Call struct { *mock.Call } -// LoadBlockByHash is a helper method to define mock.On call -// - hash [32]byte + + func (_e *MockStore_Expecter) LoadBlockByHash(hash interface{}) *MockStore_LoadBlockByHash_Call { return &MockStore_LoadBlockByHash_Call{Call: _e.mock.On("LoadBlockByHash", hash)} } @@ -242,7 +242,7 @@ func (_c *MockStore_LoadBlockByHash_Call) RunAndReturn(run func([32]byte) (*type return _c } -// LoadBlockCid provides a mock function with given fields: height + func (_m *MockStore) LoadBlockCid(height uint64) (cid.Cid, error) { ret := _m.Called(height) @@ -270,13 +270,13 
@@ func (_m *MockStore) LoadBlockCid(height uint64) (cid.Cid, error) { return r0, r1 } -// MockStore_LoadBlockCid_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadBlockCid' + type MockStore_LoadBlockCid_Call struct { *mock.Call } -// LoadBlockCid is a helper method to define mock.On call -// - height uint64 + + func (_e *MockStore_Expecter) LoadBlockCid(height interface{}) *MockStore_LoadBlockCid_Call { return &MockStore_LoadBlockCid_Call{Call: _e.mock.On("LoadBlockCid", height)} } @@ -298,7 +298,7 @@ func (_c *MockStore_LoadBlockCid_Call) RunAndReturn(run func(uint64) (cid.Cid, e return _c } -// LoadBlockResponses provides a mock function with given fields: height + func (_m *MockStore) LoadBlockResponses(height uint64) (*state.ABCIResponses, error) { ret := _m.Called(height) @@ -328,13 +328,13 @@ func (_m *MockStore) LoadBlockResponses(height uint64) (*state.ABCIResponses, er return r0, r1 } -// MockStore_LoadBlockResponses_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadBlockResponses' + type MockStore_LoadBlockResponses_Call struct { *mock.Call } -// LoadBlockResponses is a helper method to define mock.On call -// - height uint64 + + func (_e *MockStore_Expecter) LoadBlockResponses(height interface{}) *MockStore_LoadBlockResponses_Call { return &MockStore_LoadBlockResponses_Call{Call: _e.mock.On("LoadBlockResponses", height)} } @@ -356,7 +356,7 @@ func (_c *MockStore_LoadBlockResponses_Call) RunAndReturn(run func(uint64) (*sta return _c } -// LoadBlockSource provides a mock function with given fields: height + func (_m *MockStore) LoadBlockSource(height uint64) (types.BlockSource, error) { ret := _m.Called(height) @@ -384,13 +384,13 @@ func (_m *MockStore) LoadBlockSource(height uint64) (types.BlockSource, error) { return r0, r1 } -// MockStore_LoadBlockSource_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadBlockSource' + type MockStore_LoadBlockSource_Call struct { *mock.Call } -// LoadBlockSource is a helper method to define mock.On call -// - height uint64 + + func (_e *MockStore_Expecter) LoadBlockSource(height interface{}) *MockStore_LoadBlockSource_Call { return &MockStore_LoadBlockSource_Call{Call: _e.mock.On("LoadBlockSource", height)} } @@ -412,7 +412,7 @@ func (_c *MockStore_LoadBlockSource_Call) RunAndReturn(run func(uint64) (types.B return _c } -// LoadBlockSyncBaseHeight provides a mock function with given fields: + func (_m *MockStore) LoadBlockSyncBaseHeight() (uint64, error) { ret := _m.Called() @@ -440,12 +440,12 @@ func (_m *MockStore) LoadBlockSyncBaseHeight() (uint64, error) { return r0, r1 } -// MockStore_LoadBlockSyncBaseHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadBlockSyncBaseHeight' + type MockStore_LoadBlockSyncBaseHeight_Call struct { *mock.Call } -// LoadBlockSyncBaseHeight is a helper method to define mock.On call + func (_e *MockStore_Expecter) LoadBlockSyncBaseHeight() *MockStore_LoadBlockSyncBaseHeight_Call { return &MockStore_LoadBlockSyncBaseHeight_Call{Call: _e.mock.On("LoadBlockSyncBaseHeight")} } @@ -467,7 +467,7 @@ func (_c *MockStore_LoadBlockSyncBaseHeight_Call) RunAndReturn(run func() (uint6 return _c } -// LoadCommit provides a mock function with given fields: height + func (_m *MockStore) LoadCommit(height uint64) (*types.Commit, error) { ret := _m.Called(height) @@ -497,13 +497,13 @@ func (_m *MockStore) LoadCommit(height uint64) (*types.Commit, 
error) { return r0, r1 } -// MockStore_LoadCommit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadCommit' + type MockStore_LoadCommit_Call struct { *mock.Call } -// LoadCommit is a helper method to define mock.On call -// - height uint64 + + func (_e *MockStore_Expecter) LoadCommit(height interface{}) *MockStore_LoadCommit_Call { return &MockStore_LoadCommit_Call{Call: _e.mock.On("LoadCommit", height)} } @@ -525,7 +525,7 @@ func (_c *MockStore_LoadCommit_Call) RunAndReturn(run func(uint64) (*types.Commi return _c } -// LoadCommitByHash provides a mock function with given fields: hash + func (_m *MockStore) LoadCommitByHash(hash [32]byte) (*types.Commit, error) { ret := _m.Called(hash) @@ -555,13 +555,13 @@ func (_m *MockStore) LoadCommitByHash(hash [32]byte) (*types.Commit, error) { return r0, r1 } -// MockStore_LoadCommitByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadCommitByHash' + type MockStore_LoadCommitByHash_Call struct { *mock.Call } -// LoadCommitByHash is a helper method to define mock.On call -// - hash [32]byte + + func (_e *MockStore_Expecter) LoadCommitByHash(hash interface{}) *MockStore_LoadCommitByHash_Call { return &MockStore_LoadCommitByHash_Call{Call: _e.mock.On("LoadCommitByHash", hash)} } @@ -583,7 +583,7 @@ func (_c *MockStore_LoadCommitByHash_Call) RunAndReturn(run func([32]byte) (*typ return _c } -// LoadDRSVersion provides a mock function with given fields: height + func (_m *MockStore) LoadDRSVersion(height uint64) (uint32, error) { ret := _m.Called(height) @@ -611,13 +611,13 @@ func (_m *MockStore) LoadDRSVersion(height uint64) (uint32, error) { return r0, r1 } -// MockStore_LoadDRSVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadDRSVersion' + type MockStore_LoadDRSVersion_Call struct { *mock.Call } -// LoadDRSVersion is a helper method to define mock.On call -// - height uint64 + + func (_e *MockStore_Expecter) LoadDRSVersion(height interface{}) *MockStore_LoadDRSVersion_Call { return &MockStore_LoadDRSVersion_Call{Call: _e.mock.On("LoadDRSVersion", height)} } @@ -639,7 +639,7 @@ func (_c *MockStore_LoadDRSVersion_Call) RunAndReturn(run func(uint64) (uint32, return _c } -// LoadIndexerBaseHeight provides a mock function with given fields: + func (_m *MockStore) LoadIndexerBaseHeight() (uint64, error) { ret := _m.Called() @@ -667,12 +667,12 @@ func (_m *MockStore) LoadIndexerBaseHeight() (uint64, error) { return r0, r1 } -// MockStore_LoadIndexerBaseHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadIndexerBaseHeight' + type MockStore_LoadIndexerBaseHeight_Call struct { *mock.Call } -// LoadIndexerBaseHeight is a helper method to define mock.On call + func (_e *MockStore_Expecter) LoadIndexerBaseHeight() *MockStore_LoadIndexerBaseHeight_Call { return &MockStore_LoadIndexerBaseHeight_Call{Call: _e.mock.On("LoadIndexerBaseHeight")} } @@ -694,7 +694,7 @@ func (_c *MockStore_LoadIndexerBaseHeight_Call) RunAndReturn(run func() (uint64, return _c } -// LoadLastBlockSequencerSet provides a mock function with given fields: + func (_m *MockStore) LoadLastBlockSequencerSet() (types.Sequencers, error) { ret := _m.Called() @@ -724,12 +724,12 @@ func (_m *MockStore) LoadLastBlockSequencerSet() (types.Sequencers, error) { return r0, r1 } -// MockStore_LoadLastBlockSequencerSet_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 
'LoadLastBlockSequencerSet' + type MockStore_LoadLastBlockSequencerSet_Call struct { *mock.Call } -// LoadLastBlockSequencerSet is a helper method to define mock.On call + func (_e *MockStore_Expecter) LoadLastBlockSequencerSet() *MockStore_LoadLastBlockSequencerSet_Call { return &MockStore_LoadLastBlockSequencerSet_Call{Call: _e.mock.On("LoadLastBlockSequencerSet")} } @@ -751,7 +751,7 @@ func (_c *MockStore_LoadLastBlockSequencerSet_Call) RunAndReturn(run func() (typ return _c } -// LoadProposer provides a mock function with given fields: height + func (_m *MockStore) LoadProposer(height uint64) (types.Sequencer, error) { ret := _m.Called(height) @@ -779,13 +779,13 @@ func (_m *MockStore) LoadProposer(height uint64) (types.Sequencer, error) { return r0, r1 } -// MockStore_LoadProposer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadProposer' + type MockStore_LoadProposer_Call struct { *mock.Call } -// LoadProposer is a helper method to define mock.On call -// - height uint64 + + func (_e *MockStore_Expecter) LoadProposer(height interface{}) *MockStore_LoadProposer_Call { return &MockStore_LoadProposer_Call{Call: _e.mock.On("LoadProposer", height)} } @@ -807,7 +807,7 @@ func (_c *MockStore_LoadProposer_Call) RunAndReturn(run func(uint64) (types.Sequ return _c } -// LoadState provides a mock function with given fields: + func (_m *MockStore) LoadState() (*types.State, error) { ret := _m.Called() @@ -837,12 +837,12 @@ func (_m *MockStore) LoadState() (*types.State, error) { return r0, r1 } -// MockStore_LoadState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadState' + type MockStore_LoadState_Call struct { *mock.Call } -// LoadState is a helper method to define mock.On call + func (_e *MockStore_Expecter) LoadState() *MockStore_LoadState_Call { return &MockStore_LoadState_Call{Call: _e.mock.On("LoadState")} } @@ -864,7 +864,7 @@ func (_c *MockStore_LoadState_Call) RunAndReturn(run func() (*types.State, error return _c } -// LoadValidationHeight provides a mock function with given fields: + func (_m *MockStore) LoadValidationHeight() (uint64, error) { ret := _m.Called() @@ -892,12 +892,12 @@ func (_m *MockStore) LoadValidationHeight() (uint64, error) { return r0, r1 } -// MockStore_LoadValidationHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadValidationHeight' + type MockStore_LoadValidationHeight_Call struct { *mock.Call } -// LoadValidationHeight is a helper method to define mock.On call + func (_e *MockStore_Expecter) LoadValidationHeight() *MockStore_LoadValidationHeight_Call { return &MockStore_LoadValidationHeight_Call{Call: _e.mock.On("LoadValidationHeight")} } @@ -919,7 +919,7 @@ func (_c *MockStore_LoadValidationHeight_Call) RunAndReturn(run func() (uint64, return _c } -// NewBatch provides a mock function with given fields: + func (_m *MockStore) NewBatch() store.KVBatch { ret := _m.Called() @@ -939,12 +939,12 @@ func (_m *MockStore) NewBatch() store.KVBatch { return r0 } -// MockStore_NewBatch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewBatch' + type MockStore_NewBatch_Call struct { *mock.Call } -// NewBatch is a helper method to define mock.On call + func (_e *MockStore_Expecter) NewBatch() *MockStore_NewBatch_Call { return &MockStore_NewBatch_Call{Call: _e.mock.On("NewBatch")} } @@ -966,7 +966,7 @@ func (_c *MockStore_NewBatch_Call) RunAndReturn(run func() store.KVBatch) *MockS 
return _c } -// PruneStore provides a mock function with given fields: to, logger + func (_m *MockStore) PruneStore(to uint64, logger types.Logger) (uint64, error) { ret := _m.Called(to, logger) @@ -994,14 +994,14 @@ func (_m *MockStore) PruneStore(to uint64, logger types.Logger) (uint64, error) return r0, r1 } -// MockStore_PruneStore_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PruneStore' + type MockStore_PruneStore_Call struct { *mock.Call } -// PruneStore is a helper method to define mock.On call -// - to uint64 -// - logger types.Logger + + + func (_e *MockStore_Expecter) PruneStore(to interface{}, logger interface{}) *MockStore_PruneStore_Call { return &MockStore_PruneStore_Call{Call: _e.mock.On("PruneStore", to, logger)} } @@ -1023,7 +1023,7 @@ func (_c *MockStore_PruneStore_Call) RunAndReturn(run func(uint64, types.Logger) return _c } -// RemoveBlockCid provides a mock function with given fields: height + func (_m *MockStore) RemoveBlockCid(height uint64) error { ret := _m.Called(height) @@ -1041,13 +1041,13 @@ func (_m *MockStore) RemoveBlockCid(height uint64) error { return r0 } -// MockStore_RemoveBlockCid_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveBlockCid' + type MockStore_RemoveBlockCid_Call struct { *mock.Call } -// RemoveBlockCid is a helper method to define mock.On call -// - height uint64 + + func (_e *MockStore_Expecter) RemoveBlockCid(height interface{}) *MockStore_RemoveBlockCid_Call { return &MockStore_RemoveBlockCid_Call{Call: _e.mock.On("RemoveBlockCid", height)} } @@ -1069,7 +1069,7 @@ func (_c *MockStore_RemoveBlockCid_Call) RunAndReturn(run func(uint64) error) *M return _c } -// SaveBaseHeight provides a mock function with given fields: height + func (_m *MockStore) SaveBaseHeight(height uint64) error { ret := _m.Called(height) @@ -1087,13 +1087,13 @@ func (_m *MockStore) SaveBaseHeight(height uint64) error { return r0 } -// MockStore_SaveBaseHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveBaseHeight' + type MockStore_SaveBaseHeight_Call struct { *mock.Call } -// SaveBaseHeight is a helper method to define mock.On call -// - height uint64 + + func (_e *MockStore_Expecter) SaveBaseHeight(height interface{}) *MockStore_SaveBaseHeight_Call { return &MockStore_SaveBaseHeight_Call{Call: _e.mock.On("SaveBaseHeight", height)} } @@ -1115,7 +1115,7 @@ func (_c *MockStore_SaveBaseHeight_Call) RunAndReturn(run func(uint64) error) *M return _c } -// SaveBlock provides a mock function with given fields: block, commit, batch + func (_m *MockStore) SaveBlock(block *types.Block, commit *types.Commit, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(block, commit, batch) @@ -1145,15 +1145,15 @@ func (_m *MockStore) SaveBlock(block *types.Block, commit *types.Commit, batch s return r0, r1 } -// MockStore_SaveBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveBlock' + type MockStore_SaveBlock_Call struct { *mock.Call } -// SaveBlock is a helper method to define mock.On call -// - block *types.Block -// - commit *types.Commit -// - batch store.KVBatch + + + + func (_e *MockStore_Expecter) SaveBlock(block interface{}, commit interface{}, batch interface{}) *MockStore_SaveBlock_Call { return &MockStore_SaveBlock_Call{Call: _e.mock.On("SaveBlock", block, commit, batch)} } @@ -1175,7 +1175,7 @@ func (_c *MockStore_SaveBlock_Call) RunAndReturn(run func(*types.Block, 
*types.C return _c } -// SaveBlockCid provides a mock function with given fields: height, _a1, batch + func (_m *MockStore) SaveBlockCid(height uint64, _a1 cid.Cid, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, _a1, batch) @@ -1205,15 +1205,15 @@ func (_m *MockStore) SaveBlockCid(height uint64, _a1 cid.Cid, batch store.KVBatc return r0, r1 } -// MockStore_SaveBlockCid_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveBlockCid' + type MockStore_SaveBlockCid_Call struct { *mock.Call } -// SaveBlockCid is a helper method to define mock.On call -// - height uint64 -// - _a1 cid.Cid -// - batch store.KVBatch + + + + func (_e *MockStore_Expecter) SaveBlockCid(height interface{}, _a1 interface{}, batch interface{}) *MockStore_SaveBlockCid_Call { return &MockStore_SaveBlockCid_Call{Call: _e.mock.On("SaveBlockCid", height, _a1, batch)} } @@ -1235,7 +1235,7 @@ func (_c *MockStore_SaveBlockCid_Call) RunAndReturn(run func(uint64, cid.Cid, st return _c } -// SaveBlockResponses provides a mock function with given fields: height, responses, batch + func (_m *MockStore) SaveBlockResponses(height uint64, responses *state.ABCIResponses, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, responses, batch) @@ -1265,15 +1265,15 @@ func (_m *MockStore) SaveBlockResponses(height uint64, responses *state.ABCIResp return r0, r1 } -// MockStore_SaveBlockResponses_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveBlockResponses' + type MockStore_SaveBlockResponses_Call struct { *mock.Call } -// SaveBlockResponses is a helper method to define mock.On call -// - height uint64 -// - responses *state.ABCIResponses -// - batch store.KVBatch + + + + func (_e *MockStore_Expecter) SaveBlockResponses(height interface{}, responses interface{}, batch interface{}) *MockStore_SaveBlockResponses_Call { return &MockStore_SaveBlockResponses_Call{Call: _e.mock.On("SaveBlockResponses", height, responses, batch)} } @@ -1295,7 +1295,7 @@ func (_c *MockStore_SaveBlockResponses_Call) RunAndReturn(run func(uint64, *stat return _c } -// SaveBlockSource provides a mock function with given fields: height, source, batch + func (_m *MockStore) SaveBlockSource(height uint64, source types.BlockSource, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, source, batch) @@ -1325,15 +1325,15 @@ func (_m *MockStore) SaveBlockSource(height uint64, source types.BlockSource, ba return r0, r1 } -// MockStore_SaveBlockSource_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveBlockSource' + type MockStore_SaveBlockSource_Call struct { *mock.Call } -// SaveBlockSource is a helper method to define mock.On call -// - height uint64 -// - source types.BlockSource -// - batch store.KVBatch + + + + func (_e *MockStore_Expecter) SaveBlockSource(height interface{}, source interface{}, batch interface{}) *MockStore_SaveBlockSource_Call { return &MockStore_SaveBlockSource_Call{Call: _e.mock.On("SaveBlockSource", height, source, batch)} } @@ -1355,7 +1355,7 @@ func (_c *MockStore_SaveBlockSource_Call) RunAndReturn(run func(uint64, types.Bl return _c } -// SaveBlockSyncBaseHeight provides a mock function with given fields: height + func (_m *MockStore) SaveBlockSyncBaseHeight(height uint64) error { ret := _m.Called(height) @@ -1373,13 +1373,13 @@ func (_m *MockStore) SaveBlockSyncBaseHeight(height uint64) error { return r0 } -// MockStore_SaveBlockSyncBaseHeight_Call is a 
*mock.Call that shadows Run/Return methods with type explicit version for method 'SaveBlockSyncBaseHeight' + type MockStore_SaveBlockSyncBaseHeight_Call struct { *mock.Call } -// SaveBlockSyncBaseHeight is a helper method to define mock.On call -// - height uint64 + + func (_e *MockStore_Expecter) SaveBlockSyncBaseHeight(height interface{}) *MockStore_SaveBlockSyncBaseHeight_Call { return &MockStore_SaveBlockSyncBaseHeight_Call{Call: _e.mock.On("SaveBlockSyncBaseHeight", height)} } @@ -1401,7 +1401,7 @@ func (_c *MockStore_SaveBlockSyncBaseHeight_Call) RunAndReturn(run func(uint64) return _c } -// SaveDRSVersion provides a mock function with given fields: height, version, batch + func (_m *MockStore) SaveDRSVersion(height uint64, version uint32, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, version, batch) @@ -1431,15 +1431,15 @@ func (_m *MockStore) SaveDRSVersion(height uint64, version uint32, batch store.K return r0, r1 } -// MockStore_SaveDRSVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveDRSVersion' + type MockStore_SaveDRSVersion_Call struct { *mock.Call } -// SaveDRSVersion is a helper method to define mock.On call -// - height uint64 -// - version uint32 -// - batch store.KVBatch + + + + func (_e *MockStore_Expecter) SaveDRSVersion(height interface{}, version interface{}, batch interface{}) *MockStore_SaveDRSVersion_Call { return &MockStore_SaveDRSVersion_Call{Call: _e.mock.On("SaveDRSVersion", height, version, batch)} } @@ -1461,7 +1461,7 @@ func (_c *MockStore_SaveDRSVersion_Call) RunAndReturn(run func(uint64, uint32, s return _c } -// SaveIndexerBaseHeight provides a mock function with given fields: height + func (_m *MockStore) SaveIndexerBaseHeight(height uint64) error { ret := _m.Called(height) @@ -1479,13 +1479,13 @@ func (_m *MockStore) SaveIndexerBaseHeight(height uint64) error { return r0 } -// MockStore_SaveIndexerBaseHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveIndexerBaseHeight' + type MockStore_SaveIndexerBaseHeight_Call struct { *mock.Call } -// SaveIndexerBaseHeight is a helper method to define mock.On call -// - height uint64 + + func (_e *MockStore_Expecter) SaveIndexerBaseHeight(height interface{}) *MockStore_SaveIndexerBaseHeight_Call { return &MockStore_SaveIndexerBaseHeight_Call{Call: _e.mock.On("SaveIndexerBaseHeight", height)} } @@ -1507,7 +1507,7 @@ func (_c *MockStore_SaveIndexerBaseHeight_Call) RunAndReturn(run func(uint64) er return _c } -// SaveLastBlockSequencerSet provides a mock function with given fields: sequencers, batch + func (_m *MockStore) SaveLastBlockSequencerSet(sequencers types.Sequencers, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(sequencers, batch) @@ -1537,14 +1537,14 @@ func (_m *MockStore) SaveLastBlockSequencerSet(sequencers types.Sequencers, batc return r0, r1 } -// MockStore_SaveLastBlockSequencerSet_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveLastBlockSequencerSet' + type MockStore_SaveLastBlockSequencerSet_Call struct { *mock.Call } -// SaveLastBlockSequencerSet is a helper method to define mock.On call -// - sequencers types.Sequencers -// - batch store.KVBatch + + + func (_e *MockStore_Expecter) SaveLastBlockSequencerSet(sequencers interface{}, batch interface{}) *MockStore_SaveLastBlockSequencerSet_Call { return &MockStore_SaveLastBlockSequencerSet_Call{Call: _e.mock.On("SaveLastBlockSequencerSet", sequencers, batch)} } 
@@ -1566,7 +1566,7 @@ func (_c *MockStore_SaveLastBlockSequencerSet_Call) RunAndReturn(run func(types. return _c } -// SaveProposer provides a mock function with given fields: height, proposer, batch + func (_m *MockStore) SaveProposer(height uint64, proposer types.Sequencer, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, proposer, batch) @@ -1596,15 +1596,15 @@ func (_m *MockStore) SaveProposer(height uint64, proposer types.Sequencer, batch return r0, r1 } -// MockStore_SaveProposer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveProposer' + type MockStore_SaveProposer_Call struct { *mock.Call } -// SaveProposer is a helper method to define mock.On call -// - height uint64 -// - proposer types.Sequencer -// - batch store.KVBatch + + + + func (_e *MockStore_Expecter) SaveProposer(height interface{}, proposer interface{}, batch interface{}) *MockStore_SaveProposer_Call { return &MockStore_SaveProposer_Call{Call: _e.mock.On("SaveProposer", height, proposer, batch)} } @@ -1626,7 +1626,7 @@ func (_c *MockStore_SaveProposer_Call) RunAndReturn(run func(uint64, types.Seque return _c } -// SaveState provides a mock function with given fields: _a0, batch + func (_m *MockStore) SaveState(_a0 *types.State, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(_a0, batch) @@ -1656,14 +1656,14 @@ func (_m *MockStore) SaveState(_a0 *types.State, batch store.KVBatch) (store.KVB return r0, r1 } -// MockStore_SaveState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveState' + type MockStore_SaveState_Call struct { *mock.Call } -// SaveState is a helper method to define mock.On call -// - _a0 *types.State -// - batch store.KVBatch + + + func (_e *MockStore_Expecter) SaveState(_a0 interface{}, batch interface{}) *MockStore_SaveState_Call { return &MockStore_SaveState_Call{Call: _e.mock.On("SaveState", _a0, batch)} } @@ -1685,7 +1685,7 @@ func (_c *MockStore_SaveState_Call) RunAndReturn(run func(*types.State, store.KV return _c } -// SaveValidationHeight provides a mock function with given fields: height, batch + func (_m *MockStore) SaveValidationHeight(height uint64, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, batch) @@ -1715,14 +1715,14 @@ func (_m *MockStore) SaveValidationHeight(height uint64, batch store.KVBatch) (s return r0, r1 } -// MockStore_SaveValidationHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveValidationHeight' + type MockStore_SaveValidationHeight_Call struct { *mock.Call } -// SaveValidationHeight is a helper method to define mock.On call -// - height uint64 -// - batch store.KVBatch + + + func (_e *MockStore_Expecter) SaveValidationHeight(height interface{}, batch interface{}) *MockStore_SaveValidationHeight_Call { return &MockStore_SaveValidationHeight_Call{Call: _e.mock.On("SaveValidationHeight", height, batch)} } @@ -1744,8 +1744,8 @@ func (_c *MockStore_SaveValidationHeight_Call) RunAndReturn(run func(uint64, sto return _c } -// NewMockStore creates a new instance of MockStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
+ + func NewMockStore(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/third_party/dymension/sequencer/types/mock_QueryClient.go b/mocks/github.com/dymensionxyz/dymint/third_party/dymension/sequencer/types/mock_QueryClient.go index c2ce005b5..775ec233d 100644 --- a/mocks/github.com/dymensionxyz/dymint/third_party/dymension/sequencer/types/mock_QueryClient.go +++ b/mocks/github.com/dymensionxyz/dymint/third_party/dymension/sequencer/types/mock_QueryClient.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. + package types @@ -12,7 +12,7 @@ import ( types "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer" ) -// MockQueryClient is an autogenerated mock type for the QueryClient type + type MockQueryClient struct { mock.Mock } @@ -25,7 +25,7 @@ func (_m *MockQueryClient) EXPECT() *MockQueryClient_Expecter { return &MockQueryClient_Expecter{mock: &_m.Mock} } -// GetNextProposerByRollapp provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) GetNextProposerByRollapp(ctx context.Context, in *types.QueryGetNextProposerByRollappRequest, opts ...grpc.CallOption) (*types.QueryGetNextProposerByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -62,15 +62,15 @@ func (_m *MockQueryClient) GetNextProposerByRollapp(ctx context.Context, in *typ return r0, r1 } -// MockQueryClient_GetNextProposerByRollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNextProposerByRollapp' + type MockQueryClient_GetNextProposerByRollapp_Call struct { *mock.Call } -// GetNextProposerByRollapp is a helper method to define mock.On call -// - ctx context.Context -// - in *types.QueryGetNextProposerByRollappRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) GetNextProposerByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_GetNextProposerByRollapp_Call { return &MockQueryClient_GetNextProposerByRollapp_Call{Call: _e.mock.On("GetNextProposerByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -99,7 +99,7 @@ func (_c *MockQueryClient_GetNextProposerByRollapp_Call) RunAndReturn(run func(c return _c } -// GetProposerByRollapp provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) GetProposerByRollapp(ctx context.Context, in *types.QueryGetProposerByRollappRequest, opts ...grpc.CallOption) (*types.QueryGetProposerByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -136,15 +136,15 @@ func (_m *MockQueryClient) GetProposerByRollapp(ctx context.Context, in *types.Q return r0, r1 } -// MockQueryClient_GetProposerByRollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProposerByRollapp' + type MockQueryClient_GetProposerByRollapp_Call struct { *mock.Call } -// GetProposerByRollapp is a helper method to define mock.On call -// - ctx context.Context -// - in *types.QueryGetProposerByRollappRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) GetProposerByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_GetProposerByRollapp_Call { return &MockQueryClient_GetProposerByRollapp_Call{Call: _e.mock.On("GetProposerByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -173,7 +173,7 @@ func (_c *MockQueryClient_GetProposerByRollapp_Call) RunAndReturn(run func(conte return _c } -// Params provides a 
mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) Params(ctx context.Context, in *types.QueryParamsRequest, opts ...grpc.CallOption) (*types.QueryParamsResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -210,15 +210,15 @@ func (_m *MockQueryClient) Params(ctx context.Context, in *types.QueryParamsRequ return r0, r1 } -// MockQueryClient_Params_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Params' + type MockQueryClient_Params_Call struct { *mock.Call } -// Params is a helper method to define mock.On call -// - ctx context.Context -// - in *types.QueryParamsRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) Params(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Params_Call { return &MockQueryClient_Params_Call{Call: _e.mock.On("Params", append([]interface{}{ctx, in}, opts...)...)} @@ -247,7 +247,7 @@ func (_c *MockQueryClient_Params_Call) RunAndReturn(run func(context.Context, *t return _c } -// Sequencer provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) Sequencer(ctx context.Context, in *types.QueryGetSequencerRequest, opts ...grpc.CallOption) (*types.QueryGetSequencerResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -284,15 +284,15 @@ func (_m *MockQueryClient) Sequencer(ctx context.Context, in *types.QueryGetSequ return r0, r1 } -// MockQueryClient_Sequencer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sequencer' + type MockQueryClient_Sequencer_Call struct { *mock.Call } -// Sequencer is a helper method to define mock.On call -// - ctx context.Context -// - in *types.QueryGetSequencerRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) Sequencer(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Sequencer_Call { return &MockQueryClient_Sequencer_Call{Call: _e.mock.On("Sequencer", append([]interface{}{ctx, in}, opts...)...)} @@ -321,7 +321,7 @@ func (_c *MockQueryClient_Sequencer_Call) RunAndReturn(run func(context.Context, return _c } -// Sequencers provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) Sequencers(ctx context.Context, in *types.QuerySequencersRequest, opts ...grpc.CallOption) (*types.QuerySequencersResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -358,15 +358,15 @@ func (_m *MockQueryClient) Sequencers(ctx context.Context, in *types.QuerySequen return r0, r1 } -// MockQueryClient_Sequencers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sequencers' + type MockQueryClient_Sequencers_Call struct { *mock.Call } -// Sequencers is a helper method to define mock.On call -// - ctx context.Context -// - in *types.QuerySequencersRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) Sequencers(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Sequencers_Call { return &MockQueryClient_Sequencers_Call{Call: _e.mock.On("Sequencers", append([]interface{}{ctx, in}, opts...)...)} @@ -395,7 +395,7 @@ func (_c *MockQueryClient_Sequencers_Call) RunAndReturn(run func(context.Context return _c } -// SequencersByRollapp provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) SequencersByRollapp(ctx context.Context, in *types.QueryGetSequencersByRollappRequest, opts ...grpc.CallOption)
(*types.QueryGetSequencersByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -432,15 +432,15 @@ func (_m *MockQueryClient) SequencersByRollapp(ctx context.Context, in *types.Qu return r0, r1 } -// MockQueryClient_SequencersByRollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SequencersByRollapp' + type MockQueryClient_SequencersByRollapp_Call struct { *mock.Call } -// SequencersByRollapp is a helper method to define mock.On call -// - ctx context.Context -// - in *types.QueryGetSequencersByRollappRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) SequencersByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_SequencersByRollapp_Call { return &MockQueryClient_SequencersByRollapp_Call{Call: _e.mock.On("SequencersByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -469,7 +469,7 @@ func (_c *MockQueryClient_SequencersByRollapp_Call) RunAndReturn(run func(contex return _c } -// SequencersByRollappByStatus provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) SequencersByRollappByStatus(ctx context.Context, in *types.QueryGetSequencersByRollappByStatusRequest, opts ...grpc.CallOption) (*types.QueryGetSequencersByRollappByStatusResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -506,15 +506,15 @@ func (_m *MockQueryClient) SequencersByRollappByStatus(ctx context.Context, in * return r0, r1 } -// MockQueryClient_SequencersByRollappByStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SequencersByRollappByStatus' + type MockQueryClient_SequencersByRollappByStatus_Call struct { *mock.Call } -// SequencersByRollappByStatus is a helper method to define mock.On call -// - ctx context.Context -// - in *types.QueryGetSequencersByRollappByStatusRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) SequencersByRollappByStatus(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_SequencersByRollappByStatus_Call { return &MockQueryClient_SequencersByRollappByStatus_Call{Call: _e.mock.On("SequencersByRollappByStatus", append([]interface{}{ctx, in}, opts...)...)} @@ -543,8 +543,8 @@ func (_c *MockQueryClient_SequencersByRollappByStatus_Call) RunAndReturn(run fun return _c } -// NewMockQueryClient creates a new instance of MockQueryClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockQueryClient(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp/mock_QueryClient.go b/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp/mock_QueryClient.go index 80d7ab986..c73eb7ea5 100644 --- a/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp/mock_QueryClient.go +++ b/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp/mock_QueryClient.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. 
+ package rollapp @@ -12,7 +12,7 @@ import ( rollapp "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp" ) -// MockQueryClient is an autogenerated mock type for the QueryClient type + type MockQueryClient struct { mock.Mock } @@ -25,7 +25,7 @@ func (_m *MockQueryClient) EXPECT() *MockQueryClient_Expecter { return &MockQueryClient_Expecter{mock: &_m.Mock} } -// LatestHeight provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) LatestHeight(ctx context.Context, in *rollapp.QueryGetLatestHeightRequest, opts ...grpc.CallOption) (*rollapp.QueryGetLatestHeightResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -62,15 +62,15 @@ func (_m *MockQueryClient) LatestHeight(ctx context.Context, in *rollapp.QueryGe return r0, r1 } -// MockQueryClient_LatestHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LatestHeight' + type MockQueryClient_LatestHeight_Call struct { *mock.Call } -// LatestHeight is a helper method to define mock.On call -// - ctx context.Context -// - in *rollapp.QueryGetLatestHeightRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) LatestHeight(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_LatestHeight_Call { return &MockQueryClient_LatestHeight_Call{Call: _e.mock.On("LatestHeight", append([]interface{}{ctx, in}, opts...)...)} @@ -99,7 +99,7 @@ func (_c *MockQueryClient_LatestHeight_Call) RunAndReturn(run func(context.Conte return _c } -// LatestStateIndex provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) LatestStateIndex(ctx context.Context, in *rollapp.QueryGetLatestStateIndexRequest, opts ...grpc.CallOption) (*rollapp.QueryGetLatestStateIndexResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -136,15 +136,15 @@ func (_m *MockQueryClient) LatestStateIndex(ctx context.Context, in *rollapp.Que return r0, r1 } -// MockQueryClient_LatestStateIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LatestStateIndex' + type MockQueryClient_LatestStateIndex_Call struct { *mock.Call } -// LatestStateIndex is a helper method to define mock.On call -// - ctx context.Context -// - in *rollapp.QueryGetLatestStateIndexRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) LatestStateIndex(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_LatestStateIndex_Call { return &MockQueryClient_LatestStateIndex_Call{Call: _e.mock.On("LatestStateIndex", append([]interface{}{ctx, in}, opts...)...)} @@ -173,7 +173,7 @@ func (_c *MockQueryClient_LatestStateIndex_Call) RunAndReturn(run func(context.C return _c } -// ObsoleteDRSVersions provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) ObsoleteDRSVersions(ctx context.Context, in *rollapp.QueryObsoleteDRSVersionsRequest, opts ...grpc.CallOption) (*rollapp.QueryObsoleteDRSVersionsResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -210,15 +210,15 @@ func (_m *MockQueryClient) ObsoleteDRSVersions(ctx context.Context, in *rollapp. 
return r0, r1 } -// MockQueryClient_ObsoleteDRSVersions_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ObsoleteDRSVersions' + type MockQueryClient_ObsoleteDRSVersions_Call struct { *mock.Call } -// ObsoleteDRSVersions is a helper method to define mock.On call -// - ctx context.Context -// - in *rollapp.QueryObsoleteDRSVersionsRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) ObsoleteDRSVersions(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_ObsoleteDRSVersions_Call { return &MockQueryClient_ObsoleteDRSVersions_Call{Call: _e.mock.On("ObsoleteDRSVersions", append([]interface{}{ctx, in}, opts...)...)} @@ -247,7 +247,7 @@ func (_c *MockQueryClient_ObsoleteDRSVersions_Call) RunAndReturn(run func(contex return _c } -// Params provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) Params(ctx context.Context, in *rollapp.QueryParamsRequest, opts ...grpc.CallOption) (*rollapp.QueryParamsResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -284,15 +284,15 @@ func (_m *MockQueryClient) Params(ctx context.Context, in *rollapp.QueryParamsRe return r0, r1 } -// MockQueryClient_Params_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Params' + type MockQueryClient_Params_Call struct { *mock.Call } -// Params is a helper method to define mock.On call -// - ctx context.Context -// - in *rollapp.QueryParamsRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) Params(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Params_Call { return &MockQueryClient_Params_Call{Call: _e.mock.On("Params", append([]interface{}{ctx, in}, opts...)...)} @@ -321,7 +321,7 @@ func (_c *MockQueryClient_Params_Call) RunAndReturn(run func(context.Context, *r return _c } -// RegisteredDenoms provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) RegisteredDenoms(ctx context.Context, in *rollapp.QueryRegisteredDenomsRequest, opts ...grpc.CallOption) (*rollapp.QueryRegisteredDenomsResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -358,15 +358,15 @@ func (_m *MockQueryClient) RegisteredDenoms(ctx context.Context, in *rollapp.Que return r0, r1 } -// MockQueryClient_RegisteredDenoms_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RegisteredDenoms' + type MockQueryClient_RegisteredDenoms_Call struct { *mock.Call } -// RegisteredDenoms is a helper method to define mock.On call -// - ctx context.Context -// - in *rollapp.QueryRegisteredDenomsRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) RegisteredDenoms(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_RegisteredDenoms_Call { return &MockQueryClient_RegisteredDenoms_Call{Call: _e.mock.On("RegisteredDenoms", append([]interface{}{ctx, in}, opts...)...)} @@ -395,7 +395,7 @@ func (_c *MockQueryClient_RegisteredDenoms_Call) RunAndReturn(run func(context.C return _c } -// Rollapp provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) Rollapp(ctx context.Context, in *rollapp.QueryGetRollappRequest, opts ...grpc.CallOption) (*rollapp.QueryGetRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -432,15 +432,15 @@ func (_m *MockQueryClient) Rollapp(ctx context.Context, in *rollapp.QueryGetRoll return r0, r1 } -// 
MockQueryClient_Rollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Rollapp' + type MockQueryClient_Rollapp_Call struct { *mock.Call } -// Rollapp is a helper method to define mock.On call -// - ctx context.Context -// - in *rollapp.QueryGetRollappRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) Rollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Rollapp_Call { return &MockQueryClient_Rollapp_Call{Call: _e.mock.On("Rollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -469,7 +469,7 @@ func (_c *MockQueryClient_Rollapp_Call) RunAndReturn(run func(context.Context, * return _c } -// RollappAll provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) RollappAll(ctx context.Context, in *rollapp.QueryAllRollappRequest, opts ...grpc.CallOption) (*rollapp.QueryAllRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -506,15 +506,15 @@ func (_m *MockQueryClient) RollappAll(ctx context.Context, in *rollapp.QueryAllR return r0, r1 } -// MockQueryClient_RollappAll_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RollappAll' + type MockQueryClient_RollappAll_Call struct { *mock.Call } -// RollappAll is a helper method to define mock.On call -// - ctx context.Context -// - in *rollapp.QueryAllRollappRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) RollappAll(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_RollappAll_Call { return &MockQueryClient_RollappAll_Call{Call: _e.mock.On("RollappAll", append([]interface{}{ctx, in}, opts...)...)} @@ -543,7 +543,7 @@ func (_c *MockQueryClient_RollappAll_Call) RunAndReturn(run func(context.Context return _c } -// RollappByEIP155 provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) RollappByEIP155(ctx context.Context, in *rollapp.QueryGetRollappByEIP155Request, opts ...grpc.CallOption) (*rollapp.QueryGetRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -580,15 +580,15 @@ func (_m *MockQueryClient) RollappByEIP155(ctx context.Context, in *rollapp.Quer return r0, r1 } -// MockQueryClient_RollappByEIP155_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RollappByEIP155' + type MockQueryClient_RollappByEIP155_Call struct { *mock.Call } -// RollappByEIP155 is a helper method to define mock.On call -// - ctx context.Context -// - in *rollapp.QueryGetRollappByEIP155Request -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) RollappByEIP155(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_RollappByEIP155_Call { return &MockQueryClient_RollappByEIP155_Call{Call: _e.mock.On("RollappByEIP155", append([]interface{}{ctx, in}, opts...)...)} @@ -617,7 +617,7 @@ func (_c *MockQueryClient_RollappByEIP155_Call) RunAndReturn(run func(context.Co return _c } -// StateInfo provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) StateInfo(ctx context.Context, in *rollapp.QueryGetStateInfoRequest, opts ...grpc.CallOption) (*rollapp.QueryGetStateInfoResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -654,15 +654,15 @@ func (_m *MockQueryClient) StateInfo(ctx context.Context, in *rollapp.QueryGetSt return r0, r1 } -// MockQueryClient_StateInfo_Call is a *mock.Call that shadows Run/Return methods with 
type explicit version for method 'StateInfo' + type MockQueryClient_StateInfo_Call struct { *mock.Call } -// StateInfo is a helper method to define mock.On call -// - ctx context.Context -// - in *rollapp.QueryGetStateInfoRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) StateInfo(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_StateInfo_Call { return &MockQueryClient_StateInfo_Call{Call: _e.mock.On("StateInfo", append([]interface{}{ctx, in}, opts...)...)} @@ -691,7 +691,7 @@ func (_c *MockQueryClient_StateInfo_Call) RunAndReturn(run func(context.Context, return _c } -// ValidateGenesisBridge provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) ValidateGenesisBridge(ctx context.Context, in *rollapp.QueryValidateGenesisBridgeRequest, opts ...grpc.CallOption) (*rollapp.QueryValidateGenesisBridgeResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -728,15 +728,15 @@ func (_m *MockQueryClient) ValidateGenesisBridge(ctx context.Context, in *rollap return r0, r1 } -// MockQueryClient_ValidateGenesisBridge_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ValidateGenesisBridge' + type MockQueryClient_ValidateGenesisBridge_Call struct { *mock.Call } -// ValidateGenesisBridge is a helper method to define mock.On call -// - ctx context.Context -// - in *rollapp.QueryValidateGenesisBridgeRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) ValidateGenesisBridge(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_ValidateGenesisBridge_Call { return &MockQueryClient_ValidateGenesisBridge_Call{Call: _e.mock.On("ValidateGenesisBridge", append([]interface{}{ctx, in}, opts...)...)} @@ -765,8 +765,8 @@ func (_c *MockQueryClient_ValidateGenesisBridge_Call) RunAndReturn(run func(cont return _c } -// NewMockQueryClient creates a new instance of MockQueryClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockQueryClient(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer/mock_QueryClient.go b/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer/mock_QueryClient.go index af5bcaf4b..0b76b1a9b 100644 --- a/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer/mock_QueryClient.go +++ b/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer/mock_QueryClient.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. 
+ package sequencer @@ -12,7 +12,7 @@ import ( sequencer "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer" ) -// MockQueryClient is an autogenerated mock type for the QueryClient type + type MockQueryClient struct { mock.Mock } @@ -25,7 +25,7 @@ func (_m *MockQueryClient) EXPECT() *MockQueryClient_Expecter { return &MockQueryClient_Expecter{mock: &_m.Mock} } -// GetNextProposerByRollapp provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) GetNextProposerByRollapp(ctx context.Context, in *sequencer.QueryGetNextProposerByRollappRequest, opts ...grpc.CallOption) (*sequencer.QueryGetNextProposerByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -62,15 +62,15 @@ func (_m *MockQueryClient) GetNextProposerByRollapp(ctx context.Context, in *seq return r0, r1 } -// MockQueryClient_GetNextProposerByRollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNextProposerByRollapp' + type MockQueryClient_GetNextProposerByRollapp_Call struct { *mock.Call } -// GetNextProposerByRollapp is a helper method to define mock.On call -// - ctx context.Context -// - in *sequencer.QueryGetNextProposerByRollappRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) GetNextProposerByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_GetNextProposerByRollapp_Call { return &MockQueryClient_GetNextProposerByRollapp_Call{Call: _e.mock.On("GetNextProposerByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -99,7 +99,7 @@ func (_c *MockQueryClient_GetNextProposerByRollapp_Call) RunAndReturn(run func(c return _c } -// GetProposerByRollapp provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) GetProposerByRollapp(ctx context.Context, in *sequencer.QueryGetProposerByRollappRequest, opts ...grpc.CallOption) (*sequencer.QueryGetProposerByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -136,15 +136,15 @@ func (_m *MockQueryClient) GetProposerByRollapp(ctx context.Context, in *sequenc return r0, r1 } -// MockQueryClient_GetProposerByRollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProposerByRollapp' + type MockQueryClient_GetProposerByRollapp_Call struct { *mock.Call } -// GetProposerByRollapp is a helper method to define mock.On call -// - ctx context.Context -// - in *sequencer.QueryGetProposerByRollappRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) GetProposerByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_GetProposerByRollapp_Call { return &MockQueryClient_GetProposerByRollapp_Call{Call: _e.mock.On("GetProposerByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -173,7 +173,7 @@ func (_c *MockQueryClient_GetProposerByRollapp_Call) RunAndReturn(run func(conte return _c } -// Params provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) Params(ctx context.Context, in *sequencer.QueryParamsRequest, opts ...grpc.CallOption) (*sequencer.QueryParamsResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -210,15 +210,15 @@ func (_m *MockQueryClient) Params(ctx context.Context, in *sequencer.QueryParams return r0, r1 } -// MockQueryClient_Params_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Params' + type 
MockQueryClient_Params_Call struct { *mock.Call } -// Params is a helper method to define mock.On call -// - ctx context.Context -// - in *sequencer.QueryParamsRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) Params(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Params_Call { return &MockQueryClient_Params_Call{Call: _e.mock.On("Params", append([]interface{}{ctx, in}, opts...)...)} @@ -247,7 +247,7 @@ func (_c *MockQueryClient_Params_Call) RunAndReturn(run func(context.Context, *s return _c } -// Proposers provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) Proposers(ctx context.Context, in *sequencer.QueryProposersRequest, opts ...grpc.CallOption) (*sequencer.QueryProposersResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -284,15 +284,15 @@ func (_m *MockQueryClient) Proposers(ctx context.Context, in *sequencer.QueryPro return r0, r1 } -// MockQueryClient_Proposers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Proposers' + type MockQueryClient_Proposers_Call struct { *mock.Call } -// Proposers is a helper method to define mock.On call -// - ctx context.Context -// - in *sequencer.QueryProposersRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) Proposers(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Proposers_Call { return &MockQueryClient_Proposers_Call{Call: _e.mock.On("Proposers", append([]interface{}{ctx, in}, opts...)...)} @@ -321,7 +321,7 @@ func (_c *MockQueryClient_Proposers_Call) RunAndReturn(run func(context.Context, return _c } -// Sequencer provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) Sequencer(ctx context.Context, in *sequencer.QueryGetSequencerRequest, opts ...grpc.CallOption) (*sequencer.QueryGetSequencerResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -358,15 +358,15 @@ func (_m *MockQueryClient) Sequencer(ctx context.Context, in *sequencer.QueryGet return r0, r1 } -// MockQueryClient_Sequencer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sequencer' + type MockQueryClient_Sequencer_Call struct { *mock.Call } -// Sequencer is a helper method to define mock.On call -// - ctx context.Context -// - in *sequencer.QueryGetSequencerRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) Sequencer(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Sequencer_Call { return &MockQueryClient_Sequencer_Call{Call: _e.mock.On("Sequencer", append([]interface{}{ctx, in}, opts...)...)} @@ -395,7 +395,7 @@ func (_c *MockQueryClient_Sequencer_Call) RunAndReturn(run func(context.Context, return _c } -// Sequencers provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) Sequencers(ctx context.Context, in *sequencer.QuerySequencersRequest, opts ...grpc.CallOption) (*sequencer.QuerySequencersResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -432,15 +432,15 @@ func (_m *MockQueryClient) Sequencers(ctx context.Context, in *sequencer.QuerySe return r0, r1 } -// MockQueryClient_Sequencers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sequencers' + type MockQueryClient_Sequencers_Call struct { *mock.Call } -// Sequencers is a helper method to define mock.On call -// - ctx context.Context -// - in 
*sequencer.QuerySequencersRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) Sequencers(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Sequencers_Call { return &MockQueryClient_Sequencers_Call{Call: _e.mock.On("Sequencers", append([]interface{}{ctx, in}, opts...)...)} @@ -469,7 +469,7 @@ func (_c *MockQueryClient_Sequencers_Call) RunAndReturn(run func(context.Context return _c } -// SequencersByRollapp provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) SequencersByRollapp(ctx context.Context, in *sequencer.QueryGetSequencersByRollappRequest, opts ...grpc.CallOption) (*sequencer.QueryGetSequencersByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -506,15 +506,15 @@ func (_m *MockQueryClient) SequencersByRollapp(ctx context.Context, in *sequence return r0, r1 } -// MockQueryClient_SequencersByRollapp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SequencersByRollapp' + type MockQueryClient_SequencersByRollapp_Call struct { *mock.Call } -// SequencersByRollapp is a helper method to define mock.On call -// - ctx context.Context -// - in *sequencer.QueryGetSequencersByRollappRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) SequencersByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_SequencersByRollapp_Call { return &MockQueryClient_SequencersByRollapp_Call{Call: _e.mock.On("SequencersByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -543,7 +543,7 @@ func (_c *MockQueryClient_SequencersByRollapp_Call) RunAndReturn(run func(contex return _c } -// SequencersByRollappByStatus provides a mock function with given fields: ctx, in, opts + func (_m *MockQueryClient) SequencersByRollappByStatus(ctx context.Context, in *sequencer.QueryGetSequencersByRollappByStatusRequest, opts ...grpc.CallOption) (*sequencer.QueryGetSequencersByRollappByStatusResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -580,15 +580,15 @@ func (_m *MockQueryClient) SequencersByRollappByStatus(ctx context.Context, in * return r0, r1 } -// MockQueryClient_SequencersByRollappByStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SequencersByRollappByStatus' + type MockQueryClient_SequencersByRollappByStatus_Call struct { *mock.Call } -// SequencersByRollappByStatus is a helper method to define mock.On call -// - ctx context.Context -// - in *sequencer.QueryGetSequencersByRollappByStatusRequest -// - opts ...grpc.CallOption + + + + func (_e *MockQueryClient_Expecter) SequencersByRollappByStatus(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_SequencersByRollappByStatus_Call { return &MockQueryClient_SequencersByRollappByStatus_Call{Call: _e.mock.On("SequencersByRollappByStatus", append([]interface{}{ctx, in}, opts...)...)} @@ -617,8 +617,8 @@ func (_c *MockQueryClient_SequencersByRollappByStatus_Call) RunAndReturn(run fun return _c } -// NewMockQueryClient creates a new instance of MockQueryClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
+ + func NewMockQueryClient(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/tendermint/tendermint/abci/types/mock_Application.go b/mocks/github.com/tendermint/tendermint/abci/types/mock_Application.go index 7393ef94e..db13fb1e2 100644 --- a/mocks/github.com/tendermint/tendermint/abci/types/mock_Application.go +++ b/mocks/github.com/tendermint/tendermint/abci/types/mock_Application.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. + package types @@ -7,7 +7,7 @@ import ( types "github.com/tendermint/tendermint/abci/types" ) -// MockApplication is an autogenerated mock type for the Application type + type MockApplication struct { mock.Mock } @@ -20,7 +20,7 @@ func (_m *MockApplication) EXPECT() *MockApplication_Expecter { return &MockApplication_Expecter{mock: &_m.Mock} } -// ApplySnapshotChunk provides a mock function with given fields: _a0 + func (_m *MockApplication) ApplySnapshotChunk(_a0 types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk { ret := _m.Called(_a0) @@ -38,13 +38,13 @@ func (_m *MockApplication) ApplySnapshotChunk(_a0 types.RequestApplySnapshotChun return r0 } -// MockApplication_ApplySnapshotChunk_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ApplySnapshotChunk' + type MockApplication_ApplySnapshotChunk_Call struct { *mock.Call } -// ApplySnapshotChunk is a helper method to define mock.On call -// - _a0 types.RequestApplySnapshotChunk + + func (_e *MockApplication_Expecter) ApplySnapshotChunk(_a0 interface{}) *MockApplication_ApplySnapshotChunk_Call { return &MockApplication_ApplySnapshotChunk_Call{Call: _e.mock.On("ApplySnapshotChunk", _a0)} } @@ -66,7 +66,7 @@ func (_c *MockApplication_ApplySnapshotChunk_Call) RunAndReturn(run func(types.R return _c } -// BeginBlock provides a mock function with given fields: _a0 + func (_m *MockApplication) BeginBlock(_a0 types.RequestBeginBlock) types.ResponseBeginBlock { ret := _m.Called(_a0) @@ -84,13 +84,13 @@ func (_m *MockApplication) BeginBlock(_a0 types.RequestBeginBlock) types.Respons return r0 } -// MockApplication_BeginBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BeginBlock' + type MockApplication_BeginBlock_Call struct { *mock.Call } -// BeginBlock is a helper method to define mock.On call -// - _a0 types.RequestBeginBlock + + func (_e *MockApplication_Expecter) BeginBlock(_a0 interface{}) *MockApplication_BeginBlock_Call { return &MockApplication_BeginBlock_Call{Call: _e.mock.On("BeginBlock", _a0)} } @@ -112,7 +112,7 @@ func (_c *MockApplication_BeginBlock_Call) RunAndReturn(run func(types.RequestBe return _c } -// CheckTx provides a mock function with given fields: _a0 + func (_m *MockApplication) CheckTx(_a0 types.RequestCheckTx) types.ResponseCheckTx { ret := _m.Called(_a0) @@ -130,13 +130,13 @@ func (_m *MockApplication) CheckTx(_a0 types.RequestCheckTx) types.ResponseCheck return r0 } -// MockApplication_CheckTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckTx' + type MockApplication_CheckTx_Call struct { *mock.Call } -// CheckTx is a helper method to define mock.On call -// - _a0 types.RequestCheckTx + + func (_e *MockApplication_Expecter) CheckTx(_a0 interface{}) *MockApplication_CheckTx_Call { return &MockApplication_CheckTx_Call{Call: _e.mock.On("CheckTx", _a0)} } @@ -158,7 +158,7 @@ func (_c *MockApplication_CheckTx_Call) RunAndReturn(run func(types.RequestCheck return _c } -// Commit provides a mock 
function with given fields: + func (_m *MockApplication) Commit() types.ResponseCommit { ret := _m.Called() @@ -176,12 +176,12 @@ func (_m *MockApplication) Commit() types.ResponseCommit { return r0 } -// MockApplication_Commit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Commit' + type MockApplication_Commit_Call struct { *mock.Call } -// Commit is a helper method to define mock.On call + func (_e *MockApplication_Expecter) Commit() *MockApplication_Commit_Call { return &MockApplication_Commit_Call{Call: _e.mock.On("Commit")} } @@ -203,7 +203,7 @@ func (_c *MockApplication_Commit_Call) RunAndReturn(run func() types.ResponseCom return _c } -// DeliverTx provides a mock function with given fields: _a0 + func (_m *MockApplication) DeliverTx(_a0 types.RequestDeliverTx) types.ResponseDeliverTx { ret := _m.Called(_a0) @@ -221,13 +221,13 @@ func (_m *MockApplication) DeliverTx(_a0 types.RequestDeliverTx) types.ResponseD return r0 } -// MockApplication_DeliverTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeliverTx' + type MockApplication_DeliverTx_Call struct { *mock.Call } -// DeliverTx is a helper method to define mock.On call -// - _a0 types.RequestDeliverTx + + func (_e *MockApplication_Expecter) DeliverTx(_a0 interface{}) *MockApplication_DeliverTx_Call { return &MockApplication_DeliverTx_Call{Call: _e.mock.On("DeliverTx", _a0)} } @@ -249,7 +249,7 @@ func (_c *MockApplication_DeliverTx_Call) RunAndReturn(run func(types.RequestDel return _c } -// EndBlock provides a mock function with given fields: _a0 + func (_m *MockApplication) EndBlock(_a0 types.RequestEndBlock) types.ResponseEndBlock { ret := _m.Called(_a0) @@ -267,13 +267,13 @@ func (_m *MockApplication) EndBlock(_a0 types.RequestEndBlock) types.ResponseEnd return r0 } -// MockApplication_EndBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EndBlock' + type MockApplication_EndBlock_Call struct { *mock.Call } -// EndBlock is a helper method to define mock.On call -// - _a0 types.RequestEndBlock + + func (_e *MockApplication_Expecter) EndBlock(_a0 interface{}) *MockApplication_EndBlock_Call { return &MockApplication_EndBlock_Call{Call: _e.mock.On("EndBlock", _a0)} } @@ -295,7 +295,7 @@ func (_c *MockApplication_EndBlock_Call) RunAndReturn(run func(types.RequestEndB return _c } -// Info provides a mock function with given fields: _a0 + func (_m *MockApplication) Info(_a0 types.RequestInfo) types.ResponseInfo { ret := _m.Called(_a0) @@ -313,13 +313,13 @@ func (_m *MockApplication) Info(_a0 types.RequestInfo) types.ResponseInfo { return r0 } -// MockApplication_Info_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Info' + type MockApplication_Info_Call struct { *mock.Call } -// Info is a helper method to define mock.On call -// - _a0 types.RequestInfo + + func (_e *MockApplication_Expecter) Info(_a0 interface{}) *MockApplication_Info_Call { return &MockApplication_Info_Call{Call: _e.mock.On("Info", _a0)} } @@ -341,7 +341,7 @@ func (_c *MockApplication_Info_Call) RunAndReturn(run func(types.RequestInfo) ty return _c } -// InitChain provides a mock function with given fields: _a0 + func (_m *MockApplication) InitChain(_a0 types.RequestInitChain) types.ResponseInitChain { ret := _m.Called(_a0) @@ -359,13 +359,13 @@ func (_m *MockApplication) InitChain(_a0 types.RequestInitChain) types.ResponseI return r0 } -// MockApplication_InitChain_Call is a *mock.Call that 
shadows Run/Return methods with type explicit version for method 'InitChain' + type MockApplication_InitChain_Call struct { *mock.Call } -// InitChain is a helper method to define mock.On call -// - _a0 types.RequestInitChain + + func (_e *MockApplication_Expecter) InitChain(_a0 interface{}) *MockApplication_InitChain_Call { return &MockApplication_InitChain_Call{Call: _e.mock.On("InitChain", _a0)} } @@ -387,7 +387,7 @@ func (_c *MockApplication_InitChain_Call) RunAndReturn(run func(types.RequestIni return _c } -// ListSnapshots provides a mock function with given fields: _a0 + func (_m *MockApplication) ListSnapshots(_a0 types.RequestListSnapshots) types.ResponseListSnapshots { ret := _m.Called(_a0) @@ -405,13 +405,13 @@ func (_m *MockApplication) ListSnapshots(_a0 types.RequestListSnapshots) types.R return r0 } -// MockApplication_ListSnapshots_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListSnapshots' + type MockApplication_ListSnapshots_Call struct { *mock.Call } -// ListSnapshots is a helper method to define mock.On call -// - _a0 types.RequestListSnapshots + + func (_e *MockApplication_Expecter) ListSnapshots(_a0 interface{}) *MockApplication_ListSnapshots_Call { return &MockApplication_ListSnapshots_Call{Call: _e.mock.On("ListSnapshots", _a0)} } @@ -433,7 +433,7 @@ func (_c *MockApplication_ListSnapshots_Call) RunAndReturn(run func(types.Reques return _c } -// LoadSnapshotChunk provides a mock function with given fields: _a0 + func (_m *MockApplication) LoadSnapshotChunk(_a0 types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk { ret := _m.Called(_a0) @@ -451,13 +451,13 @@ func (_m *MockApplication) LoadSnapshotChunk(_a0 types.RequestLoadSnapshotChunk) return r0 } -// MockApplication_LoadSnapshotChunk_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LoadSnapshotChunk' + type MockApplication_LoadSnapshotChunk_Call struct { *mock.Call } -// LoadSnapshotChunk is a helper method to define mock.On call -// - _a0 types.RequestLoadSnapshotChunk + + func (_e *MockApplication_Expecter) LoadSnapshotChunk(_a0 interface{}) *MockApplication_LoadSnapshotChunk_Call { return &MockApplication_LoadSnapshotChunk_Call{Call: _e.mock.On("LoadSnapshotChunk", _a0)} } @@ -479,7 +479,7 @@ func (_c *MockApplication_LoadSnapshotChunk_Call) RunAndReturn(run func(types.Re return _c } -// OfferSnapshot provides a mock function with given fields: _a0 + func (_m *MockApplication) OfferSnapshot(_a0 types.RequestOfferSnapshot) types.ResponseOfferSnapshot { ret := _m.Called(_a0) @@ -497,13 +497,13 @@ func (_m *MockApplication) OfferSnapshot(_a0 types.RequestOfferSnapshot) types.R return r0 } -// MockApplication_OfferSnapshot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OfferSnapshot' + type MockApplication_OfferSnapshot_Call struct { *mock.Call } -// OfferSnapshot is a helper method to define mock.On call -// - _a0 types.RequestOfferSnapshot + + func (_e *MockApplication_Expecter) OfferSnapshot(_a0 interface{}) *MockApplication_OfferSnapshot_Call { return &MockApplication_OfferSnapshot_Call{Call: _e.mock.On("OfferSnapshot", _a0)} } @@ -525,7 +525,7 @@ func (_c *MockApplication_OfferSnapshot_Call) RunAndReturn(run func(types.Reques return _c } -// Query provides a mock function with given fields: _a0 + func (_m *MockApplication) Query(_a0 types.RequestQuery) types.ResponseQuery { ret := _m.Called(_a0) @@ -543,13 +543,13 @@ func (_m *MockApplication) Query(_a0 
types.RequestQuery) types.ResponseQuery { return r0 } -// MockApplication_Query_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Query' + type MockApplication_Query_Call struct { *mock.Call } -// Query is a helper method to define mock.On call -// - _a0 types.RequestQuery + + func (_e *MockApplication_Expecter) Query(_a0 interface{}) *MockApplication_Query_Call { return &MockApplication_Query_Call{Call: _e.mock.On("Query", _a0)} } @@ -571,7 +571,7 @@ func (_c *MockApplication_Query_Call) RunAndReturn(run func(types.RequestQuery) return _c } -// SetOption provides a mock function with given fields: _a0 + func (_m *MockApplication) SetOption(_a0 types.RequestSetOption) types.ResponseSetOption { ret := _m.Called(_a0) @@ -589,13 +589,13 @@ func (_m *MockApplication) SetOption(_a0 types.RequestSetOption) types.ResponseS return r0 } -// MockApplication_SetOption_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetOption' + type MockApplication_SetOption_Call struct { *mock.Call } -// SetOption is a helper method to define mock.On call -// - _a0 types.RequestSetOption + + func (_e *MockApplication_Expecter) SetOption(_a0 interface{}) *MockApplication_SetOption_Call { return &MockApplication_SetOption_Call{Call: _e.mock.On("SetOption", _a0)} } @@ -617,8 +617,8 @@ func (_c *MockApplication_SetOption_Call) RunAndReturn(run func(types.RequestSet return _c } -// NewMockApplication creates a new instance of MockApplication. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. + + func NewMockApplication(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/tendermint/tendermint/proxy/mock_AppConnConsensus.go b/mocks/github.com/tendermint/tendermint/proxy/mock_AppConnConsensus.go index 9ec6b2d18..fc03566e5 100644 --- a/mocks/github.com/tendermint/tendermint/proxy/mock_AppConnConsensus.go +++ b/mocks/github.com/tendermint/tendermint/proxy/mock_AppConnConsensus.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. + package proxy @@ -9,7 +9,7 @@ import ( types "github.com/tendermint/tendermint/abci/types" ) -// MockAppConnConsensus is an autogenerated mock type for the AppConnConsensus type + type MockAppConnConsensus struct { mock.Mock } @@ -22,7 +22,7 @@ func (_m *MockAppConnConsensus) EXPECT() *MockAppConnConsensus_Expecter { return &MockAppConnConsensus_Expecter{mock: &_m.Mock} } -// BeginBlockSync provides a mock function with given fields: _a0 + func (_m *MockAppConnConsensus) BeginBlockSync(_a0 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { ret := _m.Called(_a0) @@ -52,13 +52,13 @@ func (_m *MockAppConnConsensus) BeginBlockSync(_a0 types.RequestBeginBlock) (*ty return r0, r1 } -// MockAppConnConsensus_BeginBlockSync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BeginBlockSync' + type MockAppConnConsensus_BeginBlockSync_Call struct { *mock.Call } -// BeginBlockSync is a helper method to define mock.On call -// - _a0 types.RequestBeginBlock + + func (_e *MockAppConnConsensus_Expecter) BeginBlockSync(_a0 interface{}) *MockAppConnConsensus_BeginBlockSync_Call { return &MockAppConnConsensus_BeginBlockSync_Call{Call: _e.mock.On("BeginBlockSync", _a0)} } @@ -80,7 +80,7 @@ func (_c *MockAppConnConsensus_BeginBlockSync_Call) RunAndReturn(run func(types. 
return _c } -// CommitSync provides a mock function with given fields: + func (_m *MockAppConnConsensus) CommitSync() (*types.ResponseCommit, error) { ret := _m.Called() @@ -110,12 +110,12 @@ func (_m *MockAppConnConsensus) CommitSync() (*types.ResponseCommit, error) { return r0, r1 } -// MockAppConnConsensus_CommitSync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CommitSync' + type MockAppConnConsensus_CommitSync_Call struct { *mock.Call } -// CommitSync is a helper method to define mock.On call + func (_e *MockAppConnConsensus_Expecter) CommitSync() *MockAppConnConsensus_CommitSync_Call { return &MockAppConnConsensus_CommitSync_Call{Call: _e.mock.On("CommitSync")} } @@ -137,7 +137,7 @@ func (_c *MockAppConnConsensus_CommitSync_Call) RunAndReturn(run func() (*types. return _c } -// DeliverTxAsync provides a mock function with given fields: _a0 + func (_m *MockAppConnConsensus) DeliverTxAsync(_a0 types.RequestDeliverTx) *abcicli.ReqRes { ret := _m.Called(_a0) @@ -157,13 +157,13 @@ func (_m *MockAppConnConsensus) DeliverTxAsync(_a0 types.RequestDeliverTx) *abci return r0 } -// MockAppConnConsensus_DeliverTxAsync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeliverTxAsync' + type MockAppConnConsensus_DeliverTxAsync_Call struct { *mock.Call } -// DeliverTxAsync is a helper method to define mock.On call -// - _a0 types.RequestDeliverTx + + func (_e *MockAppConnConsensus_Expecter) DeliverTxAsync(_a0 interface{}) *MockAppConnConsensus_DeliverTxAsync_Call { return &MockAppConnConsensus_DeliverTxAsync_Call{Call: _e.mock.On("DeliverTxAsync", _a0)} } @@ -185,7 +185,7 @@ func (_c *MockAppConnConsensus_DeliverTxAsync_Call) RunAndReturn(run func(types. return _c } -// EndBlockSync provides a mock function with given fields: _a0 + func (_m *MockAppConnConsensus) EndBlockSync(_a0 types.RequestEndBlock) (*types.ResponseEndBlock, error) { ret := _m.Called(_a0) @@ -215,13 +215,13 @@ func (_m *MockAppConnConsensus) EndBlockSync(_a0 types.RequestEndBlock) (*types. 
return r0, r1 } -// MockAppConnConsensus_EndBlockSync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EndBlockSync' + type MockAppConnConsensus_EndBlockSync_Call struct { *mock.Call } -// EndBlockSync is a helper method to define mock.On call -// - _a0 types.RequestEndBlock + + func (_e *MockAppConnConsensus_Expecter) EndBlockSync(_a0 interface{}) *MockAppConnConsensus_EndBlockSync_Call { return &MockAppConnConsensus_EndBlockSync_Call{Call: _e.mock.On("EndBlockSync", _a0)} } @@ -243,7 +243,7 @@ func (_c *MockAppConnConsensus_EndBlockSync_Call) RunAndReturn(run func(types.Re return _c } -// Error provides a mock function with given fields: + func (_m *MockAppConnConsensus) Error() error { ret := _m.Called() @@ -261,12 +261,12 @@ func (_m *MockAppConnConsensus) Error() error { return r0 } -// MockAppConnConsensus_Error_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Error' + type MockAppConnConsensus_Error_Call struct { *mock.Call } -// Error is a helper method to define mock.On call + func (_e *MockAppConnConsensus_Expecter) Error() *MockAppConnConsensus_Error_Call { return &MockAppConnConsensus_Error_Call{Call: _e.mock.On("Error")} } @@ -288,7 +288,7 @@ func (_c *MockAppConnConsensus_Error_Call) RunAndReturn(run func() error) *MockA return _c } -// InitChainSync provides a mock function with given fields: _a0 + func (_m *MockAppConnConsensus) InitChainSync(_a0 types.RequestInitChain) (*types.ResponseInitChain, error) { ret := _m.Called(_a0) @@ -318,13 +318,13 @@ func (_m *MockAppConnConsensus) InitChainSync(_a0 types.RequestInitChain) (*type return r0, r1 } -// MockAppConnConsensus_InitChainSync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InitChainSync' + type MockAppConnConsensus_InitChainSync_Call struct { *mock.Call } -// InitChainSync is a helper method to define mock.On call -// - _a0 types.RequestInitChain + + func (_e *MockAppConnConsensus_Expecter) InitChainSync(_a0 interface{}) *MockAppConnConsensus_InitChainSync_Call { return &MockAppConnConsensus_InitChainSync_Call{Call: _e.mock.On("InitChainSync", _a0)} } @@ -346,18 +346,18 @@ func (_c *MockAppConnConsensus_InitChainSync_Call) RunAndReturn(run func(types.R return _c } -// SetResponseCallback provides a mock function with given fields: _a0 + func (_m *MockAppConnConsensus) SetResponseCallback(_a0 abcicli.Callback) { _m.Called(_a0) } -// MockAppConnConsensus_SetResponseCallback_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetResponseCallback' + type MockAppConnConsensus_SetResponseCallback_Call struct { *mock.Call } -// SetResponseCallback is a helper method to define mock.On call -// - _a0 abcicli.Callback + + func (_e *MockAppConnConsensus_Expecter) SetResponseCallback(_a0 interface{}) *MockAppConnConsensus_SetResponseCallback_Call { return &MockAppConnConsensus_SetResponseCallback_Call{Call: _e.mock.On("SetResponseCallback", _a0)} } @@ -379,8 +379,8 @@ func (_c *MockAppConnConsensus_SetResponseCallback_Call) RunAndReturn(run func(a return _c } -// NewMockAppConnConsensus creates a new instance of MockAppConnConsensus. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
+ + func NewMockAppConnConsensus(t interface { mock.TestingT Cleanup(func()) diff --git a/mocks/github.com/tendermint/tendermint/proxy/mock_AppConns.go b/mocks/github.com/tendermint/tendermint/proxy/mock_AppConns.go index affc90a4e..ea1b7934a 100644 --- a/mocks/github.com/tendermint/tendermint/proxy/mock_AppConns.go +++ b/mocks/github.com/tendermint/tendermint/proxy/mock_AppConns.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.42.3. DO NOT EDIT. + package proxy @@ -9,7 +9,7 @@ import ( proxy "github.com/tendermint/tendermint/proxy" ) -// MockAppConns is an autogenerated mock type for the AppConns type + type MockAppConns struct { mock.Mock } @@ -22,7 +22,7 @@ func (_m *MockAppConns) EXPECT() *MockAppConns_Expecter { return &MockAppConns_Expecter{mock: &_m.Mock} } -// Consensus provides a mock function with given fields: + func (_m *MockAppConns) Consensus() proxy.AppConnConsensus { ret := _m.Called() @@ -42,12 +42,12 @@ func (_m *MockAppConns) Consensus() proxy.AppConnConsensus { return r0 } -// MockAppConns_Consensus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Consensus' + type MockAppConns_Consensus_Call struct { *mock.Call } -// Consensus is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) Consensus() *MockAppConns_Consensus_Call { return &MockAppConns_Consensus_Call{Call: _e.mock.On("Consensus")} } @@ -69,7 +69,7 @@ func (_c *MockAppConns_Consensus_Call) RunAndReturn(run func() proxy.AppConnCons return _c } -// IsRunning provides a mock function with given fields: + func (_m *MockAppConns) IsRunning() bool { ret := _m.Called() @@ -87,12 +87,12 @@ func (_m *MockAppConns) IsRunning() bool { return r0 } -// MockAppConns_IsRunning_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsRunning' + type MockAppConns_IsRunning_Call struct { *mock.Call } -// IsRunning is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) IsRunning() *MockAppConns_IsRunning_Call { return &MockAppConns_IsRunning_Call{Call: _e.mock.On("IsRunning")} } @@ -114,7 +114,7 @@ func (_c *MockAppConns_IsRunning_Call) RunAndReturn(run func() bool) *MockAppCon return _c } -// Mempool provides a mock function with given fields: + func (_m *MockAppConns) Mempool() proxy.AppConnMempool { ret := _m.Called() @@ -134,12 +134,12 @@ func (_m *MockAppConns) Mempool() proxy.AppConnMempool { return r0 } -// MockAppConns_Mempool_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Mempool' + type MockAppConns_Mempool_Call struct { *mock.Call } -// Mempool is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) Mempool() *MockAppConns_Mempool_Call { return &MockAppConns_Mempool_Call{Call: _e.mock.On("Mempool")} } @@ -161,7 +161,7 @@ func (_c *MockAppConns_Mempool_Call) RunAndReturn(run func() proxy.AppConnMempoo return _c } -// OnReset provides a mock function with given fields: + func (_m *MockAppConns) OnReset() error { ret := _m.Called() @@ -179,12 +179,12 @@ func (_m *MockAppConns) OnReset() error { return r0 } -// MockAppConns_OnReset_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnReset' + type MockAppConns_OnReset_Call struct { *mock.Call } -// OnReset is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) OnReset() *MockAppConns_OnReset_Call { return &MockAppConns_OnReset_Call{Call: _e.mock.On("OnReset")} } @@ -206,7 +206,7 @@ func (_c *MockAppConns_OnReset_Call) 
RunAndReturn(run func() error) *MockAppConn return _c } -// OnStart provides a mock function with given fields: + func (_m *MockAppConns) OnStart() error { ret := _m.Called() @@ -224,12 +224,12 @@ func (_m *MockAppConns) OnStart() error { return r0 } -// MockAppConns_OnStart_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnStart' + type MockAppConns_OnStart_Call struct { *mock.Call } -// OnStart is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) OnStart() *MockAppConns_OnStart_Call { return &MockAppConns_OnStart_Call{Call: _e.mock.On("OnStart")} } @@ -251,17 +251,17 @@ func (_c *MockAppConns_OnStart_Call) RunAndReturn(run func() error) *MockAppConn return _c } -// OnStop provides a mock function with given fields: + func (_m *MockAppConns) OnStop() { _m.Called() } -// MockAppConns_OnStop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnStop' + type MockAppConns_OnStop_Call struct { *mock.Call } -// OnStop is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) OnStop() *MockAppConns_OnStop_Call { return &MockAppConns_OnStop_Call{Call: _e.mock.On("OnStop")} } @@ -283,7 +283,7 @@ func (_c *MockAppConns_OnStop_Call) RunAndReturn(run func()) *MockAppConns_OnSto return _c } -// Query provides a mock function with given fields: + func (_m *MockAppConns) Query() proxy.AppConnQuery { ret := _m.Called() @@ -303,12 +303,12 @@ func (_m *MockAppConns) Query() proxy.AppConnQuery { return r0 } -// MockAppConns_Query_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Query' + type MockAppConns_Query_Call struct { *mock.Call } -// Query is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) Query() *MockAppConns_Query_Call { return &MockAppConns_Query_Call{Call: _e.mock.On("Query")} } @@ -330,7 +330,7 @@ func (_c *MockAppConns_Query_Call) RunAndReturn(run func() proxy.AppConnQuery) * return _c } -// Quit provides a mock function with given fields: + func (_m *MockAppConns) Quit() <-chan struct{} { ret := _m.Called() @@ -350,12 +350,12 @@ func (_m *MockAppConns) Quit() <-chan struct{} { return r0 } -// MockAppConns_Quit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Quit' + type MockAppConns_Quit_Call struct { *mock.Call } -// Quit is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) Quit() *MockAppConns_Quit_Call { return &MockAppConns_Quit_Call{Call: _e.mock.On("Quit")} } @@ -377,7 +377,7 @@ func (_c *MockAppConns_Quit_Call) RunAndReturn(run func() <-chan struct{}) *Mock return _c } -// Reset provides a mock function with given fields: + func (_m *MockAppConns) Reset() error { ret := _m.Called() @@ -395,12 +395,12 @@ func (_m *MockAppConns) Reset() error { return r0 } -// MockAppConns_Reset_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Reset' + type MockAppConns_Reset_Call struct { *mock.Call } -// Reset is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) Reset() *MockAppConns_Reset_Call { return &MockAppConns_Reset_Call{Call: _e.mock.On("Reset")} } @@ -422,18 +422,18 @@ func (_c *MockAppConns_Reset_Call) RunAndReturn(run func() error) *MockAppConns_ return _c } -// SetLogger provides a mock function with given fields: _a0 + func (_m *MockAppConns) SetLogger(_a0 log.Logger) { _m.Called(_a0) } -// MockAppConns_SetLogger_Call is a *mock.Call that shadows Run/Return methods 
with type explicit version for method 'SetLogger' + type MockAppConns_SetLogger_Call struct { *mock.Call } -// SetLogger is a helper method to define mock.On call -// - _a0 log.Logger + + func (_e *MockAppConns_Expecter) SetLogger(_a0 interface{}) *MockAppConns_SetLogger_Call { return &MockAppConns_SetLogger_Call{Call: _e.mock.On("SetLogger", _a0)} } @@ -455,7 +455,7 @@ func (_c *MockAppConns_SetLogger_Call) RunAndReturn(run func(log.Logger)) *MockA return _c } -// Snapshot provides a mock function with given fields: + func (_m *MockAppConns) Snapshot() proxy.AppConnSnapshot { ret := _m.Called() @@ -475,12 +475,12 @@ func (_m *MockAppConns) Snapshot() proxy.AppConnSnapshot { return r0 } -// MockAppConns_Snapshot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Snapshot' + type MockAppConns_Snapshot_Call struct { *mock.Call } -// Snapshot is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) Snapshot() *MockAppConns_Snapshot_Call { return &MockAppConns_Snapshot_Call{Call: _e.mock.On("Snapshot")} } @@ -502,7 +502,7 @@ func (_c *MockAppConns_Snapshot_Call) RunAndReturn(run func() proxy.AppConnSnaps return _c } -// Start provides a mock function with given fields: + func (_m *MockAppConns) Start() error { ret := _m.Called() @@ -520,12 +520,12 @@ func (_m *MockAppConns) Start() error { return r0 } -// MockAppConns_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' + type MockAppConns_Start_Call struct { *mock.Call } -// Start is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) Start() *MockAppConns_Start_Call { return &MockAppConns_Start_Call{Call: _e.mock.On("Start")} } @@ -547,7 +547,7 @@ func (_c *MockAppConns_Start_Call) RunAndReturn(run func() error) *MockAppConns_ return _c } -// Stop provides a mock function with given fields: + func (_m *MockAppConns) Stop() error { ret := _m.Called() @@ -565,12 +565,12 @@ func (_m *MockAppConns) Stop() error { return r0 } -// MockAppConns_Stop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stop' + type MockAppConns_Stop_Call struct { *mock.Call } -// Stop is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) Stop() *MockAppConns_Stop_Call { return &MockAppConns_Stop_Call{Call: _e.mock.On("Stop")} } @@ -592,7 +592,7 @@ func (_c *MockAppConns_Stop_Call) RunAndReturn(run func() error) *MockAppConns_S return _c } -// String provides a mock function with given fields: + func (_m *MockAppConns) String() string { ret := _m.Called() @@ -610,12 +610,12 @@ func (_m *MockAppConns) String() string { return r0 } -// MockAppConns_String_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'String' + type MockAppConns_String_Call struct { *mock.Call } -// String is a helper method to define mock.On call + func (_e *MockAppConns_Expecter) String() *MockAppConns_String_Call { return &MockAppConns_String_Call{Call: _e.mock.On("String")} } @@ -637,8 +637,8 @@ func (_c *MockAppConns_String_Call) RunAndReturn(run func() string) *MockAppConn return _c } -// NewMockAppConns creates a new instance of MockAppConns. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
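// A minimal sketch of how the generated Expecter API above is typically used in a
// test. The mocks import path is assumed from the file layout shown in this patch,
// and the EXPECT()/Return calls are the standard mockery/testify ones; treat this
// as an illustration rather than code from the repository.
package proxy_test

import (
	"testing"

	mockproxy "github.com/dymensionxyz/dymint/mocks/github.com/tendermint/tendermint/proxy"
)

func TestAppConnsMockSketch(t *testing.T) {
	// NewMockAppConns registers a cleanup hook that asserts the declared expectations.
	m := mockproxy.NewMockAppConns(t)

	// Declare the expected calls and their canned results.
	m.EXPECT().IsRunning().Return(true)
	m.EXPECT().Start().Return(nil)

	// Code under test would receive m wherever a proxy.AppConns is expected.
	if !m.IsRunning() {
		t.Fatal("expected the mock to report running")
	}
	if err := m.Start(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}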
+ + func NewMockAppConns(t interface { mock.TestingT Cleanup(func()) diff --git a/node/events/types.go b/node/events/types.go index 3af471f10..069ee116c 100644 --- a/node/events/types.go +++ b/node/events/types.go @@ -6,24 +6,24 @@ import ( uevent "github.com/dymensionxyz/dymint/utils/event" ) -// Type Keys + const ( - // NodeTypeKey is a reserved composite key for event name. + NodeTypeKey = "node.event" ) -// Types + const ( HealthStatus = "HealthStatus" ) -// Convenience + var HealthStatusList = map[string][]string{NodeTypeKey: {HealthStatus}} type DataHealthStatus struct { - // Error is the error that was encountered in case of a health check failure. Nil implies healthy. + Error error } @@ -31,6 +31,6 @@ func (dhs DataHealthStatus) String() string { return fmt.Sprintf("DataHealthStatus{Error: %v}", dhs.Error) } -// Queries + var QueryHealthStatus = uevent.QueryFor(NodeTypeKey, HealthStatus) diff --git a/node/mempool/mempool.go b/node/mempool/mempool.go index 80477193c..1d7c53310 100644 --- a/node/mempool/mempool.go +++ b/node/mempool/mempool.go @@ -15,12 +15,12 @@ const ( type MempoolIDs struct { mtx tmsync.RWMutex peerMap map[peer.ID]uint16 - nextID uint16 // assumes that a node will never have over 65536 active peers - activeIDs map[uint16]struct{} // used to check if a given peerID key is used, the value doesn't matter + nextID uint16 + activeIDs map[uint16]struct{} } -// Reserve searches for the next unused ID and assigns it to the -// peer. + + func (ids *MempoolIDs) ReserveForPeer(peer peer.ID) { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -30,8 +30,8 @@ func (ids *MempoolIDs) ReserveForPeer(peer peer.ID) { ids.activeIDs[curID] = struct{}{} } -// nextPeerID returns the next unused peer ID to use. -// This assumes that ids's mutex is already locked. + + func (ids *MempoolIDs) nextPeerID() uint16 { if len(ids.activeIDs) == maxActiveIDs { panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", maxActiveIDs)) @@ -47,7 +47,7 @@ func (ids *MempoolIDs) nextPeerID() uint16 { return curID } -// Reclaim returns the ID reserved for the peer back to unused pool. + func (ids *MempoolIDs) Reclaim(peer peer.ID) { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -59,7 +59,7 @@ func (ids *MempoolIDs) Reclaim(peer peer.ID) { } } -// GetForPeer returns an ID for the peer. ID is generated if required. + func (ids *MempoolIDs) GetForPeer(peer peer.ID) uint16 { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -78,6 +78,6 @@ func NewMempoolIDs() *MempoolIDs { return &MempoolIDs{ peerMap: make(map[peer.ID]uint16), activeIDs: map[uint16]struct{}{0: {}}, - nextID: 1, // reserve unknownPeerID(0) for mempoolReactor.BroadcastTx + nextID: 1, } } diff --git a/node/node.go b/node/node.go index f0f1a88e5..7cfe74792 100644 --- a/node/node.go +++ b/node/node.go @@ -34,15 +34,15 @@ import ( "github.com/dymensionxyz/dymint/store" ) -// prefixes used in KV store to separate main node data from DALC data + var ( mainPrefix = []byte{0} dalcPrefix = []byte{1} indexerPrefix = []byte{2} ) -// Node represents a client node in Dymint network. -// It connects all the components and orchestrates their work. 
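// A self-contained sketch of the peer-ID reservation pattern used by MempoolIDs
// above: each libp2p peer gets a small numeric ID, ID 0 stays reserved, and IDs are
// recycled when a peer disconnects. Plain strings stand in for peer.ID here; the
// mutex discipline mirrors the real code.
package main

import (
	"fmt"
	"sync"
)

type peerIDs struct {
	mtx    sync.Mutex
	byPeer map[string]uint16
	active map[uint16]struct{}
	nextID uint16
}

func newPeerIDs() *peerIDs {
	return &peerIDs{
		byPeer: make(map[string]uint16),
		active: map[uint16]struct{}{0: {}}, // 0 is reserved
		nextID: 1,
	}
}

func (p *peerIDs) reserve(peer string) uint16 {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	// find the next unused ID; uint16 arithmetic wraps around naturally
	for {
		if _, used := p.active[p.nextID]; !used {
			break
		}
		p.nextID++
	}
	id := p.nextID
	p.nextID++
	p.byPeer[peer] = id
	p.active[id] = struct{}{}
	return id
}

func (p *peerIDs) reclaim(peer string) {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	if id, ok := p.byPeer[peer]; ok {
		delete(p.active, id)
		delete(p.byPeer, peer)
	}
}

func main() {
	ids := newPeerIDs()
	fmt.Println(ids.reserve("peerA"), ids.reserve("peerB")) // 1 2
	ids.reclaim("peerA")
	fmt.Println(ids.reserve("peerC")) // 3 (the next free ID)
}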
+ + type Node struct { service.BaseService eventBus *tmtypes.EventBus @@ -54,7 +54,7 @@ type Node struct { conf config.NodeConfig P2P *p2p.Client - // TODO(tzdybal): consider extracting "mempool reactor" + Mempool mempool.Mempool MempoolIDs *nodemempool.MempoolIDs incomingTxCh chan *p2p.GossipMessage @@ -68,12 +68,12 @@ type Node struct { BlockIndexer indexer.BlockIndexer IndexerService *txindex.IndexerService - // shared context for all dymint components + ctx context.Context cancel context.CancelFunc } -// NewNode creates new Dymint node. + func NewNode( ctx context.Context, conf config.NodeConfig, @@ -102,12 +102,12 @@ func NewNode( var baseKV store.KV var dstore datastore.Datastore - if conf.DBConfig.InMemory || (conf.RootDir == "" && conf.DBPath == "") { // this is used for testing + if conf.DBConfig.InMemory || (conf.RootDir == "" && conf.DBPath == "") { logger.Info("WARNING: working in in-memory mode") baseKV = store.NewDefaultInMemoryKVStore() dstore = datastore.NewMapDatastore() } else { - // TODO(omritoptx): Move dymint to const + baseKV = store.NewKVStore(conf.RootDir, conf.DBPath, "dymint", conf.DBConfig.SyncWrites, logger) path := filepath.Join(store.Rootify(conf.RootDir, conf.DBPath), "blocksync") var err error @@ -120,9 +120,9 @@ func NewNode( s := store.New(store.NewPrefixKV(baseKV, mainPrefix)) indexerKV := store.NewPrefixKV(baseKV, indexerPrefix) - // TODO: dalcKV is needed for mock only. Initialize only if mock used + dalcKV := store.NewPrefixKV(baseKV, dalcPrefix) - // Init the settlement layer client + settlementlc := slregistry.GetClient(slregistry.Client(conf.SettlementLayer)) if settlementlc == nil { return nil, fmt.Errorf("get settlement client: named: %s", conf.SettlementLayer) @@ -161,7 +161,7 @@ func NewNode( settlementlc, eventBus, pubsubServer, - nil, // p2p client is set later + nil, dalcKV, indexerService, logger, @@ -170,7 +170,7 @@ func NewNode( return nil, fmt.Errorf("BlockManager initialization: %w", err) } - // Set p2p client and it's validators + p2pValidator := p2p.NewValidator(logger.With("module", "p2p_validator"), blockManager) p2pClient, err := p2p.NewClient(conf.P2PConfig, p2pKey, genesis.ChainID, s, pubsubServer, dstore, logger.With("module", "p2p")) if err != nil { @@ -179,7 +179,7 @@ func NewNode( p2pClient.SetTxValidator(p2pValidator.TxValidator(mp, mpIDs)) p2pClient.SetBlockValidator(p2pValidator.BlockValidator()) - // Set p2p client in block manager + blockManager.P2PClient = p2pClient ctx, cancel := context.WithCancel(ctx) @@ -209,7 +209,7 @@ func NewNode( return node, nil } -// OnStart is a part of Service interface. + func (n *Node) OnStart() error { n.Logger.Info("starting P2P client") err := n.P2P.Start(n.ctx) @@ -234,7 +234,7 @@ func (n *Node) OnStart() error { } }() - // start the block manager + err = n.BlockManager.Start(n.ctx) if err != nil { return fmt.Errorf("while starting block manager: %w", err) @@ -243,12 +243,12 @@ func (n *Node) OnStart() error { return nil } -// GetGenesis returns entire genesis doc. + func (n *Node) GetGenesis() *tmtypes.GenesisDoc { return n.genesis } -// OnStop is a part of Service interface. + func (n *Node) OnStop() { err := n.BlockManager.DAClient.Stop() if err != nil { @@ -273,32 +273,32 @@ func (n *Node) OnStop() { n.cancel() } -// OnReset is a part of Service interface. + func (n *Node) OnReset() error { panic("OnReset - not implemented!") } -// SetLogger sets the logger used by node. + func (n *Node) SetLogger(logger log.Logger) { n.Logger = logger } -// GetLogger returns logger. 
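// A short sketch of the store-selection decision in NewNode above: an in-memory KV
// store (used for tests) when InMemory is set or no paths are configured, otherwise
// a persistent store rooted at RootDir/DBPath. The config shape below only mirrors
// the fields visible in this patch and is otherwise illustrative.
package main

import (
	"fmt"
	"path/filepath"
)

type dbConfig struct {
	InMemory   bool
	SyncWrites bool
}

type nodeConfig struct {
	RootDir string
	DBPath  string
	DB      dbConfig
}

func storeKind(conf nodeConfig) string {
	if conf.DB.InMemory || (conf.RootDir == "" && conf.DBPath == "") {
		return "in-memory"
	}
	return "persistent at " + filepath.Join(conf.RootDir, conf.DBPath)
}

func main() {
	fmt.Println(storeKind(nodeConfig{}))                                              // in-memory
	fmt.Println(storeKind(nodeConfig{RootDir: "/home/user/.dymint", DBPath: "data"})) // persistent
}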
+ func (n *Node) GetLogger() log.Logger { return n.Logger } -// EventBus gives access to Node's event bus. + func (n *Node) EventBus() *tmtypes.EventBus { return n.eventBus } -// PubSubServer gives access to the Node's pubsub server + func (n *Node) PubSubServer() *pubsub.Server { return n.PubsubServer } -// ProxyApp returns ABCI proxy connections to communicate with application. + func (n *Node) ProxyApp() proxy.AppConns { return n.proxyApp } diff --git a/p2p/block.go b/p2p/block.go index d6da3da96..754c17973 100644 --- a/p2p/block.go +++ b/p2p/block.go @@ -6,24 +6,24 @@ import ( tmcrypto "github.com/tendermint/tendermint/crypto" ) -/* -------------------------------------------------------------------------- */ -/* Event Data */ -/* -------------------------------------------------------------------------- */ -// BlockData defines the struct of the data for each block sent via P2P + + + + type BlockData struct { - // Block is the block that was gossiped + Block types.Block - // Commit is the commit that was gossiped + Commit types.Commit } -// MarshalBinary encodes BlockData into binary form and returns it. + func (b *BlockData) MarshalBinary() ([]byte, error) { return b.ToProto().Marshal() } -// UnmarshalBinary decodes binary form of p2p received block into object. + func (b *BlockData) UnmarshalBinary(data []byte) error { var pbBlock pb.BlockData err := pbBlock.Unmarshal(data) @@ -34,7 +34,7 @@ func (b *BlockData) UnmarshalBinary(data []byte) error { return err } -// ToProto converts Data into protobuf representation and returns it. + func (b *BlockData) ToProto() *pb.BlockData { return &pb.BlockData{ Block: b.Block.ToProto(), @@ -42,7 +42,7 @@ func (b *BlockData) ToProto() *pb.BlockData { } } -// FromProto fills BlockData with data from its protobuf representation. + func (b *BlockData) FromProto(other *pb.BlockData) error { if err := b.Block.FromProto(other.Block); err != nil { return err @@ -53,7 +53,7 @@ func (b *BlockData) FromProto(other *pb.BlockData) error { return nil } -// Validate run basic validation on the p2p block received + func (b *BlockData) Validate(proposerPubKey tmcrypto.PubKey) error { if err := b.Block.ValidateBasic(); err != nil { return err diff --git a/p2p/block_sync.go b/p2p/block_sync.go index f8be1e2c0..49cfefb93 100644 --- a/p2p/block_sync.go +++ b/p2p/block_sync.go @@ -20,48 +20,48 @@ import ( "github.com/libp2p/go-libp2p/core/host" ) -// Blocksync is a protocol used to retrieve blocks on demand from the P2P network. -// Nodes store received blocks from gossip in an IPFS blockstore and nodes are able to request them on demand using bitswap protocol. -// In order to discover the identifier (CID) of each block a DHT request needs to be made for the specific block height. -// Nodes need to advertise CIDs/height map to the DHT periodically. 
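// BlockData above round-trips through protobuf via MarshalBinary/UnmarshalBinary
// before being gossiped. A hedged, self-contained sketch of that wire pattern: the
// payload type satisfies encoding.BinaryMarshaler/BinaryUnmarshaler so sender and
// receiver stay symmetric. The payload here is illustrative, not the real BlockData.
package main

import (
	"encoding"
	"fmt"
)

type payload struct{ Height uint64 }

func (p *payload) MarshalBinary() ([]byte, error) {
	return []byte(fmt.Sprintf("%d", p.Height)), nil
}

func (p *payload) UnmarshalBinary(b []byte) error {
	_, err := fmt.Sscanf(string(b), "%d", &p.Height)
	return err
}

// compile-time checks, the same guarantees the gossip layer relies on
var (
	_ encoding.BinaryMarshaler   = (*payload)(nil)
	_ encoding.BinaryUnmarshaler = (*payload)(nil)
)

func main() {
	in := payload{Height: 7}
	raw, _ := in.MarshalBinary()

	var out payload
	if err := out.UnmarshalBinary(raw); err != nil {
		panic(err)
	}
	fmt.Println(out.Height) // 7
}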
-// https://www.notion.so/dymension/ADR-x-Rollapp-block-sync-protocol-6ee48b232a6a45e09989d67f1a6c0297?pvs=4 + + + + + type BlockSync struct { - // service that reads/writes blocks either from local datastore or the P2P network + bsrv blockservice.BlockService - // local datastore for IPFS blocks + bstore blockstore.Blockstore - // protocol used to obtain blocks from the P2P network + net network.BitSwapNetwork - // used to find all data chunks that are part of the same block + dsrv BlockSyncDagService - // used to define the content identifiers of each data chunk + cidBuilder cid.Builder logger types.Logger } type BlockSyncMessageHandler func(block *BlockData) -// SetupBlockSync initializes all services required to provide and retrieve block data in the P2P network. + func SetupBlockSync(ctx context.Context, h host.Host, store datastore.Datastore, logger types.Logger) *BlockSync { - // construct a datastore + ds := dsync.MutexWrap(store) - // set a blockstore (to store IPFS data chunks) with the previous datastore + bs := blockstore.NewBlockstore(ds) - // initialize bitswap network used to retrieve data chunks from other peers in the P2P network + bsnet := network.NewFromIpfsHost(h, &routinghelpers.Null{}, network.Prefix("/dymension/block-sync/")) - // Bitswap server that provides data to the network. + bsserver := server.New( ctx, bsnet, bs, - server.ProvideEnabled(false), // we don't provide blocks over DHT + server.ProvideEnabled(false), server.SetSendDontHaves(false), ) - // Bitswap client that retrieves data from the network. + bsclient := client.New( ctx, bsnet, @@ -71,7 +71,7 @@ func SetupBlockSync(ctx context.Context, h host.Host, store datastore.Datastore, client.WithoutDuplicatedBlockStats(), ) - // start the network + bsnet.Start(bsserver, bsclient) bsrv := blockservice.New(bs, bsclient) @@ -93,12 +93,12 @@ func SetupBlockSync(ctx context.Context, h host.Host, store datastore.Datastore, return blockSync } -// SaveBlock stores the blocks produced in the DAG services to be retrievable from the P2P network. + func (blocksync *BlockSync) SaveBlock(ctx context.Context, block []byte) (cid.Cid, error) { return blocksync.dsrv.SaveBlock(ctx, block) } -// LoadBlock retrieves the blocks (from the local blockstore or the network) using the DAGService to discover all data chunks that are part of the same block. + func (blocksync *BlockSync) LoadBlock(ctx context.Context, cid cid.Cid) (BlockData, error) { blockBytes, err := blocksync.dsrv.LoadBlock(ctx, cid) if err != nil { @@ -111,7 +111,7 @@ func (blocksync *BlockSync) LoadBlock(ctx context.Context, cid cid.Cid) (BlockDa return block, nil } -// RemoveBlock removes the block from the DAGservice. + func (blocksync *BlockSync) DeleteBlock(ctx context.Context, cid cid.Cid) error { return blocksync.dsrv.DeleteBlock(ctx, cid) } diff --git a/p2p/block_sync_dag.go b/p2p/block_sync_dag.go index d9df4d440..2502d9cd5 100644 --- a/p2p/block_sync_dag.go +++ b/p2p/block_sync_dag.go @@ -21,8 +21,8 @@ type BlockSyncDagService struct { cidBuilder cid.Builder } -// NewDAGService inits the DAGservice used to retrieve/send blocks data in the P2P. 
-// Block data is organized in a merkle DAG using IPLD (https://ipld.io/docs/) + + func NewDAGService(bsrv blockservice.BlockService) BlockSyncDagService { bsDagService := &BlockSyncDagService{ cidBuilder: &cid.Prefix{ @@ -37,15 +37,15 @@ func NewDAGService(bsrv blockservice.BlockService) BlockSyncDagService { return *bsDagService } -// SaveBlock splits the block in chunks of 256KB and it creates a new merkle DAG with them. it returns the content identifier (cid) of the root node of the DAG. -// Using the root CID the whole block can be retrieved using the DAG service + + func (bsDagService *BlockSyncDagService) SaveBlock(ctx context.Context, block []byte) (cid.Cid, error) { blockReader := bytes.NewReader(block) splitter := chunker.NewSizeSplitter(blockReader, chunker.DefaultBlockSize) nodes := []*dag.ProtoNode{} - // the loop creates nodes for each block chunk and sets each cid + for { nextData, err := splitter.NextBytes() if err == io.EOF { @@ -63,14 +63,14 @@ func (bsDagService *BlockSyncDagService) SaveBlock(ctx context.Context, block [] } - // an empty root node is created + root := dag.NodeWithData(nil) err := root.SetCidBuilder(bsDagService.cidBuilder) if err != nil { return cid.Undef, err } - // and linked to all chunks that are added to the DAGservice + for _, n := range nodes { err := root.AddNodeLink(n.Cid().String(), n) @@ -90,21 +90,21 @@ func (bsDagService *BlockSyncDagService) SaveBlock(ctx context.Context, block [] return root.Cid(), nil } -// LoadBlock returns the block data obtained from the DAGService, using the root cid, either from the network or the local blockstore + func (bsDagService *BlockSyncDagService) LoadBlock(ctx context.Context, cid cid.Cid) ([]byte, error) { - // first it gets the root node + nd, err := bsDagService.Get(ctx, cid) if err != nil { return nil, err } - // then it gets all the data from the root node + read, err := dagReader(nd, bsDagService) if err != nil { return nil, err } - // the data is read to bytes array + data, err := io.ReadAll(read) if err != nil { return nil, err @@ -113,13 +113,13 @@ func (bsDagService *BlockSyncDagService) LoadBlock(ctx context.Context, cid cid. } func (bsDagService *BlockSyncDagService) DeleteBlock(ctx context.Context, cid cid.Cid) error { - // first it gets the root node + root, err := bsDagService.Get(ctx, cid) if err != nil { return err } - // then it iterates all the cids to remove them from the block store + for _, l := range root.Links() { err := bsDagService.Remove(ctx, l.Cid) if err != nil { @@ -129,12 +129,12 @@ func (bsDagService *BlockSyncDagService) DeleteBlock(ctx context.Context, cid ci return nil } -// dagReader is used to read the DAG (all the block chunks) from the root (IPLD) node + func dagReader(root ipld.Node, ds ipld.DAGService) (io.Reader, error) { ctx := context.Background() buf := new(bytes.Buffer) - // the loop retrieves all the nodes (block chunks) either from the local store or the network, in case it is not there. 
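// A self-contained sketch of the chunk-and-link structure that SaveBlock/LoadBlock
// above build with IPLD: the block is split into fixed-size chunks, every chunk is
// addressed by a content hash, and a root node simply lists the chunk hashes in
// order. The real code uses 256KB chunks and CIDs; sha256 and a tiny chunk size are
// used here purely for illustration.
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

const chunkSize = 4 // the real splitter uses chunker.DefaultBlockSize (256KB)

func saveBlock(block []byte, store map[[32]byte][]byte) (root [][32]byte) {
	for off := 0; off < len(block); off += chunkSize {
		end := off + chunkSize
		if end > len(block) {
			end = len(block)
		}
		chunk := block[off:end]
		h := sha256.Sum256(chunk)
		store[h] = append([]byte(nil), chunk...)
		root = append(root, h) // the root "links" to each chunk
	}
	return root
}

func loadBlock(root [][32]byte, store map[[32]byte][]byte) []byte {
	buf := new(bytes.Buffer)
	for _, h := range root {
		buf.Write(store[h]) // in the real code a missing chunk is fetched via bitswap
	}
	return buf.Bytes()
}

func main() {
	store := map[[32]byte][]byte{}
	root := saveBlock([]byte("hello, block sync"), store)
	fmt.Printf("%d chunks, round trip: %q\n", len(root), loadBlock(root, store))
}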
+ for _, l := range root.Links() { n, err := ds.Get(ctx, l.Cid) if err != nil { diff --git a/p2p/blocks_received.go b/p2p/blocks_received.go index ceaf0bf67..0541f9599 100644 --- a/p2p/blocks_received.go +++ b/p2p/blocks_received.go @@ -2,15 +2,15 @@ package p2p import "sync" -// BlocksReceived tracks blocks received from P2P to know what are the missing blocks that need to be requested on demand + type BlocksReceived struct { blocksReceived map[uint64]struct{} latestSeenHeight uint64 - // mutex to protect blocksReceived map access + blockReceivedMu sync.Mutex } -// addBlockReceived adds the block height to a map + func (br *BlocksReceived) AddBlockReceived(height uint64) { br.latestSeenHeight = max(height, br.latestSeenHeight) br.blockReceivedMu.Lock() @@ -18,7 +18,7 @@ func (br *BlocksReceived) AddBlockReceived(height uint64) { br.blocksReceived[height] = struct{}{} } -// isBlockReceived checks if a block height is already received + func (br *BlocksReceived) IsBlockReceived(height uint64) bool { br.blockReceivedMu.Lock() defer br.blockReceivedMu.Unlock() @@ -26,7 +26,7 @@ func (br *BlocksReceived) IsBlockReceived(height uint64) bool { return ok } -// removeBlocksReceivedUpToHeight clears previous received block heights + func (br *BlocksReceived) RemoveBlocksReceivedUpToHeight(appliedHeight uint64) { br.blockReceivedMu.Lock() defer br.blockReceivedMu.Unlock() @@ -37,7 +37,7 @@ func (br *BlocksReceived) RemoveBlocksReceivedUpToHeight(appliedHeight uint64) { } } -// GetLatestSeenHeight returns the latest height stored + func (br *BlocksReceived) GetLatestSeenHeight() uint64 { return br.latestSeenHeight } diff --git a/p2p/client.go b/p2p/client.go index 596669f99..d21979efe 100644 --- a/p2p/client.go +++ b/p2p/client.go @@ -33,29 +33,29 @@ import ( "github.com/dymensionxyz/dymint/types" ) -// TODO(tzdybal): refactor to configuration parameters + const ( - // reAdvertisePeriod defines a period after which P2P client re-attempt advertising namespace in DHT. + reAdvertisePeriod = 1 * time.Hour - // peerLimit defines limit of number of peers returned during active peer discovery. + peerLimit = 60 - // txTopicSuffix is added after namespace to create pubsub topic for TX gossiping. + txTopicSuffix = "-tx" - // blockTopicSuffix is added after namespace to create pubsub topic for block gossiping. + blockTopicSuffix = "-block" - // blockSyncProtocolSuffix is added after namespace to create blocksync protocol prefix. + blockSyncProtocolPrefix = "block-sync" ) -// Client is a P2P client, implemented with libp2p. -// -// Initially, client connects to predefined seed nodes (aka bootnodes, bootstrap nodes). -// Those seed nodes serve Kademlia DHT protocol, and are agnostic to ORU chain. Using DHT -// peer routing and discovery clients find other peers within ORU network. + + + + + type Client struct { conf config.P2PConfig chainID string @@ -71,18 +71,18 @@ type Client struct { blockGossiper *Gossiper blockValidator GossipValidator - // cancel is used to cancel context passed to libp2p functions - // it's required because of discovery.Advertise call + + cancel context.CancelFunc localPubsubServer *tmpubsub.Server logger types.Logger - // blocksync instance used to save and retrieve blocks from the P2P network on demand + blocksync *BlockSync - // store used to store retrievable blocks using blocksync + blockSyncStore datastore.Datastore store store.Store @@ -90,10 +90,10 @@ type Client struct { blocksReceived *BlocksReceived } -// NewClient creates new Client object. 
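// A compact sketch of the BlocksReceived bookkeeping above: heights seen over gossip
// or blocksync are recorded so the retrieval loop can tell which heights are still
// missing, the highest height seen is tracked, and entries at or below the applied
// height are pruned.
package main

import (
	"fmt"
	"sync"
)

type received struct {
	mu     sync.Mutex
	seen   map[uint64]struct{}
	latest uint64
}

func (r *received) add(h uint64) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if h > r.latest {
		r.latest = h
	}
	r.seen[h] = struct{}{}
}

func (r *received) has(h uint64) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	_, ok := r.seen[h]
	return ok
}

func (r *received) pruneUpTo(applied uint64) {
	r.mu.Lock()
	defer r.mu.Unlock()
	for h := range r.seen {
		if h <= applied {
			delete(r.seen, h)
		}
	}
}

func main() {
	r := &received{seen: map[uint64]struct{}{}}
	r.add(10)
	r.add(12)
	r.pruneUpTo(10)
	fmt.Println(r.has(10), r.has(12), r.latest) // false true 12
}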
-// -// Basic checks on parameters are done, and default parameters are provided for unset-configuration -// TODO(tzdybal): consider passing entire config, not just P2P config, to reduce number of arguments + + + + func NewClient(conf config.P2PConfig, privKey crypto.PrivKey, chainID string, store store.Store, localPubsubServer *tmpubsub.Server, blockSyncStore datastore.Datastore, logger types.Logger) (*Client, error) { if privKey == nil { return nil, fmt.Errorf("private key: %w", gerrc.ErrNotFound) @@ -116,15 +116,15 @@ func NewClient(conf config.P2PConfig, privKey crypto.PrivKey, chainID string, st }, nil } -// Start establish Client's P2P connectivity. -// -// Following steps are taken: -// 1. Setup libp2p host, start listening for incoming connections. -// 2. Setup gossibsub. -// 3. Setup DHT, establish connection to seed nodes and initialize peer discovery. -// 4. Use active peer discovery to look for peers from same ORU network. + + + + + + + func (c *Client) Start(ctx context.Context) error { - // create new, cancelable context + ctx, c.cancel = context.WithCancel(ctx) host, err := c.listen() if err != nil { @@ -171,7 +171,7 @@ func (c *Client) StartWithHost(ctx context.Context, h host.Host) error { return nil } -// Close gently stops Client. + func (c *Client) Close() error { c.cancel() @@ -183,24 +183,24 @@ func (c *Client) Close() error { ) } -// GossipTx sends the transaction to the P2P network. + func (c *Client) GossipTx(ctx context.Context, tx []byte) error { c.logger.Debug("Gossiping transaction.", "len", len(tx)) return c.txGossiper.Publish(ctx, tx) } -// SetTxValidator sets the callback function, that will be invoked during message gossiping. + func (c *Client) SetTxValidator(val GossipValidator) { c.txValidator = val } -// GossipBlock sends the block, and it's commit to the P2P network. + func (c *Client) GossipBlock(ctx context.Context, blockBytes []byte) error { c.logger.Debug("Gossiping block.", "len", len(blockBytes)) return c.blockGossiper.Publish(ctx, blockBytes) } -// SaveBlock stores the block in the blocksync datastore, stores locally the returned identifier and advertises the identifier to the DHT, so other nodes can know the identifier for the block height. + func (c *Client) SaveBlock(ctx context.Context, height uint64, revision uint64, blockBytes []byte) error { if !c.conf.BlockSyncEnabled { return nil @@ -228,7 +228,7 @@ func (c *Client) SaveBlock(ctx context.Context, height uint64, revision uint64, return nil } -// RemoveBlocks is used to prune blocks from the block sync datastore. 
+ func (c *Client) RemoveBlocks(ctx context.Context, to uint64) (uint64, error) { prunedBlocks := uint64(0) @@ -269,13 +269,13 @@ func (c *Client) RemoveBlocks(ctx context.Context, to uint64) (uint64, error) { return prunedBlocks, nil } -// AdvertiseBlockIdToDHT is used to advertise the identifier (cid) for a specific block height and revision to the DHT, using a PutValue operation + func (c *Client) AdvertiseBlockIdToDHT(ctx context.Context, height uint64, revision uint64, cid cid.Cid) error { err := c.DHT.PutValue(ctx, getBlockSyncKeyByHeight(height, revision), []byte(cid.String())) return err } -// GetBlockIdFromDHT is used to retrieve the identifier (cid) for a specific block height and revision from the DHT, using a GetValue operation + func (c *Client) GetBlockIdFromDHT(ctx context.Context, height uint64, revision uint64) (cid.Cid, error) { cidBytes, err := c.DHT.GetValue(ctx, getBlockSyncKeyByHeight(height, revision)) if err != nil { @@ -288,23 +288,23 @@ func (c *Client) UpdateLatestSeenHeight(height uint64) { c.blocksReceived.latestSeenHeight = max(height, c.blocksReceived.latestSeenHeight) } -// SetBlockValidator sets the callback function, that will be invoked after block is received from P2P network. + func (c *Client) SetBlockValidator(validator GossipValidator) { c.blockValidator = validator } -// Addrs returns listen addresses of Client. + func (c *Client) Addrs() []multiaddr.Multiaddr { return c.Host.Addrs() } -// Info returns p2p info + func (c *Client) Info() (p2p.ID, string, string) { return p2p.ID(hex.EncodeToString([]byte(c.Host.ID()))), c.conf.ListenAddress, c.chainID } -// PeerConnection describe basic information about P2P connection. -// TODO(tzdybal): move it somewhere + + type PeerConnection struct { NodeInfo p2p.DefaultNodeInfo `json:"node_info"` IsOutbound bool `json:"is_outbound"` @@ -312,7 +312,7 @@ type PeerConnection struct { RemoteIP string `json:"remote_ip"` } -// Peers returns list of peers connected to Client. 
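// A hedged sketch of the height-to-CID mapping that AdvertiseBlockIdToDHT and
// GetBlockIdFromDHT above maintain: the DHT key is
// "/block-sync/<revision>/<height>" (see getBlockSyncKeyByHeight further below) and
// the value is the CID string of the stored block. A plain map stands in for the
// libp2p DHT PutValue/GetValue calls, and the CID below is made up.
package main

import (
	"fmt"
	"strconv"
)

const blockSyncProtocolPrefix = "block-sync"

func blockSyncKey(height, revision uint64) string {
	return "/" + blockSyncProtocolPrefix + "/" +
		strconv.FormatUint(revision, 10) + "/" +
		strconv.FormatUint(height, 10)
}

func main() {
	dht := map[string][]byte{} // stand-in for DHT.PutValue / DHT.GetValue

	// advertise: the proposer stores the CID for height 42, revision 0
	dht[blockSyncKey(42, 0)] = []byte("bafy-example-cid")

	// retrieve: a syncing node asks for the same key to learn which CID to fetch
	if cid, ok := dht[blockSyncKey(42, 0)]; ok {
		fmt.Println("cid for height 42:", string(cid))
	}
}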
+ func (c *Client) Peers() []PeerConnection { conns := c.Host.Network().Conns() res := make([]PeerConnection, 0, len(conns)) @@ -322,12 +322,12 @@ func (c *Client) Peers() []PeerConnection { ListenAddr: c.conf.ListenAddress, Network: c.chainID, DefaultNodeID: p2p.ID(conn.RemotePeer().String()), - // TODO(tzdybal): fill more fields + }, IsOutbound: conn.Stat().Direction == network.DirOutbound, ConnectionStatus: p2p.ConnectionStatus{ Duration: time.Since(conn.Stat().Opened), - // TODO(tzdybal): fill more fields + }, RemoteIP: conn.RemoteMultiaddr().String(), } @@ -407,7 +407,7 @@ func (c *Client) peerDiscovery(ctx context.Context) error { } func (c *Client) setupPeerDiscovery(ctx context.Context) error { - // wait for DHT + select { case <-ctx.Done(): return nil @@ -443,7 +443,7 @@ func (c *Client) findPeers(ctx context.Context) error { return nil } -// tryConnect attempts to connect to a peer and logs error if necessary + func (c *Client) tryConnect(ctx context.Context, peer peer.AddrInfo) { c.logger.Debug("Trying to connect to peer.", "peer", peer) err := c.Host.Connect(ctx, peer) @@ -463,7 +463,7 @@ func (c *Client) setupGossiping(ctx context.Context) error { return err } - // tx gossiper receives the tx to add to the mempool through validation process, since it is a joint process + c.txGossiper, err = NewGossiper(c.Host, ps, c.getTxTopic(), nil, c.logger, WithValidator(c.txValidator)) if err != nil { return err @@ -502,43 +502,43 @@ func (c *Client) GetSeedAddrInfo(seedStr string) []peer.AddrInfo { return addrs } -// getNamespace returns unique string identifying ORU network. -// -// It is used to advertise/find peers in libp2p DHT. -// For now, chainID is used. + + + + func (c *Client) getNamespace() string { return c.chainID } -// topic used to transmit transactions in gossipsub + func (c *Client) getTxTopic() string { return c.getNamespace() + txTopicSuffix } -// topic used to transmit blocks in gossipsub + func (c *Client) getBlockTopic() string { return c.getNamespace() + blockTopicSuffix } -// NewTxValidator creates a pubsub validator that uses the node's mempool to check the -// transaction. 
If the transaction is valid, then it is added to the mempool + + func (c *Client) NewTxValidator() GossipValidator { return func(g *GossipMessage) bool { return true } } -// blockSyncReceived is called on reception of new block via blocksync protocol + func (c *Client) blockSyncReceived(block *BlockData) { err := c.localPubsubServer.PublishWithEvents(context.Background(), *block, map[string][]string{EventTypeKey: {EventNewBlockSyncBlock}}) if err != nil { c.logger.Error("Publishing event.", "err", err) } - // Received block is cached and no longer needed to request using blocksync + c.blocksReceived.AddBlockReceived(block.Block.Header.Height) } -// blockSyncReceived is called on reception of new block via gossip protocol + func (c *Client) blockGossipReceived(ctx context.Context, block []byte) { var gossipedBlock BlockData if err := gossipedBlock.UnmarshalBinary(block); err != nil { @@ -550,7 +550,7 @@ func (c *Client) blockGossipReceived(ctx context.Context, block []byte) { } if c.conf.BlockSyncEnabled { _, err := c.store.LoadBlockCid(gossipedBlock.Block.Header.Height) - // skip block already added to blocksync + if err == nil { return } @@ -558,13 +558,13 @@ func (c *Client) blockGossipReceived(ctx context.Context, block []byte) { if err != nil { c.logger.Error("Adding block to blocksync store.", "err", err, "height", gossipedBlock.Block.Header.Height) } - // Received block is cached and no longer needed to request using blocksync + c.blocksReceived.AddBlockReceived(gossipedBlock.Block.Header.Height) } } -// bootstrapLoop is used to periodically check if the node is connected to other nodes in the P2P network, re-bootstrapping the DHT in case it is necessary, -// or to try to connect to the persistent peers + + func (c *Client) bootstrapLoop(ctx context.Context) { ticker := time.NewTicker(c.conf.BootstrapRetryTime) defer ticker.Stop() @@ -590,7 +590,7 @@ func (c *Client) bootstrapLoop(ctx context.Context) { } } -// retrieveBlockSyncLoop checks if there is any block not received, previous to the latest block height received, to request it on demand + func (c *Client) retrieveBlockSyncLoop(ctx context.Context, msgHandler BlockSyncMessageHandler) { ticker := time.NewTicker(c.conf.BlockSyncRequestIntervalTime) defer ticker.Stop() @@ -600,7 +600,7 @@ func (c *Client) retrieveBlockSyncLoop(ctx context.Context, msgHandler BlockSync case <-ctx.Done(): return case <-ticker.C: - // if no connected at p2p level, dont try + if len(c.Peers()) == 0 { continue } @@ -609,8 +609,8 @@ func (c *Client) retrieveBlockSyncLoop(ctx context.Context, msgHandler BlockSync continue } - // this loop iterates and retrieves all the blocks between the last block applied and the greatest height received, - // skipping any block cached, since are already received. 
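// A simplified sketch of the retrieval loop above: walk every height between the
// next height to apply and the highest height seen on the network, skip heights
// already received, and request only the rest on demand. The real loop resolves
// each missing height's CID from the DHT and loads the block via bitswap; here only
// the selection logic is shown.
package main

import "fmt"

func missingHeights(nextHeight, latestSeen uint64, received map[uint64]struct{}) []uint64 {
	var missing []uint64
	for h := nextHeight; h <= latestSeen; h++ {
		if _, ok := received[h]; ok {
			continue // already received via gossip or an earlier blocksync request
		}
		missing = append(missing, h)
	}
	return missing
}

func main() {
	received := map[uint64]struct{}{11: {}, 13: {}}
	fmt.Println(missingHeights(10, 14, received)) // [10 12 14]
}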
+ + for h := state.NextHeight(); h <= c.blocksReceived.latestSeenHeight; h++ { if ctx.Err() != nil { return @@ -653,7 +653,7 @@ func (c *Client) retrieveBlockSyncLoop(ctx context.Context, msgHandler BlockSync } } -// advertiseBlockSyncCids is used to advertise all the block identifiers (cids) stored in the local store to the DHT on startup + func (c *Client) advertiseBlockSyncCids(ctx context.Context) { ticker := time.NewTicker(c.conf.BlockSyncRequestIntervalTime) defer ticker.Stop() @@ -663,7 +663,7 @@ func (c *Client) advertiseBlockSyncCids(ctx context.Context) { case <-ctx.Done(): return case <-ticker.C: - // if no connected at p2p level, it will try again after ticker time + if len(c.Peers()) == 0 { continue } @@ -693,13 +693,13 @@ func (c *Client) advertiseBlockSyncCids(ctx context.Context) { } } - // just try once and then quit when finished + return } } } -// findConnection returns true in case the node is already connected to the peer specified. + func (c *Client) findConnection(peer peer.AddrInfo) bool { for _, con := range c.Host.Network().Conns() { if peer.ID == con.RemotePeer() { @@ -713,7 +713,7 @@ func getBlockSyncKeyByHeight(height uint64, revision uint64) string { return "/" + blockSyncProtocolPrefix + "/" + strconv.FormatUint(revision, 10) + "/" + strconv.FormatUint(height, 10) } -// validates that the content identifiers advertised in the DHT are valid. + type blockIdValidator struct{} func (blockIdValidator) Validate(_ string, id []byte) error { diff --git a/p2p/events.go b/p2p/events.go index 45a0064a5..f88ca45e6 100644 --- a/p2p/events.go +++ b/p2p/events.go @@ -4,12 +4,12 @@ import ( uevent "github.com/dymensionxyz/dymint/utils/event" ) -/* -------------------------------------------------------------------------- */ -/* Event types */ -/* -------------------------------------------------------------------------- */ + + + const ( - // EventTypeKey is a reserved composite key for event name. + EventTypeKey = "p2p.event" ) @@ -18,12 +18,12 @@ const ( EventNewBlockSyncBlock = "NewBlockSyncBlock" ) -/* -------------------------------------------------------------------------- */ -/* Queries */ -/* -------------------------------------------------------------------------- */ -// EventQueryNewGossipedBlock is the query used for getting EventNewGossipedBlock + + + + var EventQueryNewGossipedBlock = uevent.QueryFor(EventTypeKey, EventNewGossipedBlock) -// EventQueryNewBlockSyncBlock is the query used for getting EventNewBlockSyncBlock + var EventQueryNewBlockSyncBlock = uevent.QueryFor(EventTypeKey, EventNewBlockSyncBlock) diff --git a/p2p/gossip.go b/p2p/gossip.go index 6d4236e4c..2cb7c3f65 100644 --- a/p2p/gossip.go +++ b/p2p/gossip.go @@ -13,28 +13,28 @@ import ( "github.com/dymensionxyz/dymint/types" ) -// buffer size used by gossipSub router to consume received packets (blocks or txs). packets are dropped in case buffer overflows. in case of blocks, it can buffer up to 5 minutes (assuming 200ms block rate) + const pubsubBufferSize = 3000 -// GossipMessage represents message gossiped via P2P network (e.g. transaction, Block etc). + type GossipMessage struct { Data []byte From peer.ID } -// GossiperOption sets optional parameters of Gossiper. + type GossiperOption func(*Gossiper) error type GossipMessageHandler func(ctx context.Context, gossipedBlock []byte) -// WithValidator options registers topic validator for Gossiper. 
+ func WithValidator(validator GossipValidator) GossiperOption { return func(g *Gossiper) error { return g.ps.RegisterTopicValidator(g.topic.String(), wrapValidator(g, validator)) } } -// Gossiper is an abstraction of P2P publish subscribe mechanism. + type Gossiper struct { ownID peer.ID @@ -45,9 +45,9 @@ type Gossiper struct { logger types.Logger } -// NewGossiper creates new, ready to use instance of Gossiper. -// -// Returned Gossiper object can be used for sending (Publishing) and receiving messages in topic identified by topicStr. + + + func NewGossiper(host host.Host, ps *pubsub.PubSub, topicStr string, msgHandler GossipMessageHandler, logger types.Logger, options ...GossiperOption) (*Gossiper, error) { topic, err := ps.Join(topicStr) if err != nil { @@ -76,7 +76,7 @@ func NewGossiper(host host.Host, ps *pubsub.PubSub, topicStr string, msgHandler return g, nil } -// Close is used to disconnect from topic and free resources used by Gossiper. + func (g *Gossiper) Close() error { err := g.ps.UnregisterTopicValidator(g.topic.String()) g.sub.Cancel() @@ -86,12 +86,12 @@ func (g *Gossiper) Close() error { ) } -// Publish publishes data to gossip topic. + func (g *Gossiper) Publish(ctx context.Context, data []byte) error { return g.topic.Publish(ctx, data) } -// ProcessMessages waits for messages published in the topic and execute handler. + func (g *Gossiper) ProcessMessages(ctx context.Context) { for { msg, err := g.sub.Next(ctx) @@ -110,8 +110,8 @@ func (g *Gossiper) ProcessMessages(ctx context.Context) { func wrapValidator(gossiper *Gossiper, validator GossipValidator) pubsub.Validator { return func(_ context.Context, _ peer.ID, msg *pubsub.Message) bool { - // Make sure we don't process our own messages. - // In this case we'll want to return true but not to actually handle the message. + + if msg.GetFrom() == gossiper.ownID { return true } diff --git a/p2p/validator.go b/p2p/validator.go index 513714e4f..4c3b26c27 100644 --- a/p2p/validator.go +++ b/p2p/validator.go @@ -16,17 +16,17 @@ type StateGetter interface { GetRevision() uint64 } -// GossipValidator is a callback function type. + type GossipValidator func(*GossipMessage) bool -// IValidator is an interface for implementing validators of messages gossiped in the p2p network. + type IValidator interface { - // TxValidator creates a pubsub validator that uses the node's mempool to check the - // transaction. If the transaction is valid, then it is added to the mempool + + TxValidator(mp mempool.Mempool, mpoolIDS *nodemempool.MempoolIDs) GossipValidator } -// Validator is a validator for messages gossiped in the p2p network. + type Validator struct { logger types.Logger stateGetter StateGetter @@ -34,7 +34,7 @@ type Validator struct { var _ IValidator = (*Validator)(nil) -// NewValidator creates a new Validator. + func NewValidator(logger types.Logger, blockmanager StateGetter) *Validator { return &Validator{ logger: logger, @@ -42,9 +42,9 @@ func NewValidator(logger types.Logger, blockmanager StateGetter) *Validator { } } -// TxValidator creates a pubsub validator that uses the node's mempool to check the -// transaction. -// False means the TX is considered invalid and should not be gossiped. 
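// A minimal sketch of the publish/subscribe pattern the Gossiper above wraps: join a
// gossipsub topic derived from the chain ID, publish raw bytes, and consume messages
// from the subscription while ignoring our own, as the wrapped validator does. Only
// public go-libp2p / go-libp2p-pubsub calls are used; the topic name and payload are
// illustrative.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/libp2p/go-libp2p"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	h, err := libp2p.New()
	if err != nil {
		panic(err)
	}
	defer h.Close()

	ps, err := pubsub.NewGossipSub(ctx, h)
	if err != nil {
		panic(err)
	}

	topic, err := ps.Join("example-chain-id" + "-block") // namespace + blockTopicSuffix
	if err != nil {
		panic(err)
	}
	sub, err := topic.Subscribe()
	if err != nil {
		panic(err)
	}

	go func() {
		for {
			msg, err := sub.Next(ctx)
			if err != nil {
				return // context cancelled or subscription closed
			}
			if msg.GetFrom() == h.ID() {
				continue // the real validator also short-circuits our own messages
			}
			fmt.Println("received", len(msg.Data), "bytes from", msg.GetFrom())
		}
	}()

	if err := topic.Publish(ctx, []byte("block bytes")); err != nil {
		fmt.Println("publish:", err)
	}
	<-ctx.Done() // no peers are connected in this sketch, so it simply times out
}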
+ + + func (v *Validator) TxValidator(mp mempool.Mempool, mpoolIDS *nodemempool.MempoolIDs) GossipValidator { return func(txMessage *GossipMessage) bool { v.logger.Debug("Transaction received.", "bytes", len(txMessage.Data)) @@ -59,7 +59,7 @@ func (v *Validator) TxValidator(mp mempool.Mempool, mpoolIDS *nodemempool.Mempoo case errors.Is(err, mempool.ErrTxInCache): return true case errors.Is(err, mempool.ErrMempoolIsFull{}): - return true // we have no reason to believe that we should throw away the message + return true case errors.Is(err, mempool.ErrTxTooLarge{}): return false case errors.Is(err, mempool.ErrPreCheck{}): @@ -73,7 +73,7 @@ func (v *Validator) TxValidator(mp mempool.Mempool, mpoolIDS *nodemempool.Mempoo } } -// BlockValidator runs basic checks on the gossiped block + func (v *Validator) BlockValidator() GossipValidator { return func(blockMsg *GossipMessage) bool { var gossipedBlock BlockData diff --git a/rpc/client/client.go b/rpc/client/client.go index d697476fb..e0b6b4a29 100644 --- a/rpc/client/client.go +++ b/rpc/client/client.go @@ -34,7 +34,7 @@ const ( defaultPerPage = 30 maxPerPage = 100 - // TODO(tzdybal): make this configurable + subscribeTimeout = 5 * time.Second ) @@ -46,20 +46,20 @@ const ( SLValidated ) -// ErrConsensusStateNotAvailable is returned because Dymint doesn't use Tendermint consensus. + var ErrConsensusStateNotAvailable = errors.New("consensus state not available in Dymint") var _ rpcclient.Client = &Client{} -// Client implements tendermint RPC client interface. -// -// This is the type that is used in communication between cosmos-sdk app and Dymint. + + + type Client struct { *tmtypes.EventBus config *config.RPCConfig node *node.Node - // cache of chunked genesis data. + genChunks []string } @@ -68,7 +68,7 @@ type ResultBlockValidated struct { Result BlockValidationStatus } -// NewClient returns Client working with given node. + func NewClient(node *node.Node) *Client { return &Client{ EventBus: node.EventBus(), @@ -77,7 +77,7 @@ func NewClient(node *node.Node) *Client { } } -// ABCIInfo returns basic information about application state. + func (c *Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { resInfo, err := c.Query().InfoSync(proxy.RequestInfo) if err != nil { @@ -86,12 +86,12 @@ func (c *Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { return &ctypes.ResultABCIInfo{Response: *resInfo}, nil } -// ABCIQuery queries for data from application. + func (c *Client) ABCIQuery(ctx context.Context, path string, data tmbytes.HexBytes) (*ctypes.ResultABCIQuery, error) { return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions) } -// ABCIQueryWithOptions queries for data from application. + func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmbytes.HexBytes, opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { resQuery, err := c.Query().QuerySync(abci.RequestQuery{ Path: path, @@ -106,19 +106,19 @@ func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmb return &ctypes.ResultABCIQuery{Response: *resQuery}, nil } -// BroadcastTxCommit returns with the responses from CheckTx and DeliverTx. -// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_commit + + func (c *Client) BroadcastTxCommit(ctx context.Context, tx tmtypes.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - // This implementation corresponds to Tendermints implementation from rpc/core/mempool.go. 
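// A self-contained sketch of the decision TxValidator above makes: the boolean tells
// gossipsub whether the transaction message should keep propagating. A duplicate tx
// or a full local mempool is not the sender's fault, so the message stays valid,
// while oversized or precheck-failing txs are dropped. The sentinel errors are local
// stand-ins for the mempool error types used in the real switch.
package main

import (
	"errors"
	"fmt"
)

var (
	errTxInCache     = errors.New("tx already in cache")
	errMempoolIsFull = errors.New("mempool is full")
	errTxTooLarge    = errors.New("tx too large")
	errPreCheck      = errors.New("precheck failed")
)

// keepGossiping mirrors the cases visible in TxValidator: true keeps the message,
// false tells the router to discard it.
func keepGossiping(checkTxErr error) bool {
	switch {
	case checkTxErr == nil:
		return true
	case errors.Is(checkTxErr, errTxInCache):
		return true // we already have it; no reason to penalize the message
	case errors.Is(checkTxErr, errMempoolIsFull):
		return true // a local resource issue, the tx itself may be fine
	case errors.Is(checkTxErr, errTxTooLarge):
		return false
	case errors.Is(checkTxErr, errPreCheck):
		return false
	default:
		return false // conservative choice in this sketch
	}
}

func main() {
	fmt.Println(keepGossiping(nil))              // true
	fmt.Println(keepGossiping(errMempoolIsFull)) // true
	fmt.Println(keepGossiping(errTxTooLarge))    // false
}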
- // ctx.RemoteAddr godoc: If neither HTTPReq nor WSConn is set, an empty string is returned. - // This code is a local client, so we can assume that subscriber is "" - subscriber := "" // ctx.RemoteAddr() + + + + subscriber := "" if err := c.IsSubscriptionAllowed(subscriber); err != nil { return nil, sdkerrors.Wrap(err, "subscription not allowed") } - // Subscribe to tx being committed in block. + subCtx, cancel := context.WithTimeout(ctx, subscribeTimeout) defer cancel() q := tmtypes.EventQueryTxFor(tx) @@ -134,7 +134,7 @@ func (c *Client) BroadcastTxCommit(ctx context.Context, tx tmtypes.Tx) (*ctypes. } }() - // add to mempool and wait for CheckTx result + checkTxResCh := make(chan *abci.Response, 1) err = c.node.Mempool.CheckTx(tx, func(res *abci.Response) { select { @@ -159,15 +159,15 @@ func (c *Client) BroadcastTxCommit(ctx context.Context, tx tmtypes.Tx) (*ctypes. }, nil } - // broadcast tx + err = c.node.P2P.GossipTx(ctx, tx) if err != nil { return nil, fmt.Errorf("tx added to local mempool but failure to broadcast: %w", err) } - // Wait for the tx to be included in a block or timeout. + select { - case msg := <-deliverTxSub.Out(): // The tx was included in a block. + case msg := <-deliverTxSub.Out(): deliverTxRes, _ := msg.Data().(tmtypes.EventDataTx) return &ctypes.ResultBroadcastTxCommit{ CheckTx: *checkTxRes, @@ -201,15 +201,15 @@ func (c *Client) BroadcastTxCommit(ctx context.Context, tx tmtypes.Tx) (*ctypes. } } -// BroadcastTxAsync returns right away, with no response. Does not wait for -// CheckTx nor DeliverTx results. -// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_async + + + func (c *Client) BroadcastTxAsync(ctx context.Context, tx tmtypes.Tx) (*ctypes.ResultBroadcastTx, error) { err := c.node.Mempool.CheckTx(tx, nil, mempool.TxInfo{}) if err != nil { return nil, err } - // gossipTx optimistically + err = c.node.P2P.GossipTx(ctx, tx) if err != nil { return nil, fmt.Errorf("tx added to local mempool but failed to gossip: %w", err) @@ -217,9 +217,9 @@ func (c *Client) BroadcastTxAsync(ctx context.Context, tx tmtypes.Tx) (*ctypes.R return &ctypes.ResultBroadcastTx{Hash: tx.Hash()}, nil } -// BroadcastTxSync returns with the response from CheckTx. Does not wait for -// DeliverTx result. -// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_sync + + + func (c *Client) BroadcastTxSync(ctx context.Context, tx tmtypes.Tx) (*ctypes.ResultBroadcastTx, error) { resCh := make(chan *abci.Response, 1) err := c.node.Mempool.CheckTx(tx, func(res *abci.Response) { @@ -231,16 +231,16 @@ func (c *Client) BroadcastTxSync(ctx context.Context, tx tmtypes.Tx) (*ctypes.Re res := <-resCh r := res.GetCheckTx() - // gossip the transaction if it's in the mempool. - // Note: we have to do this here because, unlike the tendermint mempool reactor, there - // is no routine that gossips transactions after they enter the pool + + + if r.Code == abci.CodeTypeOK { err = c.node.P2P.GossipTx(ctx, tx) if err != nil { - // the transaction must be removed from the mempool if it cannot be gossiped. - // if this does not occur, then the user will not be able to try again using - // this node, as the CheckTx call above will return an error indicating that - // the tx is already in the mempool + + + + _ = c.node.Mempool.RemoveTxByKey(tx.Key()) return nil, fmt.Errorf("gossip tx: %w", err) } @@ -255,7 +255,7 @@ func (c *Client) BroadcastTxSync(ctx context.Context, tx tmtypes.Tx) (*ctypes.Re }, nil } -// Subscribe subscribe given subscriber to a query. 
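// A hedged sketch of the three broadcast paths implemented above, written against
// the tendermint rpcclient.Client interface that this Client satisfies: Async
// returns immediately without any result, Sync waits for CheckTx, and Commit also
// waits for the tx to be included in a block (bounded by subscribeTimeout). The tx
// bytes and the wiring of a concrete client are illustrative and omitted.
package main

import (
	"context"
	"fmt"

	rpcclient "github.com/tendermint/tendermint/rpc/client"
	tmtypes "github.com/tendermint/tendermint/types"
)

func broadcastAll(ctx context.Context, c rpcclient.Client, tx tmtypes.Tx) error {
	// fire and forget: no CheckTx / DeliverTx result is reported
	if _, err := c.BroadcastTxAsync(ctx, tx); err != nil {
		return err
	}

	// wait for CheckTx only; the tx is gossiped once it enters the mempool
	resSync, err := c.BroadcastTxSync(ctx, tx)
	if err != nil {
		return err
	}
	fmt.Println("CheckTx code:", resSync.Code)

	// wait until the tx is committed in a block, or the subscription times out
	resCommit, err := c.BroadcastTxCommit(ctx, tx)
	if err != nil {
		return err
	}
	fmt.Println("DeliverTx code:", resCommit.DeliverTx.Code)
	return nil
}

func main() {
	_ = broadcastAll // a concrete rpcclient.Client (e.g. this package's Client) would be wired in here
}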
+ func (c *Client) Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { q, err := tmquery.New(query) if err != nil { @@ -283,7 +283,7 @@ func (c *Client) Subscribe(ctx context.Context, subscriber, query string, outCap return outc, nil } -// Unsubscribe unsubscribes given subscriber from a query. + func (c *Client) Unsubscribe(ctx context.Context, subscriber, query string) error { q, err := tmquery.New(query) if err != nil { @@ -292,12 +292,12 @@ func (c *Client) Unsubscribe(ctx context.Context, subscriber, query string) erro return c.EventBus.Unsubscribe(ctx, subscriber, q) } -// Genesis returns entire genesis. + func (c *Client) Genesis(_ context.Context) (*ctypes.ResultGenesis, error) { return &ctypes.ResultGenesis{Genesis: c.node.GetGenesis()}, nil } -// GenesisChunked returns given chunk of genesis. + func (c *Client) GenesisChunked(_ context.Context, id uint) (*ctypes.ResultGenesisChunk, error) { genChunks, err := c.GetGenesisChunks() if err != nil { @@ -312,19 +312,19 @@ func (c *Client) GenesisChunked(_ context.Context, id uint) (*ctypes.ResultGenes return nil, fmt.Errorf("service configuration error, there are no chunks") } - // it's safe to do uint(chunkLen)-1 (no overflow) since we always have at least one chunk here + if id > uint(chunkLen)-1 { return nil, fmt.Errorf("there are %d chunks, %d is invalid", chunkLen-1, id) } return &ctypes.ResultGenesisChunk{ TotalChunks: chunkLen, - ChunkNumber: int(id), //nolint:gosec // id is always positive + ChunkNumber: int(id), Data: genChunks[id], }, nil } -// BlockchainInfo returns ABCI block meta information for given height range. + func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { const limit int64 = 20 @@ -336,8 +336,8 @@ func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) baseHeight = 1 } minHeight, maxHeight, err = filterMinMax( - int64(baseHeight), //nolint:gosec // height is non-negative and falls in int64 - int64(c.node.GetBlockManagerHeight()), //nolint:gosec // height is non-negative and falls in int64 + int64(baseHeight), + int64(c.node.GetBlockManagerHeight()), minHeight, maxHeight, limit) @@ -348,7 +348,7 @@ func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) blocks := make([]*tmtypes.BlockMeta, 0, maxHeight-minHeight+1) for height := maxHeight; height >= minHeight; height-- { - block, err := c.node.Store.LoadBlock(uint64(height)) //nolint:gosec // height is non-negative and falls in int64 + block, err := c.node.Store.LoadBlock(uint64(height)) if err != nil { return nil, err } @@ -362,12 +362,12 @@ func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) } return &ctypes.ResultBlockchainInfo{ - LastHeight: int64(c.node.GetBlockManagerHeight()), //nolint:gosec // height is non-negative and falls in int64 + LastHeight: int64(c.node.GetBlockManagerHeight()), BlockMetas: blocks, }, nil } -// NetInfo returns basic information about client P2P connections. + func (c *Client) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { res := ctypes.ResultNetInfo{ Listening: true, @@ -389,24 +389,24 @@ func (c *Client) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { return &res, nil } -// DumpConsensusState always returns error as there is no consensus state in Dymint. 
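// A small sketch of the genesis chunking served by GenesisChunked above: the genesis
// document is split into fixed-size pieces so a large genesis can be fetched chunk by
// chunk, and a request for an out-of-range chunk id is rejected. Chunk size and the
// sample document are illustrative.
package main

import "fmt"

const genesisChunkSize = 16 // the real service uses much larger chunks

func splitGenesis(doc []byte) [][]byte {
	var chunks [][]byte
	for off := 0; off < len(doc); off += genesisChunkSize {
		end := off + genesisChunkSize
		if end > len(doc) {
			end = len(doc)
		}
		chunks = append(chunks, doc[off:end])
	}
	return chunks
}

func genesisChunk(chunks [][]byte, id int) ([]byte, error) {
	if len(chunks) == 0 {
		return nil, fmt.Errorf("service configuration error, there are no chunks")
	}
	if id < 0 || id >= len(chunks) {
		return nil, fmt.Errorf("there are %d chunks, %d is invalid", len(chunks), id)
	}
	return chunks[id], nil
}

func main() {
	chunks := splitGenesis([]byte(`{"chain_id":"example_1234-1","app_state":{}}`))
	first, _ := genesisChunk(chunks, 0)
	fmt.Printf("%d chunks, first: %q\n", len(chunks), first)

	_, err := genesisChunk(chunks, 99)
	fmt.Println(err)
}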
+ func (c *Client) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { return nil, ErrConsensusStateNotAvailable } -// ConsensusState always returns error as there is no consensus state in Dymint. + func (c *Client) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { return nil, ErrConsensusStateNotAvailable } -// ConsensusParams returns consensus params at given height. -// -// Currently, consensus params changes are not supported and this method returns params as defined in genesis. + + + func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { - // TODO(tzdybal): implement consensus params handling: https://github.com/dymensionxyz/dymint/issues/291 + params := c.node.GetGenesis().ConsensusParams return &ctypes.ResultConsensusParams{ - BlockHeight: int64(c.normalizeHeight(height)), //nolint:gosec // height is non-negative and falls in int64 + BlockHeight: int64(c.normalizeHeight(height)), ConsensusParams: tmproto.ConsensusParams{ Block: tmproto.BlockParams{ MaxBytes: params.Block.MaxBytes, @@ -428,14 +428,14 @@ func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.Re }, nil } -// Health endpoint returns empty value. It can be used to monitor service availability. + func (c *Client) Health(ctx context.Context) (*ctypes.ResultHealth, error) { return &ctypes.ResultHealth{}, nil } -// Block method returns BlockID and block itself for given height. -// -// If height is nil, it returns information about last known block. + + + func (c *Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { heightValue := c.normalizeHeight(height) block, err := c.node.Store.LoadBlock(heightValue) @@ -459,7 +459,7 @@ func (c *Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, }, nil } -// BlockByHash returns BlockID and block itself for given hash. + func (c *Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { var h [32]byte copy(h[:], hash) @@ -485,13 +485,13 @@ func (c *Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBl }, nil } -// BlockResults returns information about transactions, events and updates of validator set and consensus params. + func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) { var h uint64 if height == nil { h = c.node.GetBlockManagerHeight() } else { - h = uint64(*height) //nolint:gosec // height is non-negative and falls in int64 + h = uint64(*height) } resp, err := c.node.Store.LoadBlockResponses(h) if err != nil { @@ -499,7 +499,7 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul } return &ctypes.ResultBlockResults{ - Height: int64(h), //nolint:gosec // height is non-negative and falls in int64 + Height: int64(h), TxsResults: resp.DeliverTxs, BeginBlockEvents: resp.BeginBlock.Events, EndBlockEvents: resp.EndBlock.Events, @@ -508,7 +508,7 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul }, nil } -// Commit returns signed header (aka commit) at given height. 
+ func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { heightValue := c.normalizeHeight(height) com, err := c.node.Store.LoadCommit(heightValue) @@ -528,7 +528,7 @@ func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommi return ctypes.NewResultCommit(&block.Header, commit, true), nil } -// Validators returns paginated list of validators at given height. + func (c *Client) Validators(ctx context.Context, heightPtr *int64, _, _ *int) (*ctypes.ResultValidators, error) { height := c.normalizeHeight(heightPtr) @@ -538,14 +538,14 @@ func (c *Client) Validators(ctx context.Context, heightPtr *int64, _, _ *int) (* } return &ctypes.ResultValidators{ - BlockHeight: int64(height), //nolint:gosec // height is non-negative and falls in int64 + BlockHeight: int64(height), Validators: proposer.TMValidators(), Count: 1, Total: 1, }, nil } -// Tx returns detailed information about transaction identified by its hash. + func (c *Client) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { res, err := c.node.TxIndexer.Get(hash) if err != nil { @@ -561,8 +561,8 @@ func (c *Client) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.Resul var proof tmtypes.TxProof if prove { - block, _ := c.node.Store.LoadBlock(uint64(height)) //nolint:gosec // height is non-negative and falls in int64 - blockProof := block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines + block, _ := c.node.Store.LoadBlock(uint64(height)) + blockProof := block.Data.Txs.Proof(int(index)) proof = tmtypes.TxProof{ RootHash: blockProof.RootHash, Data: tmtypes.Tx(blockProof.Data), @@ -580,7 +580,7 @@ func (c *Client) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.Resul }, nil } -// TxSearch returns detailed information about transactions matching query. + func (c *Client) TxSearch(ctx context.Context, query string, prove bool, pagePtr, perPagePtr *int, orderBy string) (*ctypes.ResultTxSearch, error) { q, err := tmquery.New(query) if err != nil { @@ -592,7 +592,7 @@ func (c *Client) TxSearch(ctx context.Context, query string, prove bool, pagePtr return nil, err } - // sort results (must be done before pagination) + switch orderBy { case "desc": sort.Slice(results, func(i, j int) bool { @@ -612,7 +612,7 @@ func (c *Client) TxSearch(ctx context.Context, query string, prove bool, pagePtr return nil, errors.New("expected order_by to be either `asc` or `desc` or empty") } - // paginate results + totalCount := len(results) perPage := validatePerPage(perPagePtr) @@ -629,10 +629,7 @@ func (c *Client) TxSearch(ctx context.Context, query string, prove bool, pagePtr r := results[i] var proof tmtypes.TxProof - /*if prove { - block := nil //env.BlockStore.LoadBlock(r.Height) - proof = block.Data.Txs.Proof(int(r.Index)) // XXX: overflow on 32-bit machines - }*/ + apiResults = append(apiResults, &ctypes.ResultTx{ Hash: tmtypes.Tx(r.Tx).Hash(), @@ -647,8 +644,8 @@ func (c *Client) TxSearch(ctx context.Context, query string, prove bool, pagePtr return &ctypes.ResultTxSearch{Txs: apiResults, TotalCount: totalCount}, nil } -// BlockSearch defines a method to search for a paginated set of blocks by -// BeginBlock and EndBlock event search criteria. 
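// A self-contained sketch of the pagination used by TxSearch and BlockSearch above:
// per_page is clamped to a maximum, the skip count follows from the page number, and
// the window served is results[skip : skip+size]. The constants mirror
// defaultPerPage/maxPerPage from the RPC client; the validation details of the real
// helpers may differ.
package main

import "fmt"

const (
	defaultPerPage = 30
	maxPerPage     = 100
)

func clampPerPage(perPage *int) int {
	if perPage == nil || *perPage <= 0 {
		return defaultPerPage
	}
	if *perPage > maxPerPage {
		return maxPerPage
	}
	return *perPage
}

func pageWindow(totalCount, page, perPage int) (skip, size int) {
	skip = (page - 1) * perPage
	if skip < 0 {
		skip = 0
	}
	size = perPage
	if remaining := totalCount - skip; remaining < size {
		size = remaining
	}
	if size < 0 {
		size = 0
	}
	return skip, size
}

func main() {
	pp := 7
	perPage := clampPerPage(&pp)
	skip, size := pageWindow(23, 3, perPage) // 23 results, page 3, 7 per page
	fmt.Println(skip, size)                  // 14 7 -> results[14:21]
}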
+ + func (c *Client) BlockSearch(ctx context.Context, query string, page, perPage *int, orderBy string) (*ctypes.ResultBlockSearch, error) { q, err := tmquery.New(query) if err != nil { @@ -660,7 +657,7 @@ func (c *Client) BlockSearch(ctx context.Context, query string, page, perPage *i return nil, err } - // Sort the results + switch orderBy { case "desc": sort.Slice(results, func(i, j int) bool { @@ -675,7 +672,7 @@ func (c *Client) BlockSearch(ctx context.Context, query string, page, perPage *i return nil, errors.New("expected order_by to be either `asc` or `desc` or empty") } - // Paginate + totalCount := len(results) perPageVal := validatePerPage(perPage) @@ -687,10 +684,10 @@ func (c *Client) BlockSearch(ctx context.Context, query string, page, perPage *i skipCount := validateSkipCount(pageVal, perPageVal) pageSize := tmmath.MinInt(perPageVal, totalCount-skipCount) - // Fetch the blocks + blocks := make([]*ctypes.ResultBlock, 0, pageSize) for i := skipCount; i < skipCount+pageSize; i++ { - b, err := c.node.Store.LoadBlock(uint64(results[i])) //nolint:gosec // height is non-negative and falls in int64 + b, err := c.node.Store.LoadBlock(uint64(results[i])) if err != nil { return nil, err } @@ -709,11 +706,11 @@ func (c *Client) BlockSearch(ctx context.Context, query string, page, perPage *i return &ctypes.ResultBlockSearch{Blocks: blocks, TotalCount: totalCount}, nil } -// Status returns detailed information about current status of the node. + func (c *Client) Status(_ context.Context) (*ctypes.ResultStatus, error) { latest, err := c.node.Store.LoadBlock(c.node.GetBlockManagerHeight()) if err != nil { - // TODO(tzdybal): extract error + return nil, fmt.Errorf("find latest block: %w", err) } @@ -739,7 +736,7 @@ func (c *Client) Status(_ context.Context) (*ctypes.ResultStatus, error) { txIndexerStatus := "on" result := &ctypes.ResultStatus{ - // TODO(ItzhakBokris): update NodeInfo fields + NodeInfo: p2p.DefaultNodeInfo{ ProtocolVersion: defaultProtocolVersion, DefaultNodeID: id, @@ -756,18 +753,18 @@ func (c *Client) Status(_ context.Context) (*ctypes.ResultStatus, error) { SyncInfo: ctypes.SyncInfo{ LatestBlockHash: latestBlockHash[:], LatestAppHash: latestAppHash[:], - LatestBlockHeight: int64(latestHeight), //nolint:gosec // height is non-negative and falls in int64 + LatestBlockHeight: int64(latestHeight), LatestBlockTime: latestBlockTime, - // CatchingUp is true if the node is not at the latest height received from p2p or da. + CatchingUp: c.node.BlockManager.TargetHeight.Load() > latestHeight, - // TODO(tzdybal): add missing fields - // EarliestBlockHash: earliestBlockHash, - // EarliestAppHash: earliestAppHash, - // EarliestBlockHeight: earliestBloc - // kHeight, - // EarliestBlockTime: time.Unix(0, earliestBlockTimeNano), + + + + + + }, - // TODO(ItzhakBokris): update ValidatorInfo fields + ValidatorInfo: ctypes.ValidatorInfo{ Address: tmbytes.HexBytes(proposer.ConsAddress()), PubKey: proposer.PubKey(), @@ -777,14 +774,14 @@ func (c *Client) Status(_ context.Context) (*ctypes.ResultStatus, error) { return result, nil } -// BroadcastEvidence is not yet implemented. + func (c *Client) BroadcastEvidence(ctx context.Context, evidence tmtypes.Evidence) (*ctypes.ResultBroadcastEvidence, error) { return &ctypes.ResultBroadcastEvidence{ Hash: evidence.Hash(), }, nil } -// NumUnconfirmedTxs returns information about transactions in mempool. 
+ func (c *Client) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) { return &ctypes.ResultUnconfirmedTxs{ Count: c.node.Mempool.Size(), @@ -793,9 +790,9 @@ func (c *Client) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirm }, nil } -// UnconfirmedTxs returns transactions in mempool. + func (c *Client) UnconfirmedTxs(ctx context.Context, limitPtr *int) (*ctypes.ResultUnconfirmedTxs, error) { - // reuse per_page validator + limit := validatePerPage(limitPtr) txs := c.node.Mempool.ReapMaxTxs(limit) @@ -807,9 +804,9 @@ func (c *Client) UnconfirmedTxs(ctx context.Context, limitPtr *int) (*ctypes.Res }, nil } -// CheckTx executes a new transaction against the application to determine its validity. -// -// If valid, the tx is automatically added to the mempool. + + + func (c *Client) CheckTx(ctx context.Context, tx tmtypes.Tx) (*ctypes.ResultCheckTx, error) { res, err := c.Mempool().CheckTxSync(abci.RequestCheckTx{Tx: tx}) if err != nil { @@ -820,20 +817,20 @@ func (c *Client) CheckTx(ctx context.Context, tx tmtypes.Tx) (*ctypes.ResultChec func (c *Client) BlockValidated(height *int64) (*ResultBlockValidated, error) { _, _, chainID := c.node.P2P.Info() - // invalid height + if height == nil || *height < 0 { return &ResultBlockValidated{Result: -1, ChainID: chainID}, nil } - // node has not reached the height yet - if uint64(*height) > c.node.BlockManager.State.Height() { //nolint:gosec // height is non-negative and falls in int64 + + if uint64(*height) > c.node.BlockManager.State.Height() { return &ResultBlockValidated{Result: NotValidated, ChainID: chainID}, nil } - if uint64(*height) <= c.node.BlockManager.SettlementValidator.GetLastValidatedHeight() { //nolint:gosec // height is non-negative and falls in int64 + if uint64(*height) <= c.node.BlockManager.SettlementValidator.GetLastValidatedHeight() { return &ResultBlockValidated{Result: SLValidated, ChainID: chainID}, nil } - // block is applied, and therefore it is validated at block level but not at state update level + return &ResultBlockValidated{Result: P2PValidated, ChainID: chainID}, nil } @@ -859,7 +856,7 @@ func (c *Client) eventsRoutine(sub tmtypes.Subscription, subscriber string, q tm c.Logger.Error("subscription was cancelled, resubscribing...", "err", sub.Err(), "query", q.String()) sub = c.resubscribe(subscriber, q) - if sub == nil { // client was stopped + if sub == nil { return } case <-c.Quit(): @@ -868,7 +865,7 @@ func (c *Client) eventsRoutine(sub tmtypes.Subscription, subscriber string, q tm } } -// Try to resubscribe with exponential backoff. 
+ func (c *Client) resubscribe(subscriber string, q tmpubsub.Query) tmtypes.Subscription { attempts := uint(0) for { @@ -882,7 +879,7 @@ func (c *Client) resubscribe(subscriber string, q tmpubsub.Query) tmtypes.Subscr } attempts++ - time.Sleep((10 << attempts) * time.Millisecond) // 10ms -> 20ms -> 40ms + time.Sleep((10 << attempts) * time.Millisecond) } } @@ -907,7 +904,7 @@ func (c *Client) normalizeHeight(height *int64) uint64 { if height == nil || *height == 0 { heightValue = c.node.GetBlockManagerHeight() } else { - heightValue = uint64(*height) //nolint:gosec // height is non-negative and falls in int64 + heightValue = uint64(*height) } return heightValue @@ -924,7 +921,7 @@ func (c *Client) IsSubscriptionAllowed(subscriber string) error { } func validatePerPage(perPagePtr *int) int { - if perPagePtr == nil { // no per_page parameter + if perPagePtr == nil { return defaultPerPage } @@ -942,13 +939,13 @@ func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { panic(fmt.Sprintf("zero or negative perPage: %d", perPage)) } - if pagePtr == nil || *pagePtr <= 0 { // no page parameter + if pagePtr == nil || *pagePtr <= 0 { return 1, nil } pages := ((totalCount - 1) / perPage) + 1 if pages == 0 { - pages = 1 // one page (even if it's empty) + pages = 1 } page := *pagePtr if page > pages { @@ -968,12 +965,12 @@ func validateSkipCount(page, perPage int) int { } func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { - // filter negatives + if min < 0 || max < 0 { return min, max, errors.New("height must be greater than zero") } - // adjust for default values + if min == 0 { min = 1 } @@ -981,14 +978,14 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { max = height } - // limit max to the height + max = tmmath.MinInt64(height, max) - // limit min to the base + min = tmmath.MaxInt64(base, min) - // limit min to within `limit` of max - // so the total number of blocks returned will be `limit` + + min = tmmath.MaxInt64(min, max-limit+1) if min > max { diff --git a/rpc/client/utils.go b/rpc/client/utils.go index 894c60547..04ec93e09 100644 --- a/rpc/client/utils.go +++ b/rpc/client/utils.go @@ -8,12 +8,12 @@ import ( ) const ( - // genesisChunkSize is the maximum size, in bytes, of each - // chunk in the genesis structure for the chunked API - genesisChunkSize = 16 * 1024 * 1024 // 16 MiB + + + genesisChunkSize = 16 * 1024 * 1024 ) -// GetGenesisChunks returns chunked version of genesis. + func (c *Client) GetGenesisChunks() ([]string, error) { if c.genChunks != nil { return c.genChunks, nil @@ -26,8 +26,8 @@ func (c *Client) GetGenesisChunks() ([]string, error) { return c.genChunks, err } -// initGenesisChunks creates a chunked format of the genesis document to make it easier to -// iterate through larger genesis structures. 
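// Illustrative sketch, not part of the patch: the delay schedule produced by the
// resubscribe loop above, which increments attempts and then sleeps
// (10 << attempts) milliseconds. This prints the first few delays instead of sleeping.
package main

import (
	"fmt"
	"time"
)

func main() {
	attempts := uint(0)
	for i := 0; i < 5; i++ {
		attempts++
		delay := (10 << attempts) * time.Millisecond
		fmt.Printf("attempt %d: wait %v\n", attempts, delay)
	}
	// attempt 1: wait 20ms ... attempt 5: wait 320ms, doubling on each retry
}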
+ + func (c *Client) initGenesisChunks(genesis *tmtypes.GenesisDoc) error { if genesis == nil { return nil diff --git a/rpc/json/handler.go b/rpc/json/handler.go index af33eed97..46d70f126 100644 --- a/rpc/json/handler.go +++ b/rpc/json/handler.go @@ -49,21 +49,21 @@ func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.mux.ServeHTTP(w, r) } -// serveJSONRPC serves HTTP request + func (h *handler) serveJSONRPC(w http.ResponseWriter, r *http.Request) { h.serveJSONRPCforWS(w, r, nil) } -// serveJSONRPC serves HTTP request -// implementation is highly inspired by Gorilla RPC v2 (but simplified a lot) + + func (h *handler) serveJSONRPCforWS(w http.ResponseWriter, r *http.Request, wsConn *wsConn) { - // Create a new codec request. + codecReq := h.codec.NewRequest(r) - // Get service method to be called. + method, err := codecReq.Method() if err != nil { if e, ok := err.(*json2.Error); method == "" && ok && e.Message == "EOF" { - // just serve empty page if request is empty + return } codecReq.WriteError(w, http.StatusBadRequest, err) @@ -76,7 +76,7 @@ func (h *handler) serveJSONRPCforWS(w http.ResponseWriter, r *http.Request, wsCo return } - // Decode the args. + args := reflect.New(methodSpec.argsType) if errRead := codecReq.ReadRequest(args.Interface()); errRead != nil { codecReq.WriteError(w, http.StatusBadRequest, errRead) @@ -98,7 +98,7 @@ func (h *handler) serveJSONRPCforWS(w http.ResponseWriter, r *http.Request, wsCo } rets := methodSpec.m.Call(callArgs) - // Extract the result to error if needed. + var errResult error statusCode := http.StatusOK errInter := rets[1].Interface() @@ -107,11 +107,11 @@ func (h *handler) serveJSONRPCforWS(w http.ResponseWriter, r *http.Request, wsCo errResult, _ = errInter.(error) } - // Prevents Internet Explorer from MIME-sniffing a response away - // from the declared content-type + + w.Header().Set("x-content-type-options", "nosniff") - // Encode the response. + if errResult == nil { var raw json.RawMessage raw, err = tmjson.Marshal(rets[0].Interface()) @@ -153,7 +153,7 @@ func (h *handler) newHandler(methodSpec *method) func(http.ResponseWriter, *http case reflect.String: args.Elem().Field(i).SetString(rawVal) case reflect.Slice: - // []byte is a reflect.Slice of reflect.Uint8's + if field.Type.Elem().Kind() == reflect.Uint8 { err = setByteSliceParam(rawVal, &args, i) } @@ -172,7 +172,7 @@ func (h *handler) newHandler(methodSpec *method) func(http.ResponseWriter, *http args, }) - // Extract the result to error if needed. + statusCode := http.StatusOK errInter := rets[1].Interface() if errInter != nil { @@ -185,8 +185,8 @@ func (h *handler) newHandler(methodSpec *method) func(http.ResponseWriter, *http } func (h *handler) encodeAndWriteResponse(w http.ResponseWriter, result interface{}, errResult error, statusCode int) { - // Prevents Internet Explorer from MIME-sniffing a response away - // from the declared content-type + + w.Header().Set("x-content-type-options", "nosniff") w.Header().Set("Content-Type", "application/json; charset=utf-8") diff --git a/rpc/json/service.go b/rpc/json/service.go index e9c1c8e08..e1952f770 100644 --- a/rpc/json/service.go +++ b/rpc/json/service.go @@ -20,13 +20,13 @@ import ( ) const ( - // defaultSubscribeTimeout is the default timeout for a subscription. + defaultSubscribeTimeout = 5 * time.Second - // defaultSubscribeBufferSize is the default buffer size for a subscription. + defaultSubscribeBufferSize = 100 ) -// GetHTTPHandler returns handler configured to serve Tendermint-compatible RPC. 
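// Illustrative sketch, not part of the patch: splitting a genesis document into
// genesisChunkSize pieces for the chunked API, as GetGenesisChunks/initGenesisChunks
// in rpc/client/utils.go above do. The body of initGenesisChunks is not visible in
// this hunk, so the per-chunk base64 encoding is an assumption (it mirrors upstream
// Tendermint); a tiny chunk size is used below just to produce more than one chunk.
package main

import (
	"encoding/base64"
	"fmt"
)

const genesisChunkSize = 16 * 1024 * 1024 // 16 MiB, as in the hunk above

// chunk splits data into base64-encoded pieces of at most size bytes each.
func chunk(data []byte, size int) []string {
	out := make([]string, 0, (len(data)+size-1)/size)
	for i := 0; i < len(data); i += size {
		end := i + size
		if end > len(data) {
			end = len(data)
		}
		out = append(out, base64.StdEncoding.EncodeToString(data[i:end]))
	}
	return out
}

func main() {
	genesisJSON := []byte(`{"chain_id":"example_1234-1"}`)
	for i, c := range chunk(genesisJSON, 16) { // real code would pass genesisChunkSize
		fmt.Printf("chunk %d: %s\n", i, c)
	}
}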
+ func GetHTTPHandler(l *client.Client, logger types.Logger, opts ...option) (http.Handler, error) { return newHandler(newService(l, logger, opts...), json2.NewCodec(), logger), nil } @@ -137,9 +137,9 @@ func (s *service) Subscribe(req *http.Request, args *subscribeArgs, wsConn *wsCo } go func(subscriptionID []byte) { for msg := range out { - // build the base response + var resp rpctypes.RPCResponse - // Check if subscriptionID is string or int and generate the rest of the response accordingly + subscriptionIDInt, err := strconv.Atoi(string(subscriptionID)) if err != nil { s.logger.Info("Failed to convert subscriptionID to int") @@ -147,7 +147,7 @@ func (s *service) Subscribe(req *http.Request, args *subscribeArgs, wsConn *wsCo } else { resp = rpctypes.NewRPCSuccessResponse(rpctypes.JSONRPCIntID(subscriptionIDInt), msg) } - // Marshal response to JSON and send it to the websocket queue + jsonBytes, err := json.MarshalIndent(resp, "", " ") if err != nil { s.logger.Error("marshal RPCResponse to JSON", "err", err) @@ -180,7 +180,7 @@ func (s *service) UnsubscribeAll(req *http.Request, args *unsubscribeAllArgs) (* return &emptyResult{}, nil } -// info API + func (s *service) Health(req *http.Request, args *healthArgs) (*ctypes.ResultHealth, error) { return s.client.Health(req.Context()) } @@ -202,7 +202,7 @@ func (s *service) Genesis(req *http.Request, args *genesisArgs) (*ctypes.ResultG } func (s *service) GenesisChunked(req *http.Request, args *genesisChunkedArgs) (*ctypes.ResultGenesisChunk, error) { - return s.client.GenesisChunked(req.Context(), uint(args.ID)) //nolint:gosec // id is always positive + return s.client.GenesisChunked(req.Context(), uint(args.ID)) } func (s *service) Block(req *http.Request, args *blockArgs) (*ctypes.ResultBlock, error) { @@ -261,7 +261,7 @@ func (s *service) NumUnconfirmedTxs(req *http.Request, args *numUnconfirmedTxsAr return s.client.NumUnconfirmedTxs(req.Context()) } -// tx broadcast API + func (s *service) BroadcastTxCommit(req *http.Request, args *broadcastTxCommitArgs) (*ctypes.ResultBroadcastTxCommit, error) { return s.client.BroadcastTxCommit(req.Context(), args.Tx) } @@ -274,7 +274,7 @@ func (s *service) BroadcastTxAsync(req *http.Request, args *broadcastTxAsyncArgs return s.client.BroadcastTxAsync(req.Context(), args.Tx) } -// abci API + func (s *service) ABCIQuery(req *http.Request, args *ABCIQueryArgs) (*ctypes.ResultABCIQuery, error) { return s.client.ABCIQueryWithOptions(req.Context(), args.Path, args.Data, rpcclient.ABCIQueryOptions{ Height: int64(args.Height), @@ -286,7 +286,7 @@ func (s *service) ABCIInfo(req *http.Request, args *ABCIInfoArgs) (*ctypes.Resul return s.client.ABCIInfo(req.Context()) } -// evidence API + func (s *service) BroadcastEvidence(req *http.Request, args *broadcastEvidenceArgs) (*ctypes.ResultBroadcastEvidence, error) { return s.client.BroadcastEvidence(req.Context(), args.Evidence) } diff --git a/rpc/json/types.go b/rpc/json/types.go index 19f1f8513..23e84dff6 100644 --- a/rpc/json/types.go +++ b/rpc/json/types.go @@ -18,7 +18,7 @@ type unsubscribeArgs struct { } type unsubscribeAllArgs struct{} -// info API + type ( healthArgs struct{} statusArgs struct{} @@ -86,7 +86,7 @@ type unconfirmedTxsArgs struct { } type numUnconfirmedTxsArgs struct{} -// tx broadcast API + type broadcastTxCommitArgs struct { Tx types.Tx `json:"tx"` } @@ -97,9 +97,9 @@ type broadcastTxAsyncArgs struct { Tx types.Tx `json:"tx"` } -// abci API -// ABCIQueryArgs defines args for ABCI Query method. 
+ + type ABCIQueryArgs struct { Path string `json:"path"` Data bytes.HexBytes `json:"data"` @@ -107,10 +107,10 @@ type ABCIQueryArgs struct { Prove bool `json:"prove"` } -// ABCIInfoArgs defines args for ABCI Info method. + type ABCIInfoArgs struct{} -// evidence API + type broadcastEvidenceArgs struct { Evidence types.Evidence `json:"evidence"` @@ -118,20 +118,20 @@ type broadcastEvidenceArgs struct { type emptyResult struct{} -// JSON-deserialization specific types -// StrInt is an proper int or quoted "int" + + type StrInt int -// StrInt64 is an proper int64 or quoted "int64" + type StrInt64 int64 -// UnmarshalJSON parses JSON (int or int qouted as string) into StrInt64 + func (s *StrInt64) UnmarshalJSON(b []byte) error { return unmarshalStrInt64(b, s) } -// UnmarshalJSON parses JSON (int or int qouted as string) into StrInt + func (s *StrInt) UnmarshalJSON(b []byte) error { var val StrInt64 err := unmarshalStrInt64(b, &val) diff --git a/rpc/json/ws.go b/rpc/json/ws.go index a086ba980..a9728e5a9 100644 --- a/rpc/json/ws.go +++ b/rpc/json/ws.go @@ -40,7 +40,7 @@ func (wsc *wsConn) sendLoop() { } func (h *handler) wsHandler(w http.ResponseWriter, r *http.Request) { - // TODO(tzdybal): configuration options + upgrader := websocket.Upgrader{ ReadBufferSize: 1024, WriteBufferSize: 1024, @@ -89,7 +89,7 @@ func (h *handler) wsHandler(w http.ResponseWriter, r *http.Request) { } if mt != websocket.TextMessage { - // TODO(tzdybal): https://github.com/dymensionxyz/dymint/issues/465 + h.logger.Debug("expected text message") continue } @@ -111,14 +111,14 @@ func newResponseWriter(w io.Writer) http.ResponseWriter { return &wsResponse{w} } -// wsResponse is a simple implementation of http.ResponseWriter + type wsResponse struct { w io.Writer } var _ http.ResponseWriter = wsResponse{} -// Write use underlying writer to write response to WebSocket + func (w wsResponse) Write(bytes []byte) (int, error) { return w.w.Write(bytes) } diff --git a/rpc/middleware/client.go b/rpc/middleware/client.go index 32c232564..6d175fb2b 100644 --- a/rpc/middleware/client.go +++ b/rpc/middleware/client.go @@ -6,14 +6,14 @@ import ( "github.com/tendermint/tendermint/libs/log" ) -// Client is a struct that holds registered middlewares and provides methods -// to run these middlewares on an HTTP handler. + + type Client struct { registry *Registry logger log.Logger } -// NewClient creates and returns a new Client instance. + func NewClient(reg Registry, logger log.Logger) *Client { return &Client{ registry: ®, @@ -21,7 +21,7 @@ func NewClient(reg Registry, logger log.Logger) *Client { } } -// Handle wraps the provided http.Handler with the registered middlewares and returns the final http.Handler. + func (mc *Client) Handle(h http.Handler) http.Handler { registeredMiddlewares := mc.registry.GetRegistered() finalHandler := h diff --git a/rpc/middleware/registry.go b/rpc/middleware/registry.go index 9cbf9a795..70a1b2222 100644 --- a/rpc/middleware/registry.go +++ b/rpc/middleware/registry.go @@ -12,20 +12,20 @@ var ( instance *Registry ) -// HandlerFunc is a type alias for a function that takes an http.Handler and returns a new http.Handler. + type HandlerFunc func(http.Handler) http.Handler -// Middleware is an interface representing a middleware with a Handler method. + type Middleware interface { Handler(logger log.Logger) HandlerFunc } -// Registry is a struct that holds a list of registered middlewares. + type Registry struct { middlewareList []Middleware } -// GetRegistry returns a singleton instance of the Registry. 
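// Illustrative sketch, not part of the patch: the behaviour provided by the
// StrInt/StrInt64 request types in rpc/json/types.go above, i.e. accepting both a
// JSON number and a quoted number. unmarshalStrInt64 itself is outside this hunk,
// so this is a standalone re-implementation of the same idea, not the actual helper.
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
)

type FlexibleInt64 int64

func (s *FlexibleInt64) UnmarshalJSON(b []byte) error {
	// Strip surrounding quotes if the value arrived as a JSON string.
	raw := strings.Trim(string(b), `"`)
	v, err := strconv.ParseInt(raw, 10, 64)
	if err != nil {
		return err
	}
	*s = FlexibleInt64(v)
	return nil
}

func main() {
	var a, b FlexibleInt64
	_ = json.Unmarshal([]byte(`42`), &a)   // plain number
	_ = json.Unmarshal([]byte(`"42"`), &b) // quoted number
	fmt.Println(a, b) // 42 42
}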
+ func GetRegistry() *Registry { once.Do(func() { instance = &Registry{} @@ -33,12 +33,12 @@ func GetRegistry() *Registry { return instance } -// Register adds a Middleware to the list of registered middlewares in the Registry. + func (r *Registry) Register(m Middleware) { r.middlewareList = append(r.middlewareList, m) } -// GetRegistered returns a list of registered middlewares. + func (r *Registry) GetRegistered() []Middleware { return r.middlewareList } diff --git a/rpc/middleware/status.go b/rpc/middleware/status.go index 01e16e559..16172aa48 100644 --- a/rpc/middleware/status.go +++ b/rpc/middleware/status.go @@ -16,7 +16,7 @@ func (s Status) Handler(logger log.Logger) HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { err := s.Err() isHealthy := err == nil - // in case the endpoint is health we return health response + if r.URL.Path == "/health" { w.WriteHeader(http.StatusOK) diff --git a/rpc/server.go b/rpc/server.go index 6368d4ff1..9eafb9f91 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -26,7 +26,7 @@ import ( "github.com/dymensionxyz/dymint/rpc/middleware" ) -// Server handles HTTP and JSON-RPC requests, exposing Tendermint-compatible API. + type Server struct { *service.BaseService @@ -43,21 +43,21 @@ type Server struct { const ( onStopTimeout = 5 * time.Second - // readHeaderTimeout is the timeout for reading the request headers. + readHeaderTimeout = 5 * time.Second ) -// Option is a function that configures the Server. + type Option func(*Server) -// WithListener is an option that sets the listener. + func WithListener(listener net.Listener) Option { return func(d *Server) { d.listener = listener } } -// NewServer creates new instance of Server with given configuration. + func NewServer(node *node.Node, config *config.RPCConfig, logger log.Logger, options ...Option) *Server { srv := &Server{ config: config, @@ -66,16 +66,16 @@ func NewServer(node *node.Node, config *config.RPCConfig, logger log.Logger, opt } srv.BaseService = service.NewBaseService(logger, "RPC", srv) - // Apply options + for _, option := range options { option(srv) } return srv } -// Client returns a Tendermint-compatible rpc Client instance. -// -// This method is called in cosmos-sdk. + + + func (s *Server) Client() rpcclient.Client { return s.client } @@ -84,13 +84,13 @@ func (s *Server) PubSubServer() *pubsub.Server { return s.node.PubSubServer() } -// OnStart is called when Server is started (see service.BaseService for details). + func (s *Server) OnStart() error { s.startEventListener() return s.startRPC() } -// OnStop is called when Server is stopped (see service.BaseService for details). + func (s *Server) OnStop() { ctx, cancel := context.WithTimeout(context.Background(), onStopTimeout) defer cancel() @@ -99,12 +99,12 @@ func (s *Server) OnStop() { } } -// startEventListener registers events to callbacks. + func (s *Server) startEventListener() { go uevent.MustSubscribe(context.Background(), s.PubSubServer(), "RPCNodeHealthStatusHandler", events.QueryHealthStatus, s.onNodeHealthUpdate, s.Logger) } -// onNodeHealthUpdate is a callback function that handles health status events from the node. 
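// Illustrative sketch, not part of the patch: the handler-wrapping pattern behind
// the rpc/middleware Registry and Client.Handle above. The actual wrapping loop in
// Handle is only partially visible in this hunk, so the iteration order here is an
// assumption; the point is that each middleware returns a new http.Handler that
// wraps the previous one.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

type middlewareFunc func(http.Handler) http.Handler

// chain wraps h with each middleware in turn and returns the outermost handler.
func chain(h http.Handler, mws ...middlewareFunc) http.Handler {
	for _, mw := range mws {
		h = mw(h)
	}
	return h
}

func main() {
	final := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "ok")
	})
	logging := func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Println("request:", r.URL.Path)
			next.ServeHTTP(w, r)
		})
	}

	h := chain(final, logging)
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/health", nil))
	fmt.Println("status:", rec.Code, "body:", rec.Body.String())
}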
+ func (s *Server) onNodeHealthUpdate(event pubsub.Message) { eventData, _ := event.Data().(*events.DataHealthStatus) if eventData.Error != nil { @@ -169,13 +169,13 @@ func (s *Server) startRPC() error { handler = c.Handler(handler) } - // Apply Middleware + reg := middleware.GetRegistry() reg.Register(middleware.Status{Err: s.getHealthStatus}) middlewareClient := middleware.NewClient(*reg, s.Logger.With("module", "rpc/middleware")) handler = middlewareClient.Handle(handler) - // Start HTTP server + go func() { err := s.serve(listener, handler) if !errors.Is(err, http.ErrServerClosed) { diff --git a/settlement/config.go b/settlement/config.go index 3f01909e0..4895849fd 100644 --- a/settlement/config.go +++ b/settlement/config.go @@ -5,7 +5,7 @@ import ( "time" ) -// Config for the DymensionLayerClient + type Config struct { KeyringBackend string `mapstructure:"keyring_backend"` NodeAddress string `mapstructure:"settlement_node_address"` @@ -19,9 +19,9 @@ type Config struct { RetryMinDelay time.Duration `mapstructure:"retry_min_delay"` BatchAcceptanceTimeout time.Duration `mapstructure:"batch_acceptance_timeout"` BatchAcceptanceAttempts uint `mapstructure:"batch_acceptance_attempts"` - // For testing only. probably should be refactored + ProposerPubKey string `json:"proposer_pub_key"` - // Config used for sl shared grpc mock + SLGrpc GrpcConfig `mapstructure:",squash"` } diff --git a/settlement/dymension/cosmosclient.go b/settlement/dymension/cosmosclient.go index 3e90eb499..7feaab2f3 100644 --- a/settlement/dymension/cosmosclient.go +++ b/settlement/dymension/cosmosclient.go @@ -17,10 +17,10 @@ import ( sequencertypes "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer" ) -// CosmosClient is an interface for interacting with cosmos client chains. -// It is a wrapper around the cosmos client in order to provide with an interface which can be implemented by -// other clients and can easily be mocked for testing purposes. -// Currently it contains only the methods that are used by the dymension hub client. + + + + type CosmosClient interface { Context() sdkclient.Context StartEventListener() error @@ -41,7 +41,7 @@ type cosmosClient struct { var _ CosmosClient = &cosmosClient{} -// NewCosmosClient creates a new cosmos client + func NewCosmosClient(client cosmosclient.Client) CosmosClient { return &cosmosClient{client} } diff --git a/settlement/dymension/dymension.go b/settlement/dymension/dymension.go index 6a995ef69..101aab439 100644 --- a/settlement/dymension/dymension.go +++ b/settlement/dymension/dymension.go @@ -38,7 +38,7 @@ const ( postBatchSubscriberPrefix = "postBatchSubscriber" ) -// Client is the client for the Dymension Hub. + type Client struct { config *settlement.Config rollappId string @@ -58,7 +58,7 @@ type Client struct { var _ settlement.ClientI = &Client{} -// Init is called once. it initializes the struct members. + func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub.Server, logger types.Logger, options ...settlement.Option) error { interfaceRegistry := cdctypes.NewInterfaceRegistry() cryptocodec.RegisterInterfaces(interfaceRegistry) @@ -76,7 +76,7 @@ func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub c.retryMinDelay = config.RetryMinDelay c.retryMaxDelay = config.RetryMaxDelay - // Apply options + for _, apply := range options { apply(c) } @@ -96,7 +96,7 @@ func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub return nil } -// Start starts the HubClient. 
+ func (c *Client) Start() error { err := c.cosmosClient.StartEventListener() if err != nil { @@ -106,31 +106,31 @@ func (c *Client) Start() error { return nil } -// Stop stops the HubClient. + func (c *Client) Stop() error { return c.cosmosClient.StopEventListener() } -// SubmitBatch posts a batch to the Dymension Hub. it tries to post the batch until it is accepted by the settlement layer. -// it emits success and failure events to the event bus accordingly. + + func (c *Client) SubmitBatch(batch *types.Batch, _ da.Client, daResult *da.ResultSubmitBatch) error { msgUpdateState, err := c.convertBatchToMsgUpdateState(batch, daResult) if err != nil { return fmt.Errorf("convert batch to msg update state: %w", err) } - // TODO: probably should be changed to be a channel, as the eventHandler is also in the HubClient in he produces the event + postBatchSubscriberClient := fmt.Sprintf("%s-%d-%s", postBatchSubscriberPrefix, batch.StartHeight(), uuid.New().String()) subscription, err := c.pubsub.Subscribe(c.ctx, postBatchSubscriberClient, settlement.EventQueryNewSettlementBatchAccepted, 1000) if err != nil { return fmt.Errorf("pub sub subscribe to settlement state updates: %w", err) } - //nolint:errcheck + defer c.pubsub.UnsubscribeAll(c.ctx, postBatchSubscriberClient) for { - // broadcast loop: broadcast the transaction to the blockchain (with infinite retries). + err := c.RunWithRetryInfinitely(func() error { err := c.broadcastBatch(msgUpdateState) if err != nil { @@ -154,7 +154,7 @@ func (c *Client) SubmitBatch(batch *types.Batch, _ da.Client, daResult *da.Resul return fmt.Errorf("broadcast batch: %w", err) } - // Batch was submitted successfully. Wait for it to be accepted by the settlement layer. + timer := time.NewTimer(c.batchAcceptanceTimeout) defer timer.Stop() attempt := uint64(1) @@ -171,20 +171,20 @@ func (c *Client) SubmitBatch(batch *types.Batch, _ da.Client, daResult *da.Resul eventData, _ := event.Data().(*settlement.EventDataNewBatch) if eventData.EndHeight != batch.EndHeight() { c.logger.Debug("Received event for a different batch, ignoring.", "event", eventData) - continue // continue waiting for acceptance of the current batch + continue } c.logger.Info("Batch accepted.", "startHeight", batch.StartHeight(), "endHeight", batch.EndHeight(), "stateIndex", eventData.StateIndex, "dapath", msgUpdateState.DAPath) return nil case <-timer.C: - // Check if the batch was accepted by the settlement layer, and we've just missed the event. + includedBatch, err := c.pollForBatchInclusion(batch.EndHeight()) timer.Reset(c.batchAcceptanceTimeout) - // no error, but still not included + if err == nil && !includedBatch { attempt++ if attempt <= uint64(c.batchAcceptanceAttempts) { - continue // continue waiting for acceptance of the current batch + continue } c.logger.Error( "Timed out waiting for batch inclusion on settlement layer", @@ -193,7 +193,7 @@ func (c *Client) SubmitBatch(batch *types.Batch, _ da.Client, daResult *da.Resul "endHeight", batch.EndHeight(), ) - break // breaks the switch case, and goes back to the broadcast loop + break } if err != nil { c.logger.Error( @@ -205,13 +205,13 @@ func (c *Client) SubmitBatch(batch *types.Batch, _ da.Client, daResult *da.Resul "error", err, ) - continue // continue waiting for acceptance of the current batch + continue } - // all good + c.logger.Info("Batch accepted", "startHeight", batch.StartHeight(), "endHeight", batch.EndHeight()) return nil } - break // failed waiting for acceptance. 
broadcast the batch again + break } } } @@ -237,7 +237,7 @@ func (c *Client) getStateInfo(index, height *uint64) (res *rollapptypes.QueryGet if err != nil { return nil, fmt.Errorf("query state info: %w", err) } - if res == nil { // not supposed to happen + if res == nil { return nil, fmt.Errorf("empty response with nil err: %w", gerrc.ErrUnknown) } return @@ -259,13 +259,13 @@ func (c *Client) getLatestHeight(finalized bool) (res *rollapptypes.QueryGetLate if err != nil { return nil, fmt.Errorf("query state info: %w", err) } - if res == nil { // not supposed to happen + if res == nil { return nil, fmt.Errorf("empty response with nil err: %w", gerrc.ErrUnknown) } return } -// GetLatestBatch returns the latest batch from the Dymension Hub. + func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { res, err := c.getStateInfo(nil, nil) if err != nil { @@ -274,7 +274,7 @@ func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { return convertStateInfoToResultRetrieveBatch(&res.StateInfo) } -// GetBatchAtIndex returns the batch at the given index from the Dymension Hub. + func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, error) { res, err := c.getStateInfo(&index, nil) if err != nil { @@ -283,7 +283,7 @@ func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, return convertStateInfoToResultRetrieveBatch(&res.StateInfo) } -// GetBatchAtHeight returns the batch at the given height from the Dymension Hub. + func (c *Client) GetBatchAtHeight(height uint64) (*settlement.ResultRetrieveBatch, error) { res, err := c.getStateInfo(nil, &height) if err != nil { @@ -292,7 +292,7 @@ func (c *Client) GetBatchAtHeight(height uint64) (*settlement.ResultRetrieveBatc return convertStateInfoToResultRetrieveBatch(&res.StateInfo) } -// GetLatestHeight returns the latest state update height from the settlement layer. + func (c *Client) GetLatestHeight() (uint64, error) { res, err := c.getLatestHeight(false) if err != nil { @@ -301,7 +301,7 @@ func (c *Client) GetLatestHeight() (uint64, error) { return res.Height, nil } -// GetLatestFinalizedHeight returns the latest finalized height from the settlement layer. + func (c *Client) GetLatestFinalizedHeight() (uint64, error) { res, err := c.getLatestHeight(true) if err != nil { @@ -310,16 +310,16 @@ func (c *Client) GetLatestFinalizedHeight() (uint64, error) { return res.Height, nil } -// GetProposerAtHeight return the proposer at height. -// In case of negative height, it will return the latest proposer. + + func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { - // Get all sequencers to find the proposer address + seqs, err := c.GetAllSequencers() if err != nil { return nil, fmt.Errorf("get bonded sequencers: %w", err) } - // Get either latest proposer or proposer at height + var proposerAddr string if height < 0 { proposerAddr, err = c.getLatestProposer() @@ -327,12 +327,12 @@ func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { return nil, fmt.Errorf("get latest proposer: %w", err) } } else { - // Get the state info for the relevant height and get address from there + res, err := c.GetBatchAtHeight(uint64(height)) - // if case of height not found, it may be because it didn't arrive to the hub yet. - // In that case we want to return the current proposer. 
+ + if err != nil { - // If batch not found, fallback to latest proposer + if errors.Is(err, gerrc.ErrNotFound) { proposerAddr, err = c.getLatestProposer() if err != nil { @@ -350,7 +350,7 @@ func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { return nil, fmt.Errorf("proposer is sentinel") } - // Find and return the matching sequencer + for _, seq := range seqs { if seq.SettlementAddress == proposerAddr { return &seq, nil @@ -359,7 +359,7 @@ func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { return nil, fmt.Errorf("proposer not found") } -// GetSequencerByAddress returns a sequencer by its address. + func (c *Client) GetSequencerByAddress(address string) (types.Sequencer, error) { var res *sequencertypes.QueryGetSequencerResponse req := &sequencertypes.QueryGetSequencerRequest{ @@ -402,7 +402,7 @@ func (c *Client) GetSequencerByAddress(address string) (types.Sequencer, error) ), nil } -// GetAllSequencers returns all sequencers of the given rollapp. + func (c *Client) GetAllSequencers() ([]types.Sequencer, error) { var res *sequencertypes.QueryGetSequencersByRollappResponse req := &sequencertypes.QueryGetSequencersByRollappRequest{ @@ -425,7 +425,7 @@ func (c *Client) GetAllSequencers() ([]types.Sequencer, error) { return nil, err } - // not supposed to happen, but just in case + if res == nil { return nil, fmt.Errorf("empty response: %w", gerrc.ErrUnknown) } @@ -455,7 +455,7 @@ func (c *Client) GetAllSequencers() ([]types.Sequencer, error) { return sequencerList, nil } -// GetBondedSequencers returns the bonded sequencers of the given rollapp. + func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { var res *sequencertypes.QueryGetSequencersByRollappByStatusResponse req := &sequencertypes.QueryGetSequencersByRollappByStatusRequest{ @@ -479,7 +479,7 @@ func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { return nil, err } - // not supposed to happen, but just in case + if res == nil { return nil, fmt.Errorf("empty response: %w", gerrc.ErrUnknown) } @@ -508,10 +508,10 @@ func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { return sequencerList, nil } -// GetNextProposer returns the next proposer on the hub. -// In case the current proposer is the next proposer, it returns nil. -// in case there is no next proposer, it returns an empty sequencer struct. -// in case there is a next proposer, it returns the next proposer. + + + + func (c *Client) GetNextProposer() (*types.Sequencer, error) { var ( nextAddr string @@ -577,7 +577,7 @@ func (c *Client) GetRollapp() (*types.Rollapp, error) { return nil, fmt.Errorf("get rollapp: %w", err) } - // not supposed to happen, but just in case + if res == nil { return nil, fmt.Errorf("empty response: %w", gerrc.ErrUnknown) } @@ -586,7 +586,7 @@ func (c *Client) GetRollapp() (*types.Rollapp, error) { return &rollapp, nil } -// GetObsoleteDrs returns the list of deprecated DRS. 
+ func (c *Client) GetObsoleteDrs() ([]uint32, error) { var res *rollapptypes.QueryObsoleteDRSVersionsResponse req := &rollapptypes.QueryObsoleteDRSVersionsRequest{} @@ -606,7 +606,7 @@ func (c *Client) GetObsoleteDrs() ([]uint32, error) { return nil, fmt.Errorf("get rollapp: %w", err) } - // not supposed to happen, but just in case + if res == nil { return nil, fmt.Errorf("empty response: %w", gerrc.ErrUnknown) } @@ -694,7 +694,7 @@ func getCosmosClientOptions(config *settlement.Config) []cosmosclient.Option { return options } -// pollForBatchInclusion polls the hub for the inclusion of a batch with the given end height. + func (c *Client) pollForBatchInclusion(batchEndHeight uint64) (bool, error) { latestBatch, err := c.GetLatestBatch() if err != nil { @@ -768,7 +768,7 @@ func (c *Client) ValidateGenesisBridgeData(data rollapptypes.GenesisBridgeData) return fmt.Errorf("rollapp client: validate genesis bridge: %w", err) } - // not supposed to happen, but just in case + if res == nil { return fmt.Errorf("empty response: %w", gerrc.ErrUnknown) } diff --git a/settlement/dymension/events.go b/settlement/dymension/events.go index ba0a2849e..29280911a 100644 --- a/settlement/dymension/events.go +++ b/settlement/dymension/events.go @@ -12,7 +12,7 @@ import ( ctypes "github.com/tendermint/tendermint/rpc/core/types" ) -// TODO: use types and attributes from dymension proto + const ( eventStateUpdateFmt = "state_update.rollapp_id='%s' AND state_update.status='PENDING'" eventStateUpdateFinalizedFmt = "state_update.rollapp_id='%s' AND state_update.status='FINALIZED'" @@ -42,7 +42,7 @@ func (c *Client) eventHandler() { eventRotationStartedQ := fmt.Sprintf(eventRotationStartedFmt, c.rollappId) eventStateUpdateFinalizedQ := fmt.Sprintf(eventStateUpdateFinalizedFmt, c.rollappId) - // TODO: add validation callback for the event data + eventMap := map[string]string{ eventStateUpdateQ: settlement.EventNewBatchAccepted, eventSequencersListQ: settlement.EventNewBondedSequencer, @@ -66,7 +66,7 @@ func (c *Client) eventHandler() { if err != nil { panic(fmt.Errorf("subscribe to events (%s): %w", eventStateUpdateFinalizedQ, err)) } - defer c.cosmosClient.UnsubscribeAll(c.ctx, subscriber) //nolint:errcheck + defer c.cosmosClient.UnsubscribeAll(c.ctx, subscriber) for { var e ctypes.ResultEvent @@ -74,7 +74,7 @@ func (c *Client) eventHandler() { case <-c.ctx.Done(): return case <-c.cosmosClient.EventListenerQuit(): - // TODO(omritoptix): Fallback to polling + return case e = <-stateUpdatesC: case e = <-sequencersListC: @@ -86,7 +86,7 @@ func (c *Client) eventHandler() { } func (c *Client) handleReceivedEvent(event ctypes.ResultEvent, eventMap map[string]string) { - // Assert value is in map and publish it to the event bus + internalType, ok := eventMap[event.Query] if !ok { c.logger.Error("Ignoring event. 
Type not supported.", "event", event) @@ -105,7 +105,7 @@ func (c *Client) handleReceivedEvent(event ctypes.ResultEvent, eventMap map[stri func convertToNewBatchEvent(rawEventData ctypes.ResultEvent) (*settlement.EventDataNewBatch, error) { var errs []error - // check all expected attributes exists + events := rawEventData.Events if events["state_update.num_blocks"] == nil || events["state_update.start_height"] == nil || events["state_update.state_info_index"] == nil { return nil, fmt.Errorf("missing expected attributes in event") @@ -137,12 +137,12 @@ func convertToNewBatchEvent(rawEventData ctypes.ResultEvent) (*settlement.EventD } func convertToNewSequencerEvent(rawEventData ctypes.ResultEvent) (*settlement.EventDataNewBondedSequencer, error) { - // check all expected attributes exists + events := rawEventData.Events if events["create_sequencer.rollapp_id"] == nil { return nil, fmt.Errorf("missing expected attributes in event") } - // TODO: validate rollappID + if events["create_sequencer.sequencer"] == nil { return nil, fmt.Errorf("missing expected attributes in event") @@ -154,13 +154,13 @@ func convertToNewSequencerEvent(rawEventData ctypes.ResultEvent) (*settlement.Ev } func convertToRotationStartedEvent(rawEventData ctypes.ResultEvent) (*settlement.EventDataRotationStarted, error) { - // check all expected attributes exists + events := rawEventData.Events if events["proposer_rotation_started.rollapp_id"] == nil { return nil, fmt.Errorf("missing expected attributes in event") } - // TODO: validate rollappID + if events["proposer_rotation_started.next_proposer"] == nil { return nil, fmt.Errorf("missing expected attributes in event") diff --git a/settlement/dymension/options.go b/settlement/dymension/options.go index 94ffa07c3..00cc5be2d 100644 --- a/settlement/dymension/options.go +++ b/settlement/dymension/options.go @@ -6,7 +6,7 @@ import ( "github.com/dymensionxyz/dymint/settlement" ) -// WithCosmosClient is an option that sets the CosmosClient. + func WithCosmosClient(cosmosClient CosmosClient) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) @@ -14,7 +14,7 @@ func WithCosmosClient(cosmosClient CosmosClient) settlement.Option { } } -// WithRetryAttempts is an option that sets the number of attempts to retry when interacting with the settlement layer. + func WithRetryAttempts(batchRetryAttempts uint) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) @@ -22,7 +22,7 @@ func WithRetryAttempts(batchRetryAttempts uint) settlement.Option { } } -// WithBatchAcceptanceTimeout is an option that sets the timeout for waiting for a batch to be accepted by the settlement layer. + func WithBatchAcceptanceTimeout(batchAcceptanceTimeout time.Duration) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) @@ -30,7 +30,7 @@ func WithBatchAcceptanceTimeout(batchAcceptanceTimeout time.Duration) settlement } } -// WithBatchAcceptanceAttempts is an option that sets the number of attempts to check if a batch has been accepted by the settlement layer. + func WithBatchAcceptanceAttempts(batchAcceptanceAttempts uint) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) @@ -38,7 +38,7 @@ func WithBatchAcceptanceAttempts(batchAcceptanceAttempts uint) settlement.Option } } -// WithRetryMinDelay is an option that sets the retry function mindelay between hub retry attempts. 
+ func WithRetryMinDelay(retryMinDelay time.Duration) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) @@ -46,7 +46,7 @@ func WithRetryMinDelay(retryMinDelay time.Duration) settlement.Option { } } -// WithRetryMaxDelay is an option that sets the retry function max delay between hub retry attempts. + func WithRetryMaxDelay(retryMaxDelay time.Duration) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) diff --git a/settlement/dymension/utils.go b/settlement/dymension/utils.go index def62fb91..6dbbae0a7 100644 --- a/settlement/dymension/utils.go +++ b/settlement/dymension/utils.go @@ -8,8 +8,8 @@ import ( rollapptypes "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp" ) -// RunWithRetry runs the given operation with retry, doing a number of attempts, and taking the last -// error only. It uses the context of the HubClient. + + func (c *Client) RunWithRetry(operation func() error) error { return retry.Do(operation, retry.Context(c.ctx), @@ -20,8 +20,8 @@ func (c *Client) RunWithRetry(operation func() error) error { ) } -// RunWithRetryInfinitely runs the given operation with retry, doing a number of attempts, and taking the last -// error only. It uses the context of the HubClient. + + func (c *Client) RunWithRetryInfinitely(operation func() error) error { return retry.Do(operation, retry.Context(c.ctx), diff --git a/settlement/errors.go b/settlement/errors.go index b2b4073b7..55496c242 100644 --- a/settlement/errors.go +++ b/settlement/errors.go @@ -6,7 +6,7 @@ import ( "github.com/dymensionxyz/gerr-cosmos/gerrc" ) -// ErrBatchNotAccepted is returned when a batch is not accepted by the settlement layer. + var ErrBatchNotAccepted = fmt.Errorf("batch not accepted: %w", gerrc.ErrUnknown) type ErrNextSequencerAddressFraud struct { diff --git a/settlement/events.go b/settlement/events.go index 2ff811410..931df574f 100644 --- a/settlement/events.go +++ b/settlement/events.go @@ -7,17 +7,17 @@ import ( ) const ( - // EventTypeKey is a reserved composite key for event name. + EventTypeKey = "settlement.event" - // Event types + EventNewBatchAccepted = "NewBatchAccepted" EventNewBondedSequencer = "NewBondedSequencer" EventRotationStarted = "RotationStarted" EventNewBatchFinalized = "NewBatchFinalized" ) -// Convenience objects + var ( EventNewBatchAcceptedList = map[string][]string{EventTypeKey: {EventNewBatchAccepted}} EventNewBondedSequencerList = map[string][]string{EventTypeKey: {EventNewBondedSequencer}} @@ -25,7 +25,7 @@ var ( EventNewBatchFinalizedList = map[string][]string{EventTypeKey: {EventNewBatchFinalized}} ) -// Queries + var ( EventQueryNewSettlementBatchAccepted = uevent.QueryFor(EventTypeKey, EventNewBatchAccepted) EventQueryNewSettlementBatchFinalized = uevent.QueryFor(EventTypeKey, EventNewBatchFinalized) @@ -33,13 +33,13 @@ var ( EventQueryRotationStarted = uevent.QueryFor(EventTypeKey, EventRotationStarted) ) -// Data + type EventDataNewBatch struct { StartHeight uint64 - // EndHeight is the height of the last accepted batch + EndHeight uint64 - // StateIndex is the rollapp-specific index the batch was saved in the SL + StateIndex uint64 } diff --git a/settlement/grpc/grpc.go b/settlement/grpc/grpc.go index c09c72798..45c5deef0 100644 --- a/settlement/grpc/grpc.go +++ b/settlement/grpc/grpc.go @@ -36,8 +36,8 @@ const ( addressPrefix = "dym" ) -// Client is an extension of the base settlement layer client -// for usage in tests and local development. 
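// Illustrative sketch, not part of the patch: standalone use of the retry.Do wrapper
// that RunWithRetry/RunWithRetryInfinitely in settlement/dymension/utils.go above are
// built on. The exact option set and module version used by dymint are not visible in
// this hunk, so the import path (avast/retry-go/v4) and the options chosen here are
// assumptions.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	retry "github.com/avast/retry-go/v4"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	calls := 0
	err := retry.Do(
		func() error {
			calls++
			if calls < 3 {
				return errors.New("transient failure")
			}
			return nil
		},
		retry.Context(ctx),               // stop retrying once the context is done
		retry.Attempts(5),                // bounded number of attempts
		retry.Delay(50*time.Millisecond), // base delay between attempts
		retry.MaxDelay(500*time.Millisecond),
		retry.LastErrorOnly(true), // surface only the final error
	)
	fmt.Println("calls:", calls, "err:", err) // calls: 3 err: <nil>
}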
+ + type Client struct { ctx context.Context rollappID string @@ -59,14 +59,14 @@ func (c *Client) GetRollapp() (*types.Rollapp, error) { }, nil } -// GetObsoleteDrs returns the list of deprecated DRS. + func (c *Client) GetObsoleteDrs() ([]uint32, error) { return []uint32{}, nil } var _ settlement.ClientI = (*Client)(nil) -// Init initializes the mock layer client. + func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub.Server, logger types.Logger, options ...settlement.Option) error { ctx := context.Background() @@ -149,7 +149,7 @@ func initConfig(conf settlement.Config) (proposer string, err error) { return } -// Start starts the mock client + func (c *Client) Start() error { c.logger.Info("Starting grpc mock settlement") @@ -159,7 +159,7 @@ func (c *Client) Start() error { for { select { case <-c.stopchan: - // stop + return case <-tick.C: index, err := c.sl.GetIndex(c.ctx, &slmock.SLGetIndexRequest{}) @@ -185,14 +185,14 @@ func (c *Client) Start() error { return nil } -// Stop stops the mock client + func (c *Client) Stop() error { c.logger.Info("Stopping grpc mock settlement") close(c.stopchan) return nil } -// SubmitBatch saves the batch to the kv store + func (c *Client) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *da.ResultSubmitBatch) error { settlementBatch := c.convertBatchtoSettlementBatch(batch, daResult) err := c.saveBatch(settlementBatch) @@ -200,7 +200,7 @@ func (c *Client) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *d return err } - time.Sleep(10 * time.Millisecond) // mimic a delay in batch acceptance + time.Sleep(10 * time.Millisecond) err = c.pubsub.PublishWithEvents(context.Background(), &settlement.EventDataNewBatch{EndHeight: settlementBatch.EndHeight}, settlement.EventNewBatchAcceptedList) if err != nil { return err @@ -208,7 +208,7 @@ func (c *Client) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *d return nil } -// GetLatestBatch returns the latest batch from the kv store + func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { c.logger.Info("GetLatestBatch grpc", "index", c.slStateIndex) batchResult, err := c.GetBatchAtIndex(atomic.LoadUint64(&c.slStateIndex)) @@ -218,7 +218,7 @@ func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { return batchResult, nil } -// GetBatchAtIndex returns the batch at the given index + func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, error) { batchResult, err := c.retrieveBatchAtStateIndex(index) if err != nil { @@ -230,7 +230,7 @@ func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, } func (c *Client) GetBatchAtHeight(h uint64) (*settlement.ResultRetrieveBatch, error) { - // Binary search implementation + left, right := uint64(1), c.slStateIndex for left <= right { @@ -256,7 +256,7 @@ func (c *Client) GetBatchAtHeight(h uint64) (*settlement.ResultRetrieveBatch, er return nil, gerrc.ErrNotFound } -// GetProposerAtHeight implements settlement.ClientI. + func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { pubKeyBytes, err := hex.DecodeString(c.ProposerPubKey) if err != nil { @@ -279,17 +279,17 @@ func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { ), nil } -// GetSequencerByAddress returns all sequencer information by its address. 
Not implemented since it will not be used in grpc SL + func (c *Client) GetSequencerByAddress(address string) (types.Sequencer, error) { panic("GetSequencerByAddress not implemented in grpc SL") } -// GetAllSequencers implements settlement.ClientI. + func (c *Client) GetAllSequencers() ([]types.Sequencer, error) { return c.GetBondedSequencers() } -// GetBondedSequencers implements settlement.ClientI. + func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { proposer, err := c.GetProposerAtHeight(-1) if err != nil { @@ -298,17 +298,17 @@ func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { return []types.Sequencer{*proposer}, nil } -// GetNextProposer implements settlement.ClientI. + func (c *Client) GetNextProposer() (*types.Sequencer, error) { return nil, nil } -// GetLatestHeight returns the latest state update height from the settlement layer. + func (c *Client) GetLatestHeight() (uint64, error) { return c.latestHeight.Load(), nil } -// GetLatestFinalizedHeight returns the latest finalized height from the settlement layer. + func (c *Client) GetLatestFinalizedHeight() (uint64, error) { return uint64(0), gerrc.ErrNotFound } @@ -320,7 +320,7 @@ func (c *Client) saveBatch(batch *settlement.Batch) error { if err != nil { return err } - // Save the batch to the next state index + c.logger.Debug("Saving batch to grpc settlement layer", "index", c.slStateIndex+1) setBatchReply, err := c.sl.SetBatch(c.ctx, &slmock.SLSetBatchRequest{Index: c.slStateIndex + 1, Batch: b}) if err != nil { @@ -337,7 +337,7 @@ func (c *Client) saveBatch(batch *settlement.Batch) error { return err } c.logger.Debug("Setting grpc SL Index to ", "index", setIndexReply.GetIndex()) - // Save latest height in memory and in store + c.latestHeight.Store(batch.EndHeight) return nil } diff --git a/settlement/local/local.go b/settlement/local/local.go index 4d8a64664..20d3ec8ee 100644 --- a/settlement/local/local.go +++ b/settlement/local/local.go @@ -38,18 +38,18 @@ const ( var ( settlementKVPrefix = []byte{0} - slStateIndexKey = []byte("slStateIndex") // used to recover after reboot + slStateIndexKey = []byte("slStateIndex") ) -// Client is an extension of the base settlement layer client -// for usage in tests and local development. + + type Client struct { rollappID string ProposerPubKey string logger types.Logger pubsub *pubsub.Server - mu sync.Mutex // keep the following in sync with *each other* + mu sync.Mutex slStateIndex uint64 latestHeight uint64 settlementKV store.KV @@ -64,7 +64,7 @@ func (c *Client) GetRollapp() (*types.Rollapp, error) { var _ settlement.ClientI = (*Client)(nil) -// Init initializes the mock layer client. 
+ func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub.Server, logger types.Logger, options ...settlement.Option) error { slstore, proposer, err := initConfig(config) if err != nil { @@ -77,7 +77,7 @@ func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub b, err := settlementKV.Get(slStateIndexKey) if err == nil { slStateIndex = binary.BigEndian.Uint64(b) - // Get the latest height from the stateIndex + var settlementBatch rollapptypes.MsgUpdateState b, err := settlementKV.Get(keyFromIndex(slStateIndex)) if err != nil { @@ -101,9 +101,9 @@ func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub func initConfig(conf settlement.Config) (slstore store.KV, proposer string, err error) { if conf.KeyringHomeDir == "" { - // init store + slstore = store.NewDefaultInMemoryKVStore() - // init proposer pub key + if conf.ProposerPubKey != "" { proposer = conf.ProposerPubKey } else { @@ -135,17 +135,17 @@ func initConfig(conf settlement.Config) (slstore store.KV, proposer string, err return } -// Start starts the mock client + func (c *Client) Start() error { return nil } -// Stop stops the mock client + func (c *Client) Stop() error { return c.settlementKV.Close() } -// PostBatch saves the batch to the kv store + func (c *Client) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *da.ResultSubmitBatch) error { settlementBatch := c.convertBatchToSettlementBatch(batch, daResult) err := c.saveBatch(settlementBatch) @@ -153,14 +153,14 @@ func (c *Client) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *d return err } - time.Sleep(100 * time.Millisecond) // mimic a delay in batch acceptance + time.Sleep(100 * time.Millisecond) ctx := context.Background() uevent.MustPublish(ctx, c.pubsub, settlement.EventDataNewBatch{EndHeight: settlementBatch.EndHeight}, settlement.EventNewBatchAcceptedList) return nil } -// GetLatestBatch returns the latest batch from the kv store + func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { c.mu.Lock() ix := c.slStateIndex @@ -172,17 +172,17 @@ func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { return batchResult, nil } -// GetLatestHeight returns the latest state update height from the settlement layer. + func (c *Client) GetLatestHeight() (uint64, error) { return c.latestHeight, nil } -// GetLatestFinalizedHeight returns the latest finalized height from the settlement layer. + func (c *Client) GetLatestFinalizedHeight() (uint64, error) { return uint64(0), gerrc.ErrNotFound } -// GetBatchAtIndex returns the batch at the given index + func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, error) { batchResult, err := c.retrieveBatchAtStateIndex(index) if err != nil { @@ -196,7 +196,7 @@ func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, func (c *Client) GetBatchAtHeight(h uint64) (*settlement.ResultRetrieveBatch, error) { c.mu.Lock() defer c.mu.Unlock() - // TODO: optimize (binary search, or just make another index) + for i := c.slStateIndex; i > 0; i-- { b, err := c.GetBatchAtIndex(i) if err != nil { @@ -208,10 +208,10 @@ func (c *Client) GetBatchAtHeight(h uint64) (*settlement.ResultRetrieveBatch, er return b, nil } } - return nil, gerrc.ErrNotFound // TODO: need to return a cosmos specific error? + return nil, gerrc.ErrNotFound } -// GetProposerAtHeight implements settlement.ClientI. 
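// Illustrative sketch, not part of the patch: the lookup both GetBatchAtHeight
// implementations above perform, finding the state-update batch whose height range
// contains a given height. The grpc client does this with a binary search over state
// indices, while the local client scans backwards; here the binary-search variant is
// shown over an in-memory slice, assuming batches cover ascending, non-overlapping
// height ranges.
package main

import "fmt"

type batch struct {
	StateIndex  uint64
	StartHeight uint64
	EndHeight   uint64
}

// findBatch returns the batch b with b.StartHeight <= h <= b.EndHeight, if any.
func findBatch(batches []batch, h uint64) (batch, bool) {
	left, right := 0, len(batches)-1
	for left <= right {
		mid := (left + right) / 2
		b := batches[mid]
		switch {
		case h < b.StartHeight:
			right = mid - 1
		case h > b.EndHeight:
			left = mid + 1
		default:
			return b, true
		}
	}
	return batch{}, false
}

func main() {
	batches := []batch{
		{StateIndex: 1, StartHeight: 1, EndHeight: 100},
		{StateIndex: 2, StartHeight: 101, EndHeight: 250},
		{StateIndex: 3, StartHeight: 251, EndHeight: 400},
	}
	b, ok := findBatch(batches, 180)
	fmt.Println(ok, b.StateIndex) // true 2
}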
+ func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { pubKeyBytes, err := hex.DecodeString(c.ProposerPubKey) if err != nil { @@ -234,22 +234,22 @@ func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { ), nil } -// GetSequencerByAddress returns all sequencer information by its address. Not implemented since it will not be used in mock SL + func (c *Client) GetSequencerByAddress(address string) (types.Sequencer, error) { panic("GetSequencerByAddress not implemented in local SL") } -// GetAllSequencers implements settlement.ClientI. + func (c *Client) GetAllSequencers() ([]types.Sequencer, error) { return c.GetBondedSequencers() } -// GetObsoleteDrs returns the list of deprecated DRS. + func (c *Client) GetObsoleteDrs() ([]uint32, error) { return []uint32{}, nil } -// GetBondedSequencers implements settlement.ClientI. + func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { proposer, err := c.GetProposerAtHeight(-1) if err != nil { @@ -258,7 +258,7 @@ func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { return []types.Sequencer{*proposer}, nil } -// GetNextProposer implements settlement.ClientI. + func (c *Client) GetNextProposer() (*types.Sequencer, error) { return nil, nil } @@ -274,7 +274,7 @@ func (c *Client) saveBatch(batch *settlement.Batch) error { c.mu.Lock() defer c.mu.Unlock() - // Save the batch to the next state index + c.slStateIndex++ err = c.settlementKV.Set(keyFromIndex(c.slStateIndex), b) if err != nil { diff --git a/settlement/registry/registry.go b/settlement/registry/registry.go index 9649f5c5b..c8bdbe5e5 100644 --- a/settlement/registry/registry.go +++ b/settlement/registry/registry.go @@ -7,26 +7,26 @@ import ( "github.com/dymensionxyz/dymint/settlement/local" ) -// Client represents a settlement layer client + type Client string const ( - // Local is a mock client for the settlement layer + Local Client = "mock" - // Dymension is a client for interacting with dymension settlement layer + Dymension Client = "dymension" - // Mock client using grpc for a shared use + Grpc Client = "grpc" ) -// A central registry for all Settlement Layer Clients + var clients = map[Client]func() settlement.ClientI{ Local: func() settlement.ClientI { return &local.Client{} }, Dymension: func() settlement.ClientI { return &dymension.Client{} }, Grpc: func() settlement.ClientI { return &grpc.Client{} }, } -// GetClient returns client identified by name. + func GetClient(client Client) settlement.ClientI { f, ok := clients[client] if !ok { @@ -35,7 +35,7 @@ func GetClient(client Client) settlement.ClientI { return f() } -// RegisteredClients returns names of all settlement clients in registry. + func RegisteredClients() []Client { registered := make([]Client, 0, len(clients)) for client := range clients { diff --git a/settlement/settlement.go b/settlement/settlement.go index 4b03327a2..fbbbf9a63 100644 --- a/settlement/settlement.go +++ b/settlement/settlement.go @@ -8,10 +8,10 @@ import ( "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp" ) -// StatusCode is a type for settlement layer return status. + type StatusCode uint64 -// settlement layer return codes. + const ( StatusUnknown StatusCode = iota StatusSuccess @@ -20,12 +20,12 @@ const ( ) type ResultBase struct { - // Code is to determine if the action succeeded. 
+ Code StatusCode - // Message may contain settlement layer specific information (like detailed error message, etc) + Message string - // TODO(omritoptix): Move StateIndex to be part of the batch struct - // StateIndex is the rollapp-specific index the batch was saved in the SL + + StateIndex uint64 } @@ -34,16 +34,16 @@ type BatchMetaData struct { } type Batch struct { - // sequencer is the bech32-encoded address of the sequencer sent the update + Sequencer string StartHeight uint64 EndHeight uint64 BlockDescriptors []rollapp.BlockDescriptor NextSequencer string - // MetaData about the batch in the DA layer + MetaData *BatchMetaData - NumBlocks uint64 // FIXME: can be removed. not used and will be deprecated + NumBlocks uint64 } type ResultRetrieveBatch struct { @@ -56,51 +56,51 @@ type State struct { } type ResultGetHeightState struct { - ResultBase // NOTE: the state index of this will not be populated + ResultBase State } -// Option is a function that sets a parameter on the settlement layer. + type Option func(ClientI) -// ClientI defines generic interface for Settlement layer interaction. + type ClientI interface { - // Init is called once for the client initialization + Init(config Config, rollappId string, pubsub *pubsub.Server, logger types.Logger, options ...Option) error - // Start is called once, after Init. It's implementation should start the client service. + Start() error - // Stop is called once, after Start. It should stop the client service. + Stop() error - // SubmitBatch tries submitting the batch in an async way to the settlement layer. This should create a transaction which (potentially) - // triggers a state transition in the settlement layer. Events are emitted on success or failure. + + SubmitBatch(batch *types.Batch, daClient da.Client, daResult *da.ResultSubmitBatch) error - // GetLatestBatch returns the latest batch from the settlement layer. + GetLatestBatch() (*ResultRetrieveBatch, error) - // GetBatchAtIndex returns the batch at the given index. + GetBatchAtIndex(index uint64) (*ResultRetrieveBatch, error) - // GetSequencerByAddress returns all sequencer information by its address. + GetSequencerByAddress(address string) (types.Sequencer, error) - // GetBatchAtHeight returns the batch at the given height. + GetBatchAtHeight(index uint64) (*ResultRetrieveBatch, error) - // GetLatestHeight returns the latest state update height from the settlement layer. + GetLatestHeight() (uint64, error) - // GetLatestFinalizedHeight returns the latest finalized height from the settlement layer. + GetLatestFinalizedHeight() (uint64, error) - // GetAllSequencers returns all sequencers for this rollapp (bonded and not bonded). + GetAllSequencers() ([]types.Sequencer, error) - // GetBondedSequencers returns the list of the bonded sequencers for this rollapp. + GetBondedSequencers() ([]types.Sequencer, error) - // GetProposerAtHeight returns the current proposer for this chain. + GetProposerAtHeight(height int64) (*types.Sequencer, error) - // GetNextProposer returns the next proposer for this chain in case of a rotation. - // If no rotation is in progress, it should return nil. + + GetNextProposer() (*types.Sequencer, error) - // GetRollapp returns the rollapp information. + GetRollapp() (*types.Rollapp, error) - // GetObsoleteDrs returns the list of deprecated DRS. + GetObsoleteDrs() ([]uint32, error) - // GetSignerBalance returns the balance of the signer. + GetSignerBalance() (types.Balance, error) - // ValidateGenesisBridgeData validates the genesis bridge data. 
+ ValidateGenesisBridgeData(data rollapp.GenesisBridgeData) error } diff --git a/store/badger.go b/store/badger.go index 5fbb244f5..6a67526f2 100644 --- a/store/badger.go +++ b/store/badger.go @@ -16,7 +16,7 @@ import ( const ( gcTimeout = 1 * time.Minute - discardRatio = 0.5 // Recommended by badger. Indicates that a file will be rewritten if half the space can be discarded. + discardRatio = 0.5 ) var ( @@ -24,14 +24,14 @@ var ( _ KVBatch = &BadgerBatch{} ) -// BadgerKV is a implementation of KVStore using Badger v3. + type BadgerKV struct { db *badger.DB closing chan struct{} closeOnce sync.Once } -// NewDefaultInMemoryKVStore builds KVStore that works in-memory (without accessing disk). + func NewDefaultInMemoryKVStore() KV { db, err := badger.Open(badger.DefaultOptions("").WithInMemory(true)) if err != nil { @@ -58,12 +58,12 @@ func NewKVStore(rootDir, dbPath, dbName string, syncWrites bool, logger types.Lo return b } -// NewDefaultKVStore creates instance of default key-value store. + func NewDefaultKVStore(rootDir, dbPath, dbName string) KV { return NewKVStore(rootDir, dbPath, dbName, true, log.NewNopLogger()) } -// Rootify is helper function to make config creation independent of root dir + func Rootify(rootDir, dbPath string) string { if filepath.IsAbs(dbPath) { return dbPath @@ -71,7 +71,7 @@ func Rootify(rootDir, dbPath string) string { return filepath.Join(rootDir, dbPath) } -// Close implements KVStore. + func (b *BadgerKV) Close() error { b.closeOnce.Do(func() { close(b.closing) @@ -85,7 +85,7 @@ func (b *BadgerKV) gc(period time.Duration, discardRatio float64, logger types.L for { select { case <-b.closing: - // Exit the periodic garbage collector function when store is closed + return case <-ticker.C: err := b.db.RunValueLogGC(discardRatio) @@ -97,7 +97,7 @@ func (b *BadgerKV) gc(period time.Duration, discardRatio float64, logger types.L } } -// Get returns value for given key, or error. + func (b *BadgerKV) Get(key []byte) ([]byte, error) { txn := b.db.NewTransaction(false) defer txn.Discard() @@ -111,7 +111,7 @@ func (b *BadgerKV) Get(key []byte) ([]byte, error) { return item.ValueCopy(nil) } -// Set saves key-value mapping in store. + func (b *BadgerKV) Set(key []byte, value []byte) error { txn := b.db.NewTransaction(true) defer txn.Discard() @@ -122,7 +122,7 @@ func (b *BadgerKV) Set(key []byte, value []byte) error { return txn.Commit() } -// Delete removes key and corresponding value from store. + func (b *BadgerKV) Delete(key []byte) error { txn := b.db.NewTransaction(true) defer txn.Discard() @@ -133,20 +133,20 @@ func (b *BadgerKV) Delete(key []byte) error { return txn.Commit() } -// NewBatch creates new batch. -// Note: badger batches should be short lived as they use extra resources. 
+ + func (b *BadgerKV) NewBatch() KVBatch { return &BadgerBatch{ txn: b.db.NewTransaction(true), } } -// BadgerBatch encapsulates badger transaction + type BadgerBatch struct { txn *badger.Txn } -// Set accumulates key-value entries in a transaction + func (bb *BadgerBatch) Set(key, value []byte) error { if err := bb.txn.Set(key, value); err != nil { return err @@ -155,24 +155,24 @@ func (bb *BadgerBatch) Set(key, value []byte) error { return nil } -// Delete removes the key and associated value from store + func (bb *BadgerBatch) Delete(key []byte) error { return bb.txn.Delete(key) } -// Commit commits a transaction + func (bb *BadgerBatch) Commit() error { return bb.txn.Commit() } -// Discard cancels a transaction + func (bb *BadgerBatch) Discard() { bb.txn.Discard() } var _ KVIterator = &BadgerIterator{} -// PrefixIterator returns instance of prefix Iterator for BadgerKV. + func (b *BadgerKV) PrefixIterator(prefix []byte) KVIterator { txn := b.db.NewTransaction(false) iter := txn.NewIterator(badger.DefaultIteratorOptions) @@ -185,7 +185,7 @@ func (b *BadgerKV) PrefixIterator(prefix []byte) KVIterator { } } -// BadgerIterator encapsulates prefix iterator for badger kv store. + type BadgerIterator struct { txn *badger.Txn iter *badger.Iterator @@ -193,22 +193,22 @@ type BadgerIterator struct { lastError error } -// Valid returns true if iterator is inside its prefix, false otherwise. + func (i *BadgerIterator) Valid() bool { return i.iter.ValidForPrefix(i.prefix) } -// Next progresses iterator to the next key-value pair. + func (i *BadgerIterator) Next() { i.iter.Next() } -// Key returns key pointed by iterator. + func (i *BadgerIterator) Key() []byte { return i.iter.Item().KeyCopy(nil) } -// Value returns value pointer by iterator. + func (i *BadgerIterator) Value() []byte { val, err := i.iter.Item().ValueCopy(nil) if err != nil { @@ -217,45 +217,45 @@ func (i *BadgerIterator) Value() []byte { return val } -// Error returns last error that occurred during iteration. + func (i *BadgerIterator) Error() error { return i.lastError } -// Discard has to be called to free iterator resources. + func (i *BadgerIterator) Discard() { i.iter.Close() i.txn.Discard() } -// memoryEfficientBadgerConfig sets badger configuration parameters to reduce memory usage, specially during compactions to avoid memory spikes that causes OOM. -// based on https://github.com/celestiaorg/celestia-node/issues/2905 + + func memoryEfficientBadgerConfig(path string, syncWrites bool) *badger.Options { - opts := badger.DefaultOptions(path) // this must be copied - // SyncWrites is a configuration option in Badger that determines whether writes are immediately synced to disk or no. - // If set to true it writes to the write-ahead log (value log) are synced to disk before being applied to the LSM tree. + opts := badger.DefaultOptions(path) + + opts.SyncWrites = syncWrites - // default 64mib => 0 - disable block cache - // BlockCacheSize specifies how much data cache should hold in memory. - // It improves lookup performance but increases memory consumption. 
- // Not really necessary if disabling compression + + + + opts.BlockCacheSize = 0 - // compressions reduces storage usage but increases memory consumption, specially during compaction + opts.Compression = options.None - // MemTables: maximum size of in-memory data structures before they are flushed to disk - // default 64mib => 16mib - decreases memory usage and makes compaction more often + + opts.MemTableSize = 16 << 20 - // NumMemtables is a configuration option in Badger that sets the maximum number of memtables to keep in memory before stalling - // default 5 => 3 + + opts.NumMemtables = 3 - // NumLevelZeroTables sets the maximum number of Level 0 tables before compaction starts - // default 5 => 3 + + opts.NumLevelZeroTables = 3 - // default 15 => 5 - this prevents memory growth on CPU constraint systems by blocking all writers + opts.NumLevelZeroTablesStall = 5 - // reducing number compactors, makes it slower but reduces memory usage during compaction + opts.NumCompactors = 2 - // makes sure badger is always compacted on shutdown + opts.CompactL0OnClose = true return &opts diff --git a/store/prefix.go b/store/prefix.go index 23842dff3..e0f4f77d6 100644 --- a/store/prefix.go +++ b/store/prefix.go @@ -5,18 +5,18 @@ var ( _ KVBatch = &PrefixKVBatch{} ) -// PrefixKV is a key-value store that prepends all keys with given prefix. + type PrefixKV struct { kv KV prefix []byte } -// Close implements KVStore. + func (p *PrefixKV) Close() error { return p.kv.Close() } -// NewPrefixKV creates new PrefixKV on top of other KVStore. + func NewPrefixKV(kv KV, prefix []byte) *PrefixKV { return &PrefixKV{ kv: kv, @@ -24,22 +24,22 @@ func NewPrefixKV(kv KV, prefix []byte) *PrefixKV { } } -// Get returns value for given key. + func (p *PrefixKV) Get(key []byte) ([]byte, error) { return p.kv.Get(append(p.prefix, key...)) } -// Set updates the value for given key. + func (p *PrefixKV) Set(key []byte, value []byte) error { return p.kv.Set(append(p.prefix, key...), value) } -// Delete deletes key-value pair for given key. + func (p *PrefixKV) Delete(key []byte) error { return p.kv.Delete(append(p.prefix, key...)) } -// NewBatch creates a new batch. + func (p *PrefixKV) NewBatch() KVBatch { return &PrefixKVBatch{ b: p.kv.NewBatch(), @@ -47,33 +47,33 @@ func (p *PrefixKV) NewBatch() KVBatch { } } -// PrefixIterator creates iterator to traverse given prefix. + func (p *PrefixKV) PrefixIterator(prefix []byte) KVIterator { return p.kv.PrefixIterator(append(p.prefix, prefix...)) } -// PrefixKVBatch enables batching of operations on PrefixKV. + type PrefixKVBatch struct { b KVBatch prefix []byte } -// Set adds key-value pair to batch. + func (pb *PrefixKVBatch) Set(key, value []byte) error { return pb.b.Set(append(pb.prefix, key...), value) } -// Delete adds delete operation to batch. + func (pb *PrefixKVBatch) Delete(key []byte) error { return pb.b.Delete(append(pb.prefix, key...)) } -// Commit applies all operations in the batch atomically. + func (pb *PrefixKVBatch) Commit() error { return pb.b.Commit() } -// Discard discards all operations in the batch. + func (pb *PrefixKVBatch) Discard() { pb.b.Discard() } diff --git a/store/pruning.go b/store/pruning.go index 5940f8ae9..5d3ee3ed3 100644 --- a/store/pruning.go +++ b/store/pruning.go @@ -8,7 +8,7 @@ import ( "github.com/dymensionxyz/gerr-cosmos/gerrc" ) -// PruneStore removes blocks up to (but not including) a height. It returns number of blocks pruned. 
+ func (s *DefaultStore) PruneStore(to uint64, logger types.Logger) (uint64, error) { pruned := uint64(0) from, err := s.LoadBaseHeight() @@ -29,7 +29,7 @@ func (s *DefaultStore) PruneStore(to uint64, logger types.Logger) (uint64, error return pruned, nil } -// pruneHeights prunes all store entries that are stored along blocks (blocks,commit,proposer, etc) + func (s *DefaultStore) pruneHeights(from, to uint64, logger types.Logger) (uint64, error) { pruneBlocks := func(batch KVBatch, height uint64) error { hash, err := s.loadHashFromIndex(height) @@ -64,7 +64,7 @@ func (s *DefaultStore) pruneHeights(from, to uint64, logger types.Logger) (uint6 return pruned, err } -// prune is the function that iterates through all heights and prunes according to the pruning function set + func (s *DefaultStore) prune(from, to uint64, prune func(batch KVBatch, height uint64) error, logger types.Logger) (uint64, error) { pruned := uint64(0) batch := s.db.NewBatch() @@ -86,7 +86,7 @@ func (s *DefaultStore) prune(from, to uint64, prune func(batch KVBatch, height u } pruned++ - // flush every 1000 blocks to avoid batches becoming too large + if pruned%1000 == 0 && pruned > 0 { err := flush(batch, h) if err != nil { diff --git a/store/store.go b/store/store.go index f0be24df9..a0ee6dbd8 100644 --- a/store/store.go +++ b/store/store.go @@ -30,33 +30,33 @@ var ( lastBlockSequencerSetPrefix = [1]byte{14} ) -// DefaultStore is a default store implementation. + type DefaultStore struct { db KV } var _ Store = &DefaultStore{} -// New returns new, default store. + func New(kv KV) Store { return &DefaultStore{ db: kv, } } -// Close implements Store. + func (s *DefaultStore) Close() error { return s.db.Close() } -// NewBatch creates a new db batch. + func (s *DefaultStore) NewBatch() KVBatch { return s.db.NewBatch() } -// SaveBlock adds block to the store along with corresponding commit. -// Stored height is updated if block height is greater than stored value. -// In case a batch is provided, the block and commit are added to the batch and not saved. + + + func (s *DefaultStore) SaveBlock(block *types.Block, commit *types.Commit, batch KVBatch) (KVBatch, error) { hash := block.Header.Hash() blockBlob, err := block.MarshalBinary() @@ -69,7 +69,7 @@ func (s *DefaultStore) SaveBlock(block *types.Block, commit *types.Commit, batch return batch, fmt.Errorf("marshal Commit to binary: %w", err) } - // Not sure it's neeeded, as it's not used anywhere + if batch != nil { err = multierr.Append(err, batch.Set(getBlockKey(hash), blockBlob)) err = multierr.Append(err, batch.Set(getCommitKey(hash), commitBlob)) @@ -94,10 +94,10 @@ func (s *DefaultStore) SaveBlock(block *types.Block, commit *types.Commit, batch return nil, nil } -// LoadBlock returns block at given height, or error if it's not found in Store. -// TODO(tzdybal): what is more common access pattern? by height or by hash? -// currently, we're indexing height->hash, and store blocks by hash, but we might as well store by height -// and index hash->height + + + + func (s *DefaultStore) LoadBlock(height uint64) (*types.Block, error) { h, err := s.loadHashFromIndex(height) if err != nil { @@ -106,7 +106,7 @@ func (s *DefaultStore) LoadBlock(height uint64) (*types.Block, error) { return s.LoadBlockByHash(h) } -// LoadBlockByHash returns block with given block header hash, or error if it's not found in Store. 
+ func (s *DefaultStore) LoadBlockByHash(hash [32]byte) (*types.Block, error) { blockData, err := s.db.Get(getBlockKey(hash)) if err != nil { @@ -121,7 +121,7 @@ func (s *DefaultStore) LoadBlockByHash(hash [32]byte) (*types.Block, error) { return block, nil } -// SaveBlockSource saves block validation in Store. + func (s *DefaultStore) SaveBlockSource(height uint64, source types.BlockSource, batch KVBatch) (KVBatch, error) { b := make([]byte, 8) binary.LittleEndian.PutUint64(b, uint64(source)) @@ -132,7 +132,7 @@ func (s *DefaultStore) SaveBlockSource(height uint64, source types.BlockSource, return batch, err } -// LoadBlockSource returns block validation in Store. + func (s *DefaultStore) LoadBlockSource(height uint64) (types.BlockSource, error) { source, err := s.db.Get(getSourceKey(height)) if err != nil { @@ -141,7 +141,7 @@ func (s *DefaultStore) LoadBlockSource(height uint64) (types.BlockSource, error) return types.BlockSource(binary.LittleEndian.Uint64(source)), nil } -// SaveBlockResponses saves block responses (events, tx responses, etc) in Store. + func (s *DefaultStore) SaveBlockResponses(height uint64, responses *tmstate.ABCIResponses, batch KVBatch) (KVBatch, error) { data, err := responses.Marshal() if err != nil { @@ -154,7 +154,7 @@ func (s *DefaultStore) SaveBlockResponses(height uint64, responses *tmstate.ABCI return batch, err } -// LoadBlockResponses returns block results at given height, or error if it's not found in Store. + func (s *DefaultStore) LoadBlockResponses(height uint64) (*tmstate.ABCIResponses, error) { data, err := s.db.Get(getResponsesKey(height)) if err != nil { @@ -168,7 +168,7 @@ func (s *DefaultStore) LoadBlockResponses(height uint64) (*tmstate.ABCIResponses return &responses, nil } -// LoadCommit returns commit for a block at given height, or error if it's not found in Store. + func (s *DefaultStore) LoadCommit(height uint64) (*types.Commit, error) { hash, err := s.loadHashFromIndex(height) if err != nil { @@ -177,7 +177,7 @@ func (s *DefaultStore) LoadCommit(height uint64) (*types.Commit, error) { return s.LoadCommitByHash(hash) } -// LoadCommitByHash returns commit for a block with given block header hash, or error if it's not found in Store. + func (s *DefaultStore) LoadCommitByHash(hash [32]byte) (*types.Commit, error) { commitData, err := s.db.Get(getCommitKey(hash)) if err != nil { @@ -191,8 +191,8 @@ func (s *DefaultStore) LoadCommitByHash(hash [32]byte) (*types.Commit, error) { return commit, nil } -// SaveState updates state saved in Store. Only one State is stored. -// If there is no State in Store, state will be saved. + + func (s *DefaultStore) SaveState(state *types.State, batch KVBatch) (KVBatch, error) { pbState, err := state.ToProto() if err != nil { @@ -210,7 +210,7 @@ func (s *DefaultStore) SaveState(state *types.State, batch KVBatch) (KVBatch, er return batch, err } -// LoadState returns last state saved with UpdateState. + func (s *DefaultStore) LoadState() (*types.State, error) { blob, err := s.db.Get(getStateKey()) if err != nil { @@ -231,7 +231,7 @@ func (s *DefaultStore) LoadState() (*types.State, error) { return &state, nil } -// SaveProposer stores the proposer for given block height in store. 
+ func (s *DefaultStore) SaveProposer(height uint64, proposer types.Sequencer, batch KVBatch) (KVBatch, error) { pbProposer, err := proposer.ToProto() if err != nil { @@ -249,7 +249,7 @@ func (s *DefaultStore) SaveProposer(height uint64, proposer types.Sequencer, bat return batch, err } -// LoadProposer loads proposer at given block height from store. + func (s *DefaultStore) LoadProposer(height uint64) (types.Sequencer, error) { blob, err := s.db.Get(getProposerKey(height)) if err != nil { diff --git a/store/storeIface.go b/store/storeIface.go index 8220b25ad..4cdd2265b 100644 --- a/store/storeIface.go +++ b/store/storeIface.go @@ -7,27 +7,27 @@ import ( "github.com/dymensionxyz/dymint/types" ) -// KV encapsulates key-value store abstraction, in minimalistic interface. -// -// KV MUST be thread safe. + + + type KV interface { - Get(key []byte) ([]byte, error) // Get gets the value for a key. - Set(key []byte, value []byte) error // Set updates the value for a key. - Delete(key []byte) error // Delete deletes a key. - NewBatch() KVBatch // NewBatch creates a new batch. - PrefixIterator(prefix []byte) KVIterator // PrefixIterator creates iterator to traverse given prefix. - Close() error // Close closes the store. + Get(key []byte) ([]byte, error) + Set(key []byte, value []byte) error + Delete(key []byte) error + NewBatch() KVBatch + PrefixIterator(prefix []byte) KVIterator + Close() error } -// KVBatch enables batching of transactions. + type KVBatch interface { - Set(key, value []byte) error // Accumulates KV entries in a transaction. - Delete(key []byte) error // Deletes the given key. - Commit() error // Commits the transaction. - Discard() // Discards the transaction. + Set(key, value []byte) error + Delete(key []byte) error + Commit() error + Discard() } -// KVIterator enables traversal over a given prefix. + type KVIterator interface { Valid() bool Next() @@ -37,37 +37,37 @@ type KVIterator interface { Discard() } -// Store is minimal interface for storing and retrieving blocks, commits and state. + type Store interface { - // NewBatch creates a new db batch. + NewBatch() KVBatch - // SaveBlock saves block along with its seen commit (which will be included in the next block). + SaveBlock(block *types.Block, commit *types.Commit, batch KVBatch) (KVBatch, error) - // LoadBlock returns block at given height, or error if it's not found in Store. + LoadBlock(height uint64) (*types.Block, error) - // LoadBlockByHash returns block with given block header hash, or error if it's not found in Store. + LoadBlockByHash(hash [32]byte) (*types.Block, error) - // SaveBlockResponses saves block responses (events, tx responses, validator set updates, etc) in Store. + SaveBlockResponses(height uint64, responses *tmstate.ABCIResponses, batch KVBatch) (KVBatch, error) - // LoadBlockResponses returns block results at given height, or error if it's not found in Store. + LoadBlockResponses(height uint64) (*tmstate.ABCIResponses, error) - // LoadCommit returns commit for a block at given height, or error if it's not found in Store. + LoadCommit(height uint64) (*types.Commit, error) - // LoadCommitByHash returns commit for a block with given block header hash, or error if it's not found in Store. + LoadCommitByHash(hash [32]byte) (*types.Commit, error) - // SaveState updates state saved in Store. Only one State is stored. - // If there is no State in Store, state will be saved. + + SaveState(state *types.State, batch KVBatch) (KVBatch, error) - // LoadState returns last state saved with UpdateState. 
+ LoadState() (*types.State, error) SaveProposer(height uint64, proposer types.Sequencer, batch KVBatch) (KVBatch, error) diff --git a/test/loadtime/cmd/load/main.go b/test/loadtime/cmd/load/main.go index 456f78b1d..ef45d2c3b 100644 --- a/test/loadtime/cmd/load/main.go +++ b/test/loadtime/cmd/load/main.go @@ -10,20 +10,20 @@ import ( "github.com/dymensionxyz/dymint/test/pb/loadtime" ) -// Ensure all of the interfaces are correctly satisfied. + var ( _ loadtest.ClientFactory = (*ClientFactory)(nil) _ loadtest.Client = (*TxGenerator)(nil) ) -// ClientFactory implements the loadtest.ClientFactory interface. + type ClientFactory struct { ID []byte } -// TxGenerator is responsible for generating transactions. -// TxGenerator holds the set of information that will be used to generate -// each transaction. + + + type TxGenerator struct { id []byte conns uint64 @@ -32,7 +32,7 @@ type TxGenerator struct { } func main() { - u := [16]byte(uuid.New()) // generate run ID on startup + u := [16]byte(uuid.New()) if err := loadtest.RegisterClientFactory("loadtime-client", &ClientFactory{ID: u[:]}); err != nil { panic(err) } @@ -44,7 +44,7 @@ func main() { }) } -// ValidateConfig validates the configuration for the load test. + func (f *ClientFactory) ValidateConfig(cfg loadtest.Config) error { psb, err := payload.MaxUnpaddedSize() if err != nil { @@ -56,9 +56,9 @@ func (f *ClientFactory) ValidateConfig(cfg loadtest.Config) error { return nil } -// NewClient creates a new client for the load test. -// -//nolint:gosec // params are always positive and fall in uint64 + + + func (f *ClientFactory) NewClient(cfg loadtest.Config) (loadtest.Client, error) { return &TxGenerator{ id: f.ID, @@ -68,7 +68,7 @@ func (f *ClientFactory) NewClient(cfg loadtest.Config) (loadtest.Client, error) }, nil } -// GenerateTx generates a new transactions for the load test. + func (c *TxGenerator) GenerateTx() ([]byte, error) { return payload.NewBytes(&loadtime.Payload{ Connections: c.conns, diff --git a/test/loadtime/cmd/report/main.go b/test/loadtime/cmd/report/main.go index 4fd90ebe3..1f17e6f17 100644 --- a/test/loadtime/cmd/report/main.go +++ b/test/loadtime/cmd/report/main.go @@ -19,19 +19,19 @@ const ( var mainPrefix = [1]byte{0} -// BlockStore is a thin wrapper around the DefaultStore which will be used for inspecting the blocks + type BlockStore struct { *store.DefaultStore base uint64 height uint64 } -// Height implements report.BlockStore. + func (b *BlockStore) Height() uint64 { return b.height } -// Base will be used to get the block height of the first block we want to generate the report for + func (b *BlockStore) Base() uint64 { return b.base } diff --git a/test/loadtime/payload/payload.go b/test/loadtime/payload/payload.go index ba538a1df..06f8d30b9 100644 --- a/test/loadtime/payload/payload.go +++ b/test/loadtime/payload/payload.go @@ -16,9 +16,9 @@ const ( maxPayloadSize = 4 * 1024 * 1024 ) -// NewBytes generates a new payload and returns the encoded representation of -// the payload as a slice of bytes. NewBytes uses the fields on the Options -// to create the payload. 
+ + + func NewBytes(p *loadtime.Payload) ([]byte, error) { p.Padding = make([]byte, 1) nullTime := time.Time{} @@ -32,12 +32,12 @@ func NewBytes(p *loadtime.Payload) ([]byte, error) { if p.Size() > maxPayloadSize { return nil, fmt.Errorf("configured size %d is too large (>%d)", p.Size(), maxPayloadSize) } - pSize := int(p.GetSize_()) // #nosec -- The "if" above makes this cast safe + pSize := int(p.GetSize_()) if pSize < us { return nil, fmt.Errorf("configured size %d not large enough to fit unpadded transaction of size %d", pSize, us) } - // We halve the padding size because we transform the TX to hex + p.Padding = make([]byte, (pSize-us)/2) _, err = rand.Read(p.Padding) if err != nil { @@ -49,14 +49,14 @@ func NewBytes(p *loadtime.Payload) ([]byte, error) { } h := []byte(hex.EncodeToString(b)) - // prepend a single key so that the kv store only ever stores a single - // transaction instead of storing all tx and ballooning in size. + + return append([]byte(keyPrefix), h...), nil } -// FromBytes extracts a paylod from the byte representation of the payload. -// FromBytes leaves the padding untouched, returning it to the caller to handle -// or discard per their preference. + + + func FromBytes(b []byte) (*loadtime.Payload, error) { trH := bytes.TrimPrefix(b, []byte(keyPrefix)) if bytes.Equal(b, trH) { @@ -75,8 +75,8 @@ func FromBytes(b []byte) (*loadtime.Payload, error) { return p, nil } -// MaxUnpaddedSize returns the maximum size that a payload may be if no padding -// is included. + + func MaxUnpaddedSize() (int, error) { p := &loadtime.Payload{ Time: time.Now(), @@ -88,9 +88,9 @@ func MaxUnpaddedSize() (int, error) { return CalculateUnpaddedSize(p) } -// CalculateUnpaddedSize calculates the size of the passed in payload for the -// purpose of determining how much padding to add to reach the target size. -// CalculateUnpaddedSize returns an error if the payload Padding field is longer than 1. + + + func CalculateUnpaddedSize(p *loadtime.Payload) (int, error) { if len(p.Padding) != 1 { return 0, fmt.Errorf("expected length of padding to be 1, received %d", len(p.Padding)) diff --git a/test/loadtime/report/report.go b/test/loadtime/report/report.go index 0a8746d7d..f38865ecf 100644 --- a/test/loadtime/report/report.go +++ b/test/loadtime/report/report.go @@ -13,66 +13,66 @@ import ( "github.com/dymensionxyz/dymint/types" ) -// BlockStore defines the set of methods needed by the report generator from -// Tendermint's store.Blockstore type. Using an interface allows for tests to -// more easily simulate the required behavior without having to use the more -// complex real API. + + + + type BlockStore interface { Height() uint64 Base() uint64 LoadBlock(uint64) (*types.Block, error) } -// DataPoint contains the set of data collected for each transaction. + type DataPoint struct { Duration time.Duration BlockTime time.Time Hash []byte } -// Report contains the data calculated from reading the timestamped transactions -// of each block found in the blockstore. + + type Report struct { ID uuid.UUID Rate, Connections, Size uint64 Max, Min, Avg, StdDev time.Duration - // NegativeCount is the number of negative durations encountered while - // reading the transaction data. A negative duration means that - // a transaction timestamp was greater than the timestamp of the block it - // was included in and likely indicates an issue with the experimental - // setup. 
+ + + + + NegativeCount int - // TPS is calculated by taking the highest averaged TPS over all consecutive blocks + TPS uint64 - // All contains all data points gathered from all valid transactions. - // The order of the contents of All is not guaranteed to be match the order of transactions - // in the chain. + + + All []DataPoint - // used for calculating average during report creation. + sum int64 } -// Reports is a collection of Report objects. + type Reports struct { s map[uuid.UUID]Report l []Report - // errorCount is the number of parsing errors encountered while reading the - // transaction data. Parsing errors may occur if a transaction not generated - // by the payload package is submitted to the chain. + + + errorCount int } -// List returns a slice of all reports. + func (rs *Reports) List() []Report { return rs.l } -// ErrorCount returns the number of erronous transactions encountered while creating the report + func (rs *Reports) ErrorCount() int { return rs.errorCount } @@ -100,9 +100,9 @@ func (rs *Reports) addDataPoint(id uuid.UUID, l time.Duration, bt time.Time, has if int64(l) < 0 { r.NegativeCount++ } - // Using an int64 here makes an assumption about the scale and quantity of the data we are processing. - // If all latencies were 2 seconds, we would need around 4 billion records to overflow this. - // We are therefore assuming that the data does not exceed these bounds. + + + r.sum += int64(l) rs.s[id] = r } @@ -122,14 +122,14 @@ func (rs *Reports) calculateAll() { } } -// calculateTPS calculates the TPS by calculating a average moving window with a minimum size of 1 second over all consecutive blocks + func calculateTPS(in []DataPoint) uint64 { - // create a map of block times to the number of transactions in that block + blocks := make(map[time.Time]int) for _, v := range in { blocks[v.BlockTime]++ } - // sort the blocks by time + var blockTimes []time.Time for k := range blocks { blockTimes = append(blockTimes, k) @@ -137,7 +137,7 @@ func calculateTPS(in []DataPoint) uint64 { sort.Slice(blockTimes, func(i, j int) bool { return blockTimes[i].Before(blockTimes[j]) }) - // Iterave over the blocks and calculate the tps starting from each block + TPS := uint64(0) for index, blockTime := range blockTimes { currentTx := blocks[blockTime] @@ -160,8 +160,8 @@ func (rs *Reports) addError() { rs.errorCount++ } -// GenerateFromBlockStore creates a Report using the data in the provided -// BlockStore. + + func GenerateFromBlockStore(s BlockStore) (*Reports, error) { type payloadData struct { id uuid.UUID @@ -179,11 +179,11 @@ func GenerateFromBlockStore(s BlockStore) (*Reports, error) { s: make(map[uuid.UUID]Report), } - // Deserializing to proto can be slow but does not depend on other data - // and can therefore be done in parallel. - // Deserializing in parallel does mean that the resulting data is - // not guaranteed to be delivered in the same order it was given to the - // worker pool. 
+ + + + + const poolSize = 16 txc := make(chan txData) diff --git a/testutil/block.go b/testutil/block.go index f60257055..a07944d9e 100644 --- a/testutil/block.go +++ b/testutil/block.go @@ -37,14 +37,14 @@ const ( DefaultTestBatchSize = 5 ) -/* -------------------------------------------------------------------------- */ -/* utils */ -/* -------------------------------------------------------------------------- */ + + + func GetManagerWithProposerKey(conf config.BlockManagerConfig, proposerKey crypto.PrivKey, settlementlc settlement.ClientI, genesisHeight, storeInitialHeight, storeLastBlockHeight int64, proxyAppConns proxy.AppConns, mockStore store.Store) (*block.Manager, error) { genesis := GenerateGenesis(genesisHeight) - // Change the LastBlockHeight to avoid calling InitChainSync within the manager - // And updating the state according to the genesis. + + raw, _ := proposerKey.GetPublic().Raw() pubkey := ed25519.PubKey(raw) @@ -67,7 +67,7 @@ func GetManagerWithProposerKey(conf config.BlockManagerConfig, proposerKey crypt return nil, err } - // Init the settlement layer mock + if settlementlc == nil { settlementlc = slregistry.GetClient(slregistry.Local) } @@ -96,7 +96,7 @@ func GetManagerWithProposerKey(conf config.BlockManagerConfig, proposerKey crypt mp := mempoolv1.NewTxMempool(logger, tmcfg.DefaultMempoolConfig(), proxyApp.Mempool(), 0) mpIDs := nodemempool.NewMempoolIDs() - // Init p2p client and validator + p2pKey, _, _ := crypto.GenerateEd25519Key(rand.Reader) p2pClient, err := p2p.NewClient(config.P2PConfig{ GossipSubCacheSize: 50, diff --git a/testutil/logger.go b/testutil/logger.go index 3ef7a902d..c71789897 100644 --- a/testutil/logger.go +++ b/testutil/logger.go @@ -6,15 +6,15 @@ import ( "testing" ) -// TODO(tzdybal): move to some common place -// Logger is a simple, yet thread-safe, logger intended for use in unit tests. + + type Logger struct { mtx *sync.Mutex T *testing.T } -// NewLogger create a Logger that outputs data using given testing.T instance. + func NewLogger(t *testing.T) *Logger { return &Logger{ mtx: new(sync.Mutex), @@ -22,7 +22,7 @@ func NewLogger(t *testing.T) *Logger { } } -// Debug prints a debug message. + func (t *Logger) Debug(msg string, keyvals ...interface{}) { t.T.Helper() t.mtx.Lock() @@ -30,7 +30,7 @@ func (t *Logger) Debug(msg string, keyvals ...interface{}) { t.T.Log(append([]interface{}{"DEBUG: " + msg}, keyvals...)...) } -// Info prints an info message. + func (t *Logger) Info(msg string, keyvals ...interface{}) { t.T.Helper() t.mtx.Lock() @@ -38,7 +38,7 @@ func (t *Logger) Info(msg string, keyvals ...interface{}) { t.T.Log(append([]interface{}{"INFO: " + msg}, keyvals...)...) } -// Error prints an error message. + func (t *Logger) Error(msg string, keyvals ...interface{}) { t.T.Helper() t.mtx.Lock() @@ -46,24 +46,24 @@ func (t *Logger) Error(msg string, keyvals ...interface{}) { t.T.Log(append([]interface{}{"ERROR: " + msg}, keyvals...)...) } -// MockLogger is a fake logger that accumulates all the inputs. -// -// It can be used in tests to ensure that certain messages was logged with correct severity. + + + type MockLogger struct { DebugLines, InfoLines, ErrLines []string } -// Debug saves a debug message. + func (t *MockLogger) Debug(msg string, keyvals ...interface{}) { t.DebugLines = append(t.DebugLines, fmt.Sprint(append([]interface{}{msg}, keyvals...)...)) } -// Info saves an info message. 
+ func (t *MockLogger) Info(msg string, keyvals ...interface{}) { t.InfoLines = append(t.InfoLines, fmt.Sprint(append([]interface{}{msg}, keyvals...)...)) } -// Error saves an error message. + func (t *MockLogger) Error(msg string, keyvals ...interface{}) { t.ErrLines = append(t.ErrLines, fmt.Sprint(append([]interface{}{msg}, keyvals...)...)) } diff --git a/testutil/mocks.go b/testutil/mocks.go index 176f8d6d7..e750c081b 100644 --- a/testutil/mocks.go +++ b/testutil/mocks.go @@ -29,27 +29,27 @@ import ( rollapptypes "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp" ) -// ABCIMethod is a string representing an ABCI method + type ABCIMethod string const ( - // InitChain is the string representation of the InitChain ABCI method + InitChain ABCIMethod = "InitChain" - // CheckTx is the string representation of the CheckTx ABCI method + CheckTx ABCIMethod = "CheckTx" - // BeginBlock is the string representation of the BeginBlockMethod ABCI method + BeginBlock ABCIMethod = "BeginBlock" - // DeliverTx is the string representation of the DeliverTx ABCI method + DeliverTx ABCIMethod = "DeliverTx" - // EndBlock is the string representation of the EndBlock ABCI method + EndBlock ABCIMethod = "EndBlock" - // Commit is the string representation of the Commit ABCI method + Commit ABCIMethod = "Commit" - // Info is the string representation of the Info ABCI method + Info ABCIMethod = "Info" ) -// GetABCIProxyAppMock returns a dummy abci proxy app mock for testing + func GetABCIProxyAppMock(logger log.Logger) proxy.AppConns { app := GetAppMock() @@ -60,7 +60,7 @@ func GetABCIProxyAppMock(logger log.Logger) proxy.AppConns { return proxyApp } -// GetAppMock returns a dummy abci app mock for testing + func GetAppMock(excludeMethods ...ABCIMethod) *tmmocks.MockApplication { app := &tmmocks.MockApplication{} gbdBz, _ := tmjson.Marshal(rollapptypes.GenesisBridgeData{}) @@ -72,7 +72,7 @@ func GetAppMock(excludeMethods ...ABCIMethod) *tmmocks.MockApplication { app.On("Commit", mock.Anything).Return(abci.ResponseCommit{}) app.On("Info", mock.Anything).Return(abci.ResponseInfo{LastBlockHeight: 0, LastBlockAppHash: []byte{0}}) - // iterate exclude methods and unset the mock + for _, method := range excludeMethods { UnsetMockFn(app.On(string(method))) } @@ -92,7 +92,7 @@ var UnsetMockFn = func(call *mock.Call) { } } -// CountMockCalls returns the number of times a mock specific function was called + func CountMockCalls(totalCalls []mock.Call, methodName string) int { var count int for _, call := range totalCalls { @@ -103,7 +103,7 @@ func CountMockCalls(totalCalls []mock.Call, methodName string) int { return count } -// MockStore is a mock store for testing + type MockStore struct { ShoudFailSaveState bool ShouldFailUpdateStateWithBatch bool @@ -111,8 +111,8 @@ type MockStore struct { height uint64 } -// SetHeight sets the height of the mock store -// Don't set the height to mock failure in setting the height + + func (m *MockStore) SetHeight(height uint64) { m.height = height } @@ -125,7 +125,7 @@ func (m *MockStore) NextHeight() uint64 { return m.height + 1 } -// UpdateState updates the state of the mock store + func (m *MockStore) SaveState(state *types.State, batch store.KVBatch) (store.KVBatch, error) { if batch != nil && m.ShouldFailUpdateStateWithBatch || m.ShoudFailSaveState && batch == nil { return nil, errors.New("failed to update state") @@ -133,7 +133,7 @@ func (m *MockStore) SaveState(state *types.State, batch store.KVBatch) (store.KV return m.DefaultStore.SaveState(state, batch) } 
-// NewMockStore returns a new mock store + func NewMockStore() *MockStore { defaultStore := store.New(store.NewDefaultInMemoryKVStore()) return &MockStore{ @@ -148,27 +148,27 @@ const ( connectionRefusedErrorMessage = "connection refused" ) -// DALayerClientSubmitBatchError is a mock data availability layer client that can be used to test error handling + type DALayerClientSubmitBatchError struct { localda.DataAvailabilityLayerClient } -// SubmitBatch submits a batch to the data availability layer + func (s *DALayerClientSubmitBatchError) SubmitBatch(_ *types.Batch) da.ResultSubmitBatch { return da.ResultSubmitBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: connectionRefusedErrorMessage, Error: errors.New(connectionRefusedErrorMessage)}} } -// DALayerClientRetrieveBatchesError is a mock data availability layer client that can be used to test error handling + type DALayerClientRetrieveBatchesError struct { localda.DataAvailabilityLayerClient } -// RetrieveBatches retrieves batches from the data availability layer + func (m *DALayerClientRetrieveBatchesError) RetrieveBatches(_ *da.DASubmitMetaData) da.ResultRetrieveBatch { return da.ResultRetrieveBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: batchNotFoundErrorMessage, Error: da.ErrBlobNotFound}} } -// SubscribeMock is a mock to provide a subscription like behavior for testing + type SubscribeMock struct { messageCh chan interface{} } @@ -195,8 +195,8 @@ type MockDA struct { func NewMockDA(t *testing.T) (*MockDA, error) { mockDA := &MockDA{} - // Create DA - // init celestia DA with mock RPC client + + mockDA.DaClient = registry.GetClient("celestia") config := celestia.Config{ @@ -233,7 +233,7 @@ func NewMockDA(t *testing.T) (*MockDA, error) { nIDSize := 1 tree := exampleNMT(nIDSize, true, 1, 2, 3, 4) - // build a proof for an NID that is within the namespace range of the tree + proof, _ := tree.ProveNamespace(mockDA.NID) mockDA.BlobProof = blob.Proof([]*nmt.Proof{&proof}) @@ -244,7 +244,7 @@ func NewMockDA(t *testing.T) (*MockDA, error) { return mockDA, nil } -// exampleNMT creates a new NamespacedMerkleTree with the given namespace ID size and leaf namespace IDs. Each byte in the leavesNIDs parameter corresponds to one leaf's namespace ID. If nidSize is greater than 1, the function repeats each NID in leavesNIDs nidSize times before prepending it to the leaf data. 
+ func exampleNMT(nidSize int, ignoreMaxNamespace bool, leavesNIDs ...byte) *nmt.NamespacedMerkleTree { tree := nmt.New(sha256.New(), nmt.NamespaceIDSize(nidSize), nmt.IgnoreMaxNamespace(ignoreMaxNamespace)) for i, nid := range leavesNIDs { diff --git a/testutil/node.go b/testutil/node.go index 1f7f0955f..ac7e294b1 100644 --- a/testutil/node.go +++ b/testutil/node.go @@ -24,7 +24,7 @@ import ( func CreateNode(isSequencer bool, blockManagerConfig *config.BlockManagerConfig, genesis *types.GenesisDoc) (*node.Node, error) { app := GetAppMock(EndBlock) - // Create proxy app + clientCreator := proxy.NewLocalClientCreator(app) proxyApp := proxy.NewAppConns(clientCreator) err := proxyApp.Start() @@ -48,7 +48,7 @@ func CreateNode(isSequencer bool, blockManagerConfig *config.BlockManagerConfig, signingKey, pubkey, _ := crypto.GenerateEd25519Key(rand.Reader) pubkeyBytes, _ := pubkey.Raw() - // Node config + nodeConfig := config.DefaultNodeConfig if blockManagerConfig == nil { @@ -62,7 +62,7 @@ func CreateNode(isSequencer bool, blockManagerConfig *config.BlockManagerConfig, } nodeConfig.BlockManagerConfig = *blockManagerConfig - // SL config + nodeConfig.SettlementConfig = settlement.Config{ProposerPubKey: hex.EncodeToString(pubkeyBytes)} node, err := node.NewNode( diff --git a/testutil/p2p.go b/testutil/p2p.go index 8dd88fbe8..318abb499 100644 --- a/testutil/p2p.go +++ b/testutil/p2p.go @@ -45,10 +45,10 @@ type HostDescr struct { RealKey bool } -// copied from libp2p net/mock + var blackholeIP6 = net.ParseIP("100::") -// copied from libp2p net/mock + func getAddr(sk crypto.PrivKey) (multiaddr.Multiaddr, error) { id, err := peer.IDFromPrivateKey(sk) if err != nil { @@ -92,7 +92,7 @@ func StartTestNetwork(ctx context.Context, t *testing.T, n int, conf map[int]Hos err := mnet.LinkAll() require.NoError(err) - // prepare seed node lists + seeds := make([]string, n) for src, descr := range conf { require.Less(src, n) diff --git a/testutil/rpc.go b/testutil/rpc.go index 80b31c1e6..bbf17dae0 100644 --- a/testutil/rpc.go +++ b/testutil/rpc.go @@ -13,13 +13,13 @@ import ( ) func CreateLocalServer(t *testing.T) (*rpc.Server, net.Listener) { - // Create a new local listener + listener, err := nettest.NewLocalListener("tcp") require.NoError(t, err) serverReadyCh := make(chan bool, 1) var server *rpc.Server - // Start server with listener + go func() { node, err := CreateNode(true, nil, GenerateGenesis(0)) require.NoError(t, err) diff --git a/testutil/types.go b/testutil/types.go index 7f04ddd4d..70ce267db 100644 --- a/testutil/types.go +++ b/testutil/types.go @@ -21,9 +21,9 @@ import ( ) const ( - // BlockVersion is the default block version for testing + BlockVersion = 1 - // AppVersion is the default app version for testing + AppVersion = 0 SettlementAccountPrefix = "dym" @@ -63,7 +63,7 @@ func GenerateSettlementAddress() string { return addr } -// generateBlock generates random blocks. + func generateBlock(height uint64, proposerHash []byte, lastHeaderHash [32]byte) *types.Block { h := createRandomHashes() @@ -135,7 +135,7 @@ func GenerateBlocksWithTxs(startHeight uint64, num uint64, proposerKey crypto.Pr return blocks, nil } -// GenerateBlocks generates random blocks. 
+ func GenerateBlocks(startHeight uint64, num uint64, proposerKey crypto.PrivKey, lastBlockHeader [32]byte) ([]*types.Block, error) { r, _ := proposerKey.Raw() seq := types.NewSequencerFromValidator(*tmtypes.NewValidator(ed25519.PrivKey(r).PubKey(), 1)) @@ -163,7 +163,7 @@ func GenerateBlocks(startHeight uint64, num uint64, proposerKey crypto.PrivKey, return blocks, nil } -// GenerateCommits generates commits based on passed blocks. + func GenerateCommits(blocks []*types.Block, proposerKey crypto.PrivKey) ([]*types.Commit, error) { commits := make([]*types.Commit, len(blocks)) @@ -205,7 +205,7 @@ func generateSignature(proposerKey crypto.PrivKey, header *types.Header) ([]byte return sign, nil } -// GenerateBatch generates a batch out of random blocks + func GenerateBatch(startHeight uint64, endHeight uint64, proposerKey crypto.PrivKey, lastBlockHeader [32]byte) (*types.Batch, error) { blocks, err := GenerateBlocks(startHeight, endHeight-startHeight+1, proposerKey, lastBlockHeader) if err != nil { @@ -223,7 +223,7 @@ func GenerateBatch(startHeight uint64, endHeight uint64, proposerKey crypto.Priv return batch, nil } -// GenerateLastBatch generates a final batch with LastBatch flag set to true and different NextSequencerHash + func GenerateLastBatch(startHeight uint64, endHeight uint64, proposerKey crypto.PrivKey, nextSequencerKey crypto.PrivKey, lastHeaderHash [32]byte) (*types.Batch, error) { nextSequencerRaw, _ := nextSequencerKey.Raw() nextSeq := types.NewSequencerFromValidator(*tmtypes.NewValidator(ed25519.PrivKey(nextSequencerRaw).PubKey(), 1)) @@ -248,7 +248,7 @@ func GenerateLastBatch(startHeight uint64, endHeight uint64, proposerKey crypto. return batch, nil } -// GenerateLastBlocks es similar a GenerateBlocks pero incluye el NextSequencerHash + func GenerateLastBlocks(startHeight uint64, num uint64, proposerKey crypto.PrivKey, lastHeaderHash [32]byte, nextSequencerHash [32]byte) ([]*types.Block, error) { r, _ := proposerKey.Raw() seq := types.NewSequencerFromValidator(*tmtypes.NewValidator(ed25519.PrivKey(r).PubKey(), 1)) @@ -304,7 +304,7 @@ func MustGenerateBatchAndKey(startHeight uint64, endHeight uint64) *types.Batch return MustGenerateBatch(startHeight, endHeight, proposerKey) } -// GenerateRandomValidatorSet generates random validator sets + func GenerateRandomValidatorSet() *tmtypes.ValidatorSet { return tmtypes.NewValidatorSet([]*tmtypes.Validator{ tmtypes.NewValidator(ed25519.GenPrivKey().PubKey(), 1), @@ -320,11 +320,11 @@ func GenerateSequencer() types.Sequencer { ) } -// GenerateStateWithSequencer generates an initial state for testing. + func GenerateStateWithSequencer(initialHeight int64, lastBlockHeight int64, pubkey tmcrypto.PubKey) *types.State { s := &types.State{ ChainID: "test-chain", - InitialHeight: uint64(initialHeight), //nolint:gosec // height is non-negative and falls in int64 + InitialHeight: uint64(initialHeight), AppHash: [32]byte{}, LastResultsHash: GetEmptyLastResultsHash(), Version: tmstate.Version{ @@ -350,11 +350,11 @@ func GenerateStateWithSequencer(initialHeight int64, lastBlockHeight int64, pubk GenerateSettlementAddress(), []string{GenerateSettlementAddress()}, )) - s.SetHeight(uint64(lastBlockHeight)) //nolint:gosec // height is non-negative and falls in int64 + s.SetHeight(uint64(lastBlockHeight)) return s } -// GenerateGenesis generates a genesis for testing. 
+ func GenerateGenesis(initialHeight int64) *tmtypes.GenesisDoc { return &tmtypes.GenesisDoc{ ChainID: "test-chain", diff --git a/types/batch.go b/types/batch.go index 14d486539..ecfadd20f 100644 --- a/types/batch.go +++ b/types/batch.go @@ -1,21 +1,21 @@ package types const ( - MaxBlockSizeAdjustment = 0.9 // have a safety margin of 10% in regard of MaxBlockBatchSizeBytes + MaxBlockSizeAdjustment = 0.9 ) -// Batch defines a struct for block aggregation for support of batching. -// TODO: maybe change to BlockBatch + + type Batch struct { Blocks []*Block Commits []*Commit - // LastBatch is true if this is the last batch of the sequencer (i.e completes it's rotation flow). + LastBatch bool DRSVersion []uint32 Revision uint64 } -// StartHeight is the height of the first block in the batch. + func (b Batch) StartHeight() uint64 { if len(b.Blocks) == 0 { return 0 @@ -23,7 +23,7 @@ func (b Batch) StartHeight() uint64 { return b.Blocks[0].Header.Height } -// EndHeight is the height of the last block in the batch + func (b Batch) EndHeight() uint64 { if len(b.Blocks) == 0 { return 0 @@ -31,14 +31,14 @@ func (b Batch) EndHeight() uint64 { return b.Blocks[len(b.Blocks)-1].Header.Height } -// NumBlocks is the number of blocks in the batch + func (b Batch) NumBlocks() uint64 { return uint64(len(b.Blocks)) } -// SizeBlockAndCommitBytes returns the sum of the size of bytes of the blocks and commits -// The actual size of the batch may be different due to additional metadata and protobuf -// optimizations. + + + func (b Batch) SizeBlockAndCommitBytes() int { cnt := 0 for _, block := range b.Blocks { diff --git a/types/block.go b/types/block.go index e6d2c1673..153eb3333 100644 --- a/types/block.go +++ b/types/block.go @@ -8,40 +8,40 @@ import ( tmtypes "github.com/tendermint/tendermint/types" ) -// Header defines the structure of Dymint block header. + type Header struct { - // Block and App version + Version Version Height uint64 - Time int64 // UNIX time in nanoseconds. Use int64 as Golang stores UNIX nanoseconds in int64. + Time int64 - // prev block info + LastHeaderHash [32]byte - // hashes of block data - LastCommitHash [32]byte // commit from sequencer(s) from the last block - DataHash [32]byte // Block.Data root aka Transactions - ConsensusHash [32]byte // consensus params for current block - AppHash [32]byte // state after applying txs from height-1 + + LastCommitHash [32]byte + DataHash [32]byte + ConsensusHash [32]byte + AppHash [32]byte - // Root hash of all results from the txs from the previous block. - // This is ABCI specific but smart-contract chains require some way of committing - // to transaction receipts/results. + + + LastResultsHash [32]byte - // Note that the address can be derived from the pubkey which can be derived - // from the signature when using secp256k. - // We keep this in case users choose another signature format where the - // pubkey can't be recovered by the signature (e.g. ed25519). - ProposerAddress []byte // original proposer of the block + + + + + ProposerAddress []byte - // Hash of proposer validatorSet (compatible with tendermint) + SequencerHash [32]byte - // Hash of the next proposer validatorSet (compatible with tendermint) + NextSequencersHash [32]byte - // The Chain ID + ChainID string } @@ -54,16 +54,16 @@ var ( _ encoding.BinaryUnmarshaler = &Header{} ) -// Version captures the consensus rules for processing a block in the blockchain, -// including all blockchain data structures and the rules of the application's -// state transition machine. 
-// This is equivalent to the tmversion.Consensus type in Tendermint. + + + + type Version struct { Block uint64 App uint64 } -// Block defines the structure of Dymint block. + type Block struct { Header Header Data Data @@ -83,7 +83,7 @@ var ( _ encoding.BinaryUnmarshaler = &Block{} ) -// Data defines Dymint block data. + type Data struct { Txs Txs IntermediateStateRoots IntermediateStateRoots @@ -91,16 +91,16 @@ type Data struct { ConsensusMessages []*proto.Any } -// EvidenceData defines how evidence is stored in block. + type EvidenceData struct { Evidence []Evidence } -// Commit contains evidence of block creation. + type Commit struct { Height uint64 HeaderHash [32]byte - // TODO(omritoptix): Change from []Signature to Signature as it should be one signature per block + Signatures []Signature TMSignature tmtypes.CommitSig } @@ -109,11 +109,11 @@ func (c Commit) SizeBytes() int { return c.ToProto().Size() } -// Signature represents signature of block creator. + type Signature []byte -// IntermediateStateRoots describes the state between transactions. -// They are required for fraud proofs. + + type IntermediateStateRoots struct { RawRootsList [][]byte } @@ -123,7 +123,7 @@ func GetLastCommitHash(lastCommit *Commit, header *Header) []byte { return lastABCICommit.Hash() } -// GetDataHash returns the hash of the block data to be set in the block header. + func GetDataHash(block *Block) []byte { abciData := tmtypes.Data{ Txs: ToABCIBlockDataTxs(&block.Data), diff --git a/types/block_source.go b/types/block_source.go index e6304c524..43a2a0be5 100644 --- a/types/block_source.go +++ b/types/block_source.go @@ -24,7 +24,7 @@ var AllSources = []string{"none", "produced", "gossip", "blocksync", "da", "loca type BlockMetaData struct { Source BlockSource DAHeight uint64 - SequencerSet Sequencers // The set of Rollapp sequencers that were present in the Hub while producing this block + SequencerSet Sequencers } type CachedBlock struct { diff --git a/types/conv.go b/types/conv.go index afbfc94a6..37f66eceb 100644 --- a/types/conv.go +++ b/types/conv.go @@ -6,22 +6,22 @@ import ( tmtypes "github.com/tendermint/tendermint/types" ) -// ToABCIHeaderPB converts Dymint header to Header format defined in ABCI. -// Caller should fill all the fields that are not available in Dymint header (like ChainID). + + func ToABCIHeaderPB(header *Header) types.Header { tmheader := ToABCIHeader(header) return *tmheader.ToProto() } -// ToABCIHeader converts Dymint header to Header format defined in ABCI. -// Caller should fill all the fields that are not available in Dymint header (like ChainID). + + func ToABCIHeader(header *Header) tmtypes.Header { return tmtypes.Header{ Version: version.Consensus{ Block: header.Version.Block, App: header.Version.App, }, - Height: int64(header.Height), //nolint:gosec // height is non-negative and falls in int64 + Height: int64(header.Height), Time: header.GetTimestamp(), LastBlockID: tmtypes.BlockID{ Hash: header.LastHeaderHash[:], @@ -43,12 +43,12 @@ func ToABCIHeader(header *Header) tmtypes.Header { } } -// ToABCIBlock converts Dymint block into block format defined by ABCI. -// Returned block should pass `ValidateBasic`. 
+ + func ToABCIBlock(block *Block) (*tmtypes.Block, error) { abciHeader := ToABCIHeader(&block.Header) abciCommit := ToABCICommit(&block.LastCommit, &block.Header) - // This assumes that we have only one signature + if len(abciCommit.Signatures) == 1 { abciCommit.Signatures[0].ValidatorAddress = block.Header.ProposerAddress } @@ -65,7 +65,7 @@ func ToABCIBlock(block *Block) (*tmtypes.Block, error) { return &abciBlock, nil } -// ToABCIBlockDataTxs converts Dymint block-data into block-data format defined by ABCI. + func ToABCIBlockDataTxs(data *Data) []tmtypes.Tx { txs := make([]tmtypes.Tx, len(data.Txs)) for i := range data.Txs { @@ -74,7 +74,7 @@ func ToABCIBlockDataTxs(data *Data) []tmtypes.Tx { return txs } -// ToABCIBlockMeta converts Dymint block into BlockMeta format defined by ABCI + func ToABCIBlockMeta(block *Block) (*tmtypes.BlockMeta, error) { tmblock, err := ToABCIBlock(block) if err != nil { @@ -90,13 +90,13 @@ func ToABCIBlockMeta(block *Block) (*tmtypes.BlockMeta, error) { }, nil } -// ToABCICommit converts Dymint commit into commit format defined by ABCI. -// This function only converts fields that are available in Dymint commit. -// Other fields (especially ValidatorAddress and Timestamp of Signature) has to be filled by caller. + + + func ToABCICommit(commit *Commit, header *Header) *tmtypes.Commit { headerHash := header.Hash() tmCommit := tmtypes.Commit{ - Height: int64(commit.Height), //nolint:gosec // height is non-negative and falls in int64 + Height: int64(commit.Height), Round: 0, BlockID: tmtypes.BlockID{ Hash: headerHash[:], @@ -106,7 +106,7 @@ func ToABCICommit(commit *Commit, header *Header) *tmtypes.Commit { }, }, } - // Check if TMSignature exists. if not use the previous dymint signature for backwards compatibility. + if len(commit.TMSignature.Signature) == 0 { for _, sig := range commit.Signatures { commitSig := tmtypes.CommitSig{ @@ -115,7 +115,7 @@ func ToABCICommit(commit *Commit, header *Header) *tmtypes.Commit { } tmCommit.Signatures = append(tmCommit.Signatures, commitSig) } - // This assumes that we have only one signature + if len(commit.Signatures) == 1 { tmCommit.Signatures[0].ValidatorAddress = header.ProposerAddress tmCommit.Signatures[0].Timestamp = header.GetTimestamp() diff --git a/types/errors.go b/types/errors.go index 033c5bd80..418e5e5a6 100644 --- a/types/errors.go +++ b/types/errors.go @@ -24,11 +24,11 @@ var ( ErrEmptyProposerAddress = errors.New("no proposer address") ) -// TimeFraudMaxDrift is the maximum allowed time drift between the block time and the local time. + var TimeFraudMaxDrift = 10 * time.Minute -// ErrFraudHeightMismatch is the fraud that occurs when the height of the block is different from the expected -// next height of the state. + + type ErrFraudHeightMismatch struct { Expected uint64 Actual uint64 @@ -37,7 +37,7 @@ type ErrFraudHeightMismatch struct { Proposer []byte } -// NewErrFraudHeightMismatch creates a new ErrFraudHeightMismatch error. + func NewErrFraudHeightMismatch(expected uint64, header *Header) error { return &ErrFraudHeightMismatch{ Expected: expected, @@ -56,7 +56,7 @@ func (e ErrFraudHeightMismatch) Unwrap() error { return gerrc.ErrFault } -// ErrFraudAppHashMismatch is the fraud that occurs when the AppHash of the block is different from the expected AppHash. + type ErrFraudAppHashMismatch struct { Expected [32]byte @@ -66,7 +66,7 @@ type ErrFraudAppHashMismatch struct { Proposer []byte } -// NewErrFraudAppHashMismatch creates a new ErrFraudAppHashMismatch error. 
+ func NewErrFraudAppHashMismatch(expected [32]byte, header *Header) error { return &ErrFraudAppHashMismatch{ Expected: expected, @@ -86,7 +86,7 @@ func (e ErrFraudAppHashMismatch) Unwrap() error { return gerrc.ErrFault } -// ErrLastResultsHashMismatch indicates a potential fraud when the LastResultsHash of a block does not match the expected value. + type ErrLastResultsHashMismatch struct { Expected [32]byte @@ -96,7 +96,7 @@ type ErrLastResultsHashMismatch struct { LastResultHash [32]byte } -// NewErrLastResultsHashMismatch creates a new ErrLastResultsHashMismatch error. + func NewErrLastResultsHashMismatch(expected [32]byte, header *Header) error { return &ErrLastResultsHashMismatch{ Expected: expected, @@ -116,7 +116,7 @@ func (e ErrLastResultsHashMismatch) Unwrap() error { return gerrc.ErrFault } -// ErrTimeFraud represents an error indicating a possible fraud due to time drift. + type ErrTimeFraud struct { Drift time.Duration ProposerAddress []byte @@ -153,7 +153,7 @@ func (e ErrTimeFraud) Unwrap() error { return gerrc.ErrFault } -// ErrLastHeaderHashMismatch is the error that occurs when the last header hash does not match the expected value. + type ErrLastHeaderHashMismatch struct { Expected [32]byte LastHeaderHash [32]byte @@ -174,7 +174,7 @@ func (e ErrLastHeaderHashMismatch) Unwrap() error { return gerrc.ErrFault } -// ErrInvalidChainID is the fraud that occurs when the chain ID of the block is different from the expected chain ID. + type ErrInvalidChainID struct { Expected string Block *Block @@ -200,8 +200,8 @@ func (e ErrInvalidChainID) Unwrap() error { return gerrc.ErrFault } -// ErrInvalidBlockHeightFraud is the fraud that happens when the height that is on the commit header is -// different from the height of the block. + + type ErrInvalidBlockHeightFraud struct { Expected uint64 Header *Header @@ -227,7 +227,7 @@ func (e ErrInvalidBlockHeightFraud) Unwrap() error { return gerrc.ErrFault } -// ErrInvalidHeaderHashFraud indicates a potential fraud when the Header Hash does not match the expected value. + type ErrInvalidHeaderHashFraud struct { ExpectedHash [32]byte Header *Header @@ -253,7 +253,7 @@ func (e ErrInvalidHeaderHashFraud) Unwrap() error { return gerrc.ErrFault } -// ErrInvalidSignatureFraud indicates a potential fraud due to an invalid signature in the block. + type ErrInvalidSignatureFraud struct { Err error Header *Header @@ -280,7 +280,7 @@ func (e ErrInvalidSignatureFraud) Unwrap() error { return gerrc.ErrFault } -// ErrInvalidProposerAddressFraud indicates a potential fraud when the proposer's address is invalid. + type ErrInvalidProposerAddressFraud struct { ExpectedAddress []byte ActualAddress tmcrypto.Address @@ -308,7 +308,7 @@ func (e ErrInvalidProposerAddressFraud) Unwrap() error { return gerrc.ErrFault } -// ErrInvalidSequencerHashFraud indicates a potential fraud when the sequencer's hash is invalid. + type ErrInvalidSequencerHashFraud struct { ExpectedHash [32]byte ActualHash []byte @@ -336,7 +336,7 @@ func (e ErrInvalidSequencerHashFraud) Unwrap() error { return gerrc.ErrFault } -// ErrInvalidNextSequencersHashFraud indicates a potential fraud when the NextSequencersHash does not match the expected value. + type ErrInvalidNextSequencersHashFraud struct { ExpectedHash [32]byte Header Header @@ -361,7 +361,7 @@ func (e ErrInvalidNextSequencersHashFraud) Unwrap() error { return gerrc.ErrFault } -// ErrInvalidHeaderDataHashFraud indicates a potential fraud when the Header Data Hash does not match the expected value. 
+ type ErrInvalidHeaderDataHashFraud struct { Expected [32]byte Actual [32]byte @@ -390,7 +390,7 @@ func (e ErrInvalidHeaderDataHashFraud) Unwrap() error { return gerrc.ErrFault } -// ErrStateUpdateNumBlocksNotMatchingFraud represents an error where the number of blocks in the state update does not match the expected number. + type ErrStateUpdateNumBlocksNotMatchingFraud struct { StateIndex uint64 SLNumBlocks uint64 @@ -418,8 +418,8 @@ func (e ErrStateUpdateNumBlocksNotMatchingFraud) Unwrap() error { return gerrc.ErrFault } -// ErrStateUpdateHeightNotMatchingFraud is the fraud that happens when the height that is on the commit header is -// different from the height of the block. + + type ErrStateUpdateHeightNotMatchingFraud struct { StateIndex uint64 SLBeginHeight uint64 @@ -449,7 +449,7 @@ func (e ErrStateUpdateHeightNotMatchingFraud) Unwrap() error { return gerrc.ErrFault } -// ErrStateUpdateStateRootNotMatchingFraud represents an error where the state roots do not match in the state update. + type ErrStateUpdateStateRootNotMatchingFraud struct { StateIndex uint64 Height uint64 @@ -478,7 +478,7 @@ func (e ErrStateUpdateStateRootNotMatchingFraud) Unwrap() error { return gerrc.ErrFault } -// ErrStateUpdateTimestampNotMatchingFraud represents an error where the timestamps do not match in the state update. + type ErrStateUpdateTimestampNotMatchingFraud struct { StateIndex uint64 Height uint64 @@ -506,7 +506,7 @@ func (e ErrStateUpdateTimestampNotMatchingFraud) Unwrap() error { return gerrc.ErrFault } -// ErrStateUpdateDoubleSigningFraud indicates a potential fraud due to double signing detected between DA and P2P blocks. + type ErrStateUpdateDoubleSigningFraud struct { DABlock *Block P2PBlock *Block @@ -571,7 +571,7 @@ func getJsonFromBlock(block *Block) ([]byte, error) { return jsonBlock, nil } -// ErrStateUpdateBlobNotAvailableFraud represents an error where a blob is not available in DA. + type ErrStateUpdateBlobNotAvailableFraud struct { StateIndex uint64 DA string @@ -599,7 +599,7 @@ func (e ErrStateUpdateBlobNotAvailableFraud) Unwrap() error { return gerrc.ErrFault } -// ErrStateUpdateBlobCorruptedFraud represents an error where a blob is corrupted in DA. + type ErrStateUpdateBlobCorruptedFraud struct { StateIndex uint64 DA string @@ -627,7 +627,7 @@ func (e ErrStateUpdateBlobCorruptedFraud) Unwrap() error { return gerrc.ErrFault } -// ErrStateUpdateDRSVersionFraud represents an error where the DRS versions do not match in the state update. + type ErrStateUpdateDRSVersionFraud struct { StateIndex uint64 Height uint64 diff --git a/types/evidence.go b/types/evidence.go index ba17e1b0b..8aff5b04f 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -3,19 +3,19 @@ package types import ( "time" - // TODO: either copy the vanilla abci types (or the protos) into this repo - // or, import the vanilla tendermint types instead. + + abci "github.com/tendermint/tendermint/abci/types" ) -// Evidence represents any provable malicious activity by a validator. -// Verification logic for each evidence is part of the evidence module. 
+ + type Evidence interface { - ABCI() []abci.Evidence // forms individual evidence to be sent to the application - Bytes() []byte // bytes which comprise the evidence - Hash() []byte // hash of the evidence - Height() int64 // height of the infraction - String() string // string format of the evidence - Time() time.Time // time of the infraction - ValidateBasic() error // basic consistency check + ABCI() []abci.Evidence + Bytes() []byte + Hash() []byte + Height() int64 + String() string + Time() time.Time + ValidateBasic() error } diff --git a/types/hashing.go b/types/hashing.go index 17162ee0e..931df948a 100644 --- a/types/hashing.go +++ b/types/hashing.go @@ -1,6 +1,6 @@ package types -// Hash returns ABCI-compatible hash of a header. + func (h *Header) Hash() [32]byte { var hash [32]byte abciHeader := ToABCIHeader(h) @@ -8,7 +8,7 @@ func (h *Header) Hash() [32]byte { return hash } -// Hash returns ABCI-compatible hash of a block. + func (b *Block) Hash() [32]byte { return b.Header.Hash() } diff --git a/types/instruction.go b/types/instruction.go index ebae50aa5..8f735f6d1 100644 --- a/types/instruction.go +++ b/types/instruction.go @@ -33,7 +33,7 @@ func LoadInstructionFromDisk(dir string) (Instruction, error) { var instruction Instruction filePath := filepath.Join(dir, instructionFileName) - data, err := os.ReadFile(filePath) // nolint:gosec + data, err := os.ReadFile(filePath) if err != nil { return Instruction{}, err } diff --git a/types/logger.go b/types/logger.go index e2c8fcdac..dfc89d708 100644 --- a/types/logger.go +++ b/types/logger.go @@ -1,6 +1,6 @@ package types -// Logger interface is compatible with Tendermint logger + type Logger interface { Debug(msg string, keyvals ...interface{}) Info(msg string, keyvals ...interface{}) diff --git a/types/pb/dymensionxyz/dymension/rollapp/errors.go b/types/pb/dymensionxyz/dymension/rollapp/errors.go index 2caa18964..1d9d3c05c 100644 --- a/types/pb/dymensionxyz/dymension/rollapp/errors.go +++ b/types/pb/dymensionxyz/dymension/rollapp/errors.go @@ -1,13 +1,13 @@ package rollapp -// DONTCOVER + import ( errorsmod "cosmossdk.io/errors" "github.com/dymensionxyz/gerr-cosmos/gerrc" ) -// x/rollapp module sentinel errors + var ( ErrRollappExists = errorsmod.Register(ModuleName, 1000, "rollapp already exists") ErrInvalidInitialSequencer = errorsmod.Register(ModuleName, 1001, "empty initial sequencer") @@ -44,7 +44,7 @@ var ( ErrInvalidRequest = errorsmod.Wrap(gerrc.ErrInvalidArgument, "invalid request") ErrInvalidVMType = errorsmod.Wrap(gerrc.ErrInvalidArgument, "invalid vm type") - /* ------------------------------ fraud related ----------------------------- */ + ErrDisputeAlreadyFinalized = errorsmod.Register(ModuleName, 2000, "disputed height already finalized") ErrDisputeAlreadyReverted = errorsmod.Register(ModuleName, 2001, "disputed height already reverted") ErrWrongClientId = errorsmod.Register(ModuleName, 2002, "client id does not match the rollapp") diff --git a/types/pb/dymensionxyz/dymension/rollapp/events.go b/types/pb/dymensionxyz/dymension/rollapp/events.go index ae0f6e3d1..259a12f03 100644 --- a/types/pb/dymensionxyz/dymension/rollapp/events.go +++ b/types/pb/dymensionxyz/dymension/rollapp/events.go @@ -11,12 +11,12 @@ const ( AttributeKeyDAPath = "da_path" AttributeKeyStatus = "status" - // EventTypeFraud is emitted when a fraud evidence is submitted + EventTypeFraud = "fraud_proposal" AttributeKeyFraudHeight = "fraud_height" AttributeKeyFraudSequencer = "fraud_sequencer" AttributeKeyClientID = "client_id" - // 
EventTypeTransferGenesisTransfersEnabled is when the bridge is enabled + EventTypeTransferGenesisTransfersEnabled = "transfer_genesis_transfers_enabled" ) diff --git a/types/pb/dymensionxyz/dymension/rollapp/keys.go b/types/pb/dymensionxyz/dymension/rollapp/keys.go index ca4e7b64c..61858ca0d 100644 --- a/types/pb/dymensionxyz/dymension/rollapp/keys.go +++ b/types/pb/dymensionxyz/dymension/rollapp/keys.go @@ -1,19 +1,19 @@ package rollapp const ( - // ModuleName defines the module name + ModuleName = "rollapp" - // StoreKey defines the primary module store key + StoreKey = ModuleName - // RouterKey is the message route for slashing + RouterKey = ModuleName - // QuerierRoute defines the module's query routing key + QuerierRoute = ModuleName - // MemStoreKey defines the in-memory store key + MemStoreKey = "mem_rollapp" ) diff --git a/types/pb/dymensionxyz/dymension/rollapp/message_update_state.go b/types/pb/dymensionxyz/dymension/rollapp/message_update_state.go index 11b1c7f3c..0dec4fc93 100644 --- a/types/pb/dymensionxyz/dymension/rollapp/message_update_state.go +++ b/types/pb/dymensionxyz/dymension/rollapp/message_update_state.go @@ -25,7 +25,7 @@ func (msg *MsgUpdateState) ValidateBasic() error { return errorsmod.Wrapf(ErrInvalidAddress, "invalid creator address (%s)", err) } - // an update can't be with no BDs + if msg.NumBlocks == uint64(0) { return errorsmod.Wrap(ErrInvalidNumBlocks, "number of blocks can not be zero") } @@ -34,22 +34,22 @@ func (msg *MsgUpdateState) ValidateBasic() error { return errorsmod.Wrapf(ErrInvalidNumBlocks, "numBlocks(%d) + startHeight(%d) exceeds max uint64", msg.NumBlocks, msg.StartHeight) } - // check to see that update contains all BDs + if uint64(len(msg.BDs.BD)) != msg.NumBlocks { return errorsmod.Wrapf(ErrInvalidNumBlocks, "number of blocks (%d) != number of block descriptors(%d)", msg.NumBlocks, len(msg.BDs.BD)) } - // check to see that startHeight is not zaro + if msg.StartHeight == 0 { return errorsmod.Wrapf(ErrWrongBlockHeight, "StartHeight must be greater than zero") } - // check that the blocks are sequential by height + for bdIndex := uint64(0); bdIndex < msg.NumBlocks; bdIndex += 1 { if msg.BDs.BD[bdIndex].Height != msg.StartHeight+bdIndex { return ErrInvalidBlockSequence } - // check to see stateRoot is a 32 byte array + if len(msg.BDs.BD[bdIndex].StateRoot) != 32 { return errorsmod.Wrapf(ErrInvalidStateRoot, "StateRoot of block high (%d) must be 32 byte array. But received (%d) bytes", msg.BDs.BD[bdIndex].Height, len(msg.BDs.BD[bdIndex].StateRoot)) diff --git a/types/pb/dymensionxyz/dymension/rollapp/params.go b/types/pb/dymensionxyz/dymension/rollapp/params.go index f12bb0f0b..64c9ad818 100644 --- a/types/pb/dymensionxyz/dymension/rollapp/params.go +++ b/types/pb/dymensionxyz/dymension/rollapp/params.go @@ -2,7 +2,7 @@ package rollapp import "gopkg.in/yaml.v2" -// String implements the Stringer interface. + func (p Params) String() string { out, _ := yaml.Marshal(p) return string(out) diff --git a/types/pb/dymensionxyz/dymension/sequencer/events.go b/types/pb/dymensionxyz/dymension/sequencer/events.go index eb93ddc7a..01fd6ea51 100644 --- a/types/pb/dymensionxyz/dymension/sequencer/events.go +++ b/types/pb/dymensionxyz/dymension/sequencer/events.go @@ -1,27 +1,27 @@ package sequencer -// Incentive module event types. 
+ const ( - // EventTypeCreateSequencer is emitted when a sequencer is created + EventTypeCreateSequencer = "create_sequencer" AttributeKeyRollappId = "rollapp_id" AttributeKeySequencer = "sequencer" AttributeKeyBond = "bond" AttributeKeyProposer = "proposer" - // EventTypeUnbonding is emitted when a sequencer is unbonding + EventTypeUnbonding = "unbonding" AttributeKeyCompletionTime = "completion_time" - // EventTypeNoBondedSequencer is emitted when no bonded sequencer is found for a rollapp + EventTypeNoBondedSequencer = "no_bonded_sequencer" - // EventTypeProposerRotated is emitted when a proposer is rotated + EventTypeProposerRotated = "proposer_rotated" - // EventTypeUnbonded is emitted when a sequencer is unbonded + EventTypeUnbonded = "unbonded" - // EventTypeSlashed is emitted when a sequencer is slashed + EventTypeSlashed = "slashed" ) diff --git a/types/pb/dymensionxyz/dymension/sequencer/keys.go b/types/pb/dymensionxyz/dymension/sequencer/keys.go index c4b84447f..8bae0a8a5 100644 --- a/types/pb/dymensionxyz/dymension/sequencer/keys.go +++ b/types/pb/dymensionxyz/dymension/sequencer/keys.go @@ -11,63 +11,63 @@ import ( var _ binary.ByteOrder const ( - // ModuleName defines the module name + ModuleName = "sequencer" - // StoreKey defines the primary module store key + StoreKey = ModuleName - // RouterKey is the message route for slashing + RouterKey = ModuleName - // QuerierRoute defines the module's query routing key + QuerierRoute = ModuleName - // MemStoreKey defines the in-memory store key + MemStoreKey = "mem_sequencer" ) var ( - // KeySeparator defines the separator for keys + KeySeparator = "/" - // SequencersKeyPrefix is the prefix to retrieve all Sequencers by their address - SequencersKeyPrefix = []byte{0x00} // prefix/seqAddr + + SequencersKeyPrefix = []byte{0x00} - // SequencersByRollappKeyPrefix is the prefix to retrieve all SequencersByRollapp - SequencersByRollappKeyPrefix = []byte{0x01} // prefix/rollappId + + SequencersByRollappKeyPrefix = []byte{0x01} BondedSequencersKeyPrefix = []byte{0xa1} UnbondedSequencersKeyPrefix = []byte{0xa2} UnbondingSequencersKeyPrefix = []byte{0xa3} - UnbondingQueueKey = []byte{0x41} // prefix for the timestamps in unbonding queue + UnbondingQueueKey = []byte{0x41} ) -/* --------------------- specific sequencer address keys -------------------- */ + func SequencerKey(sequencerAddress string) []byte { sequencerAddrBytes := []byte(sequencerAddress) return []byte(fmt.Sprintf("%s%s%s", SequencersKeyPrefix, KeySeparator, sequencerAddrBytes)) } -// SequencerByRollappByStatusKey returns the store key to retrieve a SequencersByRollapp from the index fields + func SequencerByRollappByStatusKey(rollappId, seqAddr string, status OperatingStatus) []byte { return append(SequencersByRollappByStatusKey(rollappId, status), []byte(seqAddr)...) 
} -/* ------------------------- multiple sequencers keys ------------------------ */ + func SequencersKey() []byte { return SequencersKeyPrefix } -// SequencersByRollappKey returns the store key to retrieve a SequencersByRollapp from the index fields + func SequencersByRollappKey(rollappId string) []byte { rollappIdBytes := []byte(rollappId) return []byte(fmt.Sprintf("%s%s%s", SequencersByRollappKeyPrefix, KeySeparator, rollappIdBytes)) } -// SequencersByRollappByStatusKey returns the store key to retrieve a SequencersByRollappByStatus from the index fields + func SequencersByRollappByStatusKey(rollappId string, status OperatingStatus) []byte { - // Get the relevant key prefix based on the packet status + var prefix []byte switch status { case Bonded: @@ -81,16 +81,16 @@ func SequencersByRollappByStatusKey(rollappId string, status OperatingStatus) [] return []byte(fmt.Sprintf("%s%s%s", SequencersByRollappKey(rollappId), KeySeparator, prefix)) } -/* -------------------------- unbonding queue keys -------------------------- */ + func UnbondingQueueByTimeKey(endTime time.Time) []byte { timeBz := sdk.FormatTimeBytes(endTime) prefixL := len(UnbondingQueueKey) bz := make([]byte, prefixL+len(timeBz)) - // copy the prefix + copy(bz[:prefixL], UnbondingQueueKey) - // copy the encoded time bytes + copy(bz[prefixL:prefixL+len(timeBz)], timeBz) return bz diff --git a/types/pb/dymensionxyz/dymension/sequencer/params.go b/types/pb/dymensionxyz/dymension/sequencer/params.go index 5bf8971f0..de39b13dc 100644 --- a/types/pb/dymensionxyz/dymension/sequencer/params.go +++ b/types/pb/dymensionxyz/dymension/sequencer/params.go @@ -4,7 +4,7 @@ import ( "gopkg.in/yaml.v2" ) -// String implements the Stringer interface. + func (p Params) String() string { out, _ := yaml.Marshal(p) return string(out) diff --git a/types/rollapp.go b/types/rollapp.go index f6fcd1d14..87951daf5 100644 --- a/types/rollapp.go +++ b/types/rollapp.go @@ -14,7 +14,7 @@ type Revision struct { func (r Rollapp) LatestRevision() Revision { if len(r.Revisions) == 0 { - // Revision 0 if no revisions exist. + return Revision{} } return r.Revisions[len(r.Revisions)-1] diff --git a/types/sequencer_set.go b/types/sequencer_set.go index 6d40142e7..1d294b2c7 100644 --- a/types/sequencer_set.go +++ b/types/sequencer_set.go @@ -13,18 +13,18 @@ import ( "github.com/tendermint/tendermint/types" ) -// Sequencer is a struct that holds the sequencer's information and tendermint validator. -// It is populated from the Hub on start and is periodically updated from the Hub polling. -// Uses tendermint's validator types for compatibility. + + + type Sequencer struct { - // SettlementAddress is the address of the sequencer in the settlement layer (bech32 string) + SettlementAddress string - // RewardAddr is the bech32-encoded sequencer's reward address + RewardAddr string - // WhitelistedRelayers is a list of the whitelisted relayer addresses. Addresses are bech32-encoded strings. + WhitelistedRelayers []string - // val is a tendermint validator type for compatibility. Holds the public key and cons address. 
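The comments stripped from sequencer/keys.go above described the key layout (a 0x00 prefix per sequencer address, a 0x01 prefix per rollapp, plus per-status prefixes). A short sketch of how those helpers compose store keys; the import path follows the file path in this patch and the IDs are made up:

package main

import (
    "fmt"

    sequencer "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer"
)

func main() {
    rollappID := "example_1234-1"   // hypothetical rollapp ID
    seqAddr := "dym1exampleaddress" // hypothetical sequencer address

    // 0x00 prefix + "/" + sequencer address
    fmt.Printf("%x\n", sequencer.SequencerKey(seqAddr))

    // 0x01 prefix + "/" + rollapp ID, narrowed by the bonded-status prefix
    fmt.Printf("%x\n", sequencer.SequencersByRollappByStatusKey(rollappID, sequencer.Bonded))

    // and finally scoped to a single sequencer address
    fmt.Printf("%x\n", sequencer.SequencerByRollappByStatusKey(rollappID, seqAddr, sequencer.Bonded))
}
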
+ val types.Validator } @@ -45,8 +45,8 @@ func NewSequencer( } } -// IsEmpty returns true if the sequencer is empty -// we check if the pubkey is nil + + func (s Sequencer) IsEmpty() bool { return s.val.PubKey == nil } @@ -71,7 +71,7 @@ func (s Sequencer) TMValset() (*types.ValidatorSet, error) { return types.ValidatorSetFromExistingValidators(s.TMValidators()) } -// Hash returns tendermint compatible hash of the sequencer + func (s Sequencer) Hash() ([]byte, error) { vs, err := s.TMValset() if err != nil { @@ -80,7 +80,7 @@ func (s Sequencer) Hash() ([]byte, error) { return vs.Hash(), nil } -// MustHash returns tendermint compatible hash of the sequencer + func (s Sequencer) MustHash() []byte { h, err := s.Hash() if err != nil { @@ -89,7 +89,7 @@ func (s Sequencer) MustHash() []byte { return h } -// AnyConsPubKey returns sequencer's consensus public key represented as Cosmos proto.Any. + func (s Sequencer) AnyConsPubKey() (*codectypes.Any, error) { val := s.TMValidator() pubKey, err := cryptocodec.FromTmPubKeyInterface(val.PubKey) @@ -103,7 +103,7 @@ func (s Sequencer) AnyConsPubKey() (*codectypes.Any, error) { return anyPK, nil } -// MustFullHash returns a "full" hash of the sequencer that includes all fields of the Sequencer type. + func (s Sequencer) MustFullHash() []byte { h := sha256.New() h.Write([]byte(s.SettlementAddress)) @@ -115,14 +115,14 @@ func (s Sequencer) MustFullHash() []byte { return h.Sum(nil) } -// SequencerListRightOuterJoin returns a set of sequencers that are in B but not in A. -// Sequencer is identified by a hash of all of it's fields. -// -// Example 1: -// -// s1 = {seq1, seq2, seq3} -// s2 = { seq2, seq3, seq4} -// s1 * s2 = { seq4} + + + + + + + + func SequencerListRightOuterJoin(A, B Sequencers) Sequencers { lhsSet := make(map[string]struct{}) for _, s := range A { @@ -141,13 +141,13 @@ func (s Sequencer) String() string { return fmt.Sprintf("Sequencer{SettlementAddress: %s RewardAddr: %s WhitelistedRelayers: %v Validator: %s}", s.SettlementAddress, s.RewardAddr, s.WhitelistedRelayers, s.val.String()) } -// Sequencers is a list of sequencers. + type Sequencers []Sequencer -// SequencerSet is a set of rollapp sequencers. It holds the entire set of sequencers -// that were ever associated with the rollapp (including bonded/unbonded/unbonding). -// It is populated from the Hub on start and is periodically updated from the Hub polling. -// This type is thread-safe. + + + + type SequencerSet struct { mu sync.RWMutex sequencers Sequencers @@ -160,7 +160,7 @@ func NewSequencerSet(s ...Sequencer) *SequencerSet { } } -// Set sets the sequencers of the sequencer set. + func (s *SequencerSet) Set(sequencers Sequencers) { s.mu.Lock() defer s.mu.Unlock() @@ -173,7 +173,7 @@ func (s *SequencerSet) GetAll() Sequencers { return slices.Clone(s.sequencers) } -// GetByHash gets the sequencer by hash. It returns an error if the hash is not found in the sequencer set. + func (s *SequencerSet) GetByHash(hash []byte) (Sequencer, bool) { s.mu.RLock() defer s.mu.RUnlock() @@ -185,8 +185,8 @@ func (s *SequencerSet) GetByHash(hash []byte) (Sequencer, bool) { return Sequencer{}, false } -// GetByAddress returns the sequencer with the given settlement address. 
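The comment block removed from SequencerListRightOuterJoin documented it with the example s1 = {seq1, seq2, seq3}, s2 = {seq2, seq3, seq4} giving {seq4}, i.e. the members of the second list missing from the first. A hedged sketch of the typical use, diffing a freshly polled Hub sequencer set against the currently known one (the set names are hypothetical):

package main

import (
    "fmt"

    "github.com/dymensionxyz/dymint/types"
)

// newlySeen returns the sequencers present in the freshly polled set but absent
// from the currently known set, mirroring the right-outer-join semantics above.
func newlySeen(current, polled *types.SequencerSet) types.Sequencers {
    return types.SequencerListRightOuterJoin(current.GetAll(), polled.GetAll())
}

func main() {
    current := types.NewSequencerSet()
    polled := types.NewSequencerSet()
    fmt.Println(len(newlySeen(current, polled))) // 0: both sets are empty
}
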
-// used when handling events from the settlement, where the settlement address is used + + func (s *SequencerSet) GetByAddress(settlementAddress string) (Sequencer, bool) { s.mu.RLock() defer s.mu.RUnlock() @@ -198,7 +198,7 @@ func (s *SequencerSet) GetByAddress(settlementAddress string) (Sequencer, bool) return Sequencer{}, false } -// GetByConsAddress returns the sequencer with the given consensus address. + func (s *SequencerSet) GetByConsAddress(consAddr []byte) (Sequencer, bool) { s.mu.RLock() defer s.mu.RUnlock() @@ -214,9 +214,9 @@ func (s *SequencerSet) String() string { return fmt.Sprintf("SequencerSet: %v", s.sequencers) } -/* -------------------------- backward compatibility ------------------------- */ -// old dymint version used tendermint.ValidatorSet for sequencers -// these methods are used for backward compatibility + + + func NewSequencerFromValidator(val types.Validator) *Sequencer { return &Sequencer{ diff --git a/types/serialization.go b/types/serialization.go index a4e79bb8e..14965e6a0 100644 --- a/types/serialization.go +++ b/types/serialization.go @@ -12,17 +12,17 @@ import ( pb "github.com/dymensionxyz/dymint/types/pb/dymint" ) -// MarshalBinary encodes Block into binary form and returns it. + func (b *Block) MarshalBinary() ([]byte, error) { return b.ToProto().Marshal() } -// MarshalBinary encodes Batch into binary form and returns it. + func (b *Batch) MarshalBinary() ([]byte, error) { return b.ToProto().Marshal() } -// UnmarshalBinary decodes binary form of Block into object. + func (b *Block) UnmarshalBinary(data []byte) error { var pBlock pb.Block err := pBlock.Unmarshal(data) @@ -33,7 +33,7 @@ func (b *Block) UnmarshalBinary(data []byte) error { return err } -// UnmarshalBinary decodes binary form of Batch into object. + func (b *Batch) UnmarshalBinary(data []byte) error { var pBatch pb.Batch err := pBatch.Unmarshal(data) @@ -44,12 +44,12 @@ func (b *Batch) UnmarshalBinary(data []byte) error { return err } -// MarshalBinary encodes Header into binary form and returns it. + func (h *Header) MarshalBinary() ([]byte, error) { return h.ToProto().Marshal() } -// UnmarshalBinary decodes binary form of Header into object. + func (h *Header) UnmarshalBinary(data []byte) error { var pHeader pb.Header err := pHeader.Unmarshal(data) @@ -60,17 +60,17 @@ func (h *Header) UnmarshalBinary(data []byte) error { return err } -// MarshalBinary encodes Data into binary form and returns it. + func (d *Data) MarshalBinary() ([]byte, error) { return d.ToProto().Marshal() } -// MarshalBinary encodes Commit into binary form and returns it. + func (c *Commit) MarshalBinary() ([]byte, error) { return c.ToProto().Marshal() } -// UnmarshalBinary decodes binary form of Commit into object. + func (c *Commit) UnmarshalBinary(data []byte) error { var pCommit pb.Commit err := pCommit.Unmarshal(data) @@ -81,7 +81,7 @@ func (c *Commit) UnmarshalBinary(data []byte) error { return err } -// ToProto converts Header into protobuf representation and returns it. + func (h *Header) ToProto() *pb.Header { return &pb.Header{ Version: &pb.Version{Block: h.Version.Block, App: h.Version.App}, @@ -101,7 +101,7 @@ func (h *Header) ToProto() *pb.Header { } } -// FromProto fills Header with data from its protobuf representation. 
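serialization.go keeps the MarshalBinary/UnmarshalBinary pairs even though their doc comments are dropped; a minimal round-trip sketch (illustrative only, using a zero-value block rather than a produced one):

package main

import (
    "fmt"

    "github.com/dymensionxyz/dymint/types"
)

func main() {
    var b types.Block // a real caller would round-trip a produced block

    // Encode to the protobuf wire form...
    bz, err := b.MarshalBinary()
    if err != nil {
        panic(err)
    }

    // ...and decode it back into a fresh Block.
    var decoded types.Block
    if err := decoded.UnmarshalBinary(bz); err != nil {
        panic(err)
    }
    fmt.Println(decoded.Header.Height == b.Header.Height) // true
}
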
+ func (h *Header) FromProto(other *pb.Header) error { h.Version.Block = other.Version.Block h.Version.App = other.Version.App @@ -140,8 +140,8 @@ func (h *Header) FromProto(other *pb.Header) error { return nil } -// safeCopy copies bytes from src slice into dst slice if both have same size. -// It returns true if sizes of src and dst are the same. + + func safeCopy(dst, src []byte) bool { if len(src) != len(dst) { return false @@ -150,7 +150,7 @@ func safeCopy(dst, src []byte) bool { return true } -// ToProto converts Block into protobuf representation and returns it. + func (b *Block) ToProto() *pb.Block { return &pb.Block{ Header: b.Header.ToProto(), @@ -159,7 +159,7 @@ func (b *Block) ToProto() *pb.Block { } } -// ToProto converts Batch into protobuf representation and returns it. + func (b *Batch) ToProto() *pb.Batch { return &pb.Batch{ StartHeight: b.StartHeight(), @@ -169,7 +169,7 @@ func (b *Batch) ToProto() *pb.Batch { } } -// ToProto converts Data into protobuf representation and returns it. + func (d *Data) ToProto() *pb.Data { return &pb.Data{ Txs: txsToByteSlices(d.Txs), @@ -179,7 +179,7 @@ func (d *Data) ToProto() *pb.Data { } } -// FromProto fills Block with data from its protobuf representation. + func (b *Block) FromProto(other *pb.Block) error { err := b.Header.FromProto(other.Header) if err != nil { @@ -199,7 +199,7 @@ func (b *Block) FromProto(other *pb.Block) error { return nil } -// FromProto fills Batch with data from its protobuf representation. + func (b *Batch) FromProto(other *pb.Batch) error { n := len(other.Blocks) start := other.StartHeight @@ -215,7 +215,7 @@ func (b *Batch) FromProto(other *pb.Batch) error { return nil } -// ToProto converts Commit into protobuf representation and returns it. + func (c *Commit) ToProto() *pb.Commit { return &pb.Commit{ Height: c.Height, @@ -230,14 +230,14 @@ func (c *Commit) ToProto() *pb.Commit { } } -// FromProto fills Commit with data from its protobuf representation. + func (c *Commit) FromProto(other *pb.Commit) error { c.Height = other.Height if !safeCopy(c.HeaderHash[:], other.HeaderHash) { return errors.New("invalid length of HeaderHash") } c.Signatures = byteSlicesToSignatures(other.Signatures) - // For backwards compatibility with old state files that don't have this field. + if other.TmSignature != nil { c.TMSignature = types.CommitSig{ BlockIDFlag: types.BlockIDFlag(other.TmSignature.BlockIdFlag), @@ -250,7 +250,7 @@ func (c *Commit) FromProto(other *pb.Commit) error { return nil } -// ToProto converts State into protobuf representation and returns it. + func (s *State) ToProto() (*pb.State, error) { var proposerProto *pb.Sequencer proposer := s.GetProposer() @@ -265,25 +265,25 @@ func (s *State) ToProto() (*pb.State, error) { return &pb.State{ Version: &s.Version, ChainId: s.ChainID, - InitialHeight: int64(s.InitialHeight), //nolint:gosec // height is non-negative and falls in int64 - LastBlockHeight: int64(s.Height()), //nolint:gosec // height is non-negative and falls in int64 + InitialHeight: int64(s.InitialHeight), + LastBlockHeight: int64(s.Height()), ConsensusParams: s.ConsensusParams, LastResultsHash: s.LastResultsHash[:], LastHeaderHash: s.LastHeaderHash[:], AppHash: s.AppHash[:], RollappParams: s.RollappParams, Proposer: proposerProto, - RevisionStartHeight: int64(s.RevisionStartHeight), //nolint:gosec // height is non-negative and falls in int64 + RevisionStartHeight: int64(s.RevisionStartHeight), }, nil } -// FromProto fills State with data from its protobuf representation. 
+ func (s *State) FromProto(other *pb.State) error { s.Version = *other.Version s.ChainID = other.ChainId - s.InitialHeight = uint64(other.InitialHeight) //nolint:gosec // height is non-negative and falls in int64 - s.SetHeight(uint64(other.LastBlockHeight)) //nolint:gosec // height is non-negative and falls in int64 - s.RevisionStartHeight = uint64(other.RevisionStartHeight) //nolint:gosec // height is non-negative and falls in int64 + s.InitialHeight = uint64(other.InitialHeight) + s.SetHeight(uint64(other.LastBlockHeight)) + s.RevisionStartHeight = uint64(other.RevisionStartHeight) if other.Proposer != nil { proposer, err := SequencerFromProto(other.Proposer) if err != nil { @@ -291,7 +291,7 @@ func (s *State) FromProto(other *pb.State) error { } s.SetProposer(proposer) } else { - // proposer may be nil in the state + s.SetProposer(nil) } @@ -303,7 +303,7 @@ func (s *State) FromProto(other *pb.State) error { return nil } -// ToProto converts Sequencer into protobuf representation and returns it. + func (s *Sequencer) ToProto() (*pb.Sequencer, error) { if s == nil { return nil, fmt.Errorf("nil sequencer") @@ -320,7 +320,7 @@ func (s *Sequencer) ToProto() (*pb.Sequencer, error) { }, nil } -// SequencerFromProto fills Sequencer with data from its protobuf representation. + func SequencerFromProto(seq *pb.Sequencer) (*Sequencer, error) { if seq == nil { return nil, fmt.Errorf("nil sequencer") @@ -337,7 +337,7 @@ func SequencerFromProto(seq *pb.Sequencer) (*Sequencer, error) { }, nil } -// ToProto converts Sequencers into protobuf representation and returns it. + func (s Sequencers) ToProto() (*pb.SequencerSet, error) { seqs := make([]pb.Sequencer, len(s)) for i, seq := range s { @@ -350,7 +350,7 @@ func (s Sequencers) ToProto() (*pb.SequencerSet, error) { return &pb.SequencerSet{Sequencers: seqs}, nil } -// SequencersFromProto fills Sequencers with data from its protobuf representation. + func SequencersFromProto(s *pb.SequencerSet) (Sequencers, error) { if s == nil { return Sequencers{}, fmt.Errorf("nil sequencer set") @@ -389,7 +389,7 @@ func evidenceToProto(evidence EvidenceData) []*abci.Evidence { var ret []*abci.Evidence for _, e := range evidence.Evidence { for _, ae := range e.ABCI() { - ret = append(ret, &ae) //#nosec + ret = append(ret, &ae) } } return ret @@ -397,7 +397,7 @@ func evidenceToProto(evidence EvidenceData) []*abci.Evidence { func evidenceFromProto([]*abci.Evidence) EvidenceData { var ret EvidenceData - // TODO(tzdybal): right now Evidence is just an interface without implementations + return ret } @@ -423,7 +423,7 @@ func byteSlicesToSignatures(bytes [][]byte) []Signature { return sigs } -// Convert a list of blocks to a list of protobuf blocks. + func blocksToProto(blocks []*Block) []*pb.Block { pbBlocks := make([]*pb.Block, len(blocks)) for i, b := range blocks { @@ -432,7 +432,7 @@ func blocksToProto(blocks []*Block) []*pb.Block { return pbBlocks } -// protoToBlocks converts a list of protobuf blocks to a list of go struct blocks. + func protoToBlocks(pbBlocks []*pb.Block) []*Block { blocks := make([]*Block, len(pbBlocks)) for i, b := range pbBlocks { @@ -445,7 +445,7 @@ func protoToBlocks(pbBlocks []*pb.Block) []*Block { return blocks } -// commitsToProto converts a list of commits to a list of protobuf commits. 
+ func commitsToProto(commits []*Commit) []*pb.Commit { pbCommits := make([]*pb.Commit, len(commits)) for i, c := range commits { @@ -454,7 +454,7 @@ func commitsToProto(commits []*Commit) []*pb.Commit { return pbCommits } -// protoToCommits converts a list of protobuf commits to a list of go struct commits. + func protoToCommits(pbCommits []*pb.Commit) []*Commit { commits := make([]*Commit, len(pbCommits)) for i, c := range pbCommits { diff --git a/types/state.go b/types/state.go index aa96bc985..cf1442b18 100644 --- a/types/state.go +++ b/types/state.go @@ -5,7 +5,7 @@ import ( "fmt" "sync/atomic" - // TODO(tzdybal): copy to local project? + tmcrypto "github.com/tendermint/tendermint/crypto" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" @@ -16,34 +16,34 @@ import ( const rollappparams_modulename = "rollappparams" -// State contains information about current state of the blockchain. + type State struct { Version tmstate.Version RevisionStartHeight uint64 - // immutable + ChainID string - InitialHeight uint64 // should be 1, not 0, when starting from height 1 + InitialHeight uint64 - // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) + LastBlockHeight atomic.Uint64 - // Proposer is a sequencer that acts as a proposer. Can be nil if no proposer is set. + Proposer atomic.Pointer[Sequencer] - // Consensus parameters used for validating blocks. - // Changes returned by EndBlock and updated after Commit. + + ConsensusParams tmproto.ConsensusParams - // Merkle root of the results from executing prev block + LastResultsHash [32]byte - // the latest AppHash we've received from calling abci.Commit() + AppHash [32]byte - // New rollapp parameters . + RollappParams dymint.RollappParams - // LastHeaderHash is the hash of the last block header. + LastHeaderHash [32]byte } @@ -59,7 +59,7 @@ func (s *State) GetProposerPubKey() tmcrypto.PubKey { return proposer.PubKey() } -// GetProposerHash returns the hash of the proposer + func (s *State) GetProposerHash() []byte { proposer := s.Proposer.Load() if proposer == nil { @@ -68,7 +68,7 @@ func (s *State) GetProposerHash() []byte { return proposer.MustHash() } -// SetProposer sets the proposer. It may set the proposer to nil. + func (s *State) SetProposer(proposer *Sequencer) { s.Proposer.Store(proposer) } @@ -81,18 +81,18 @@ type RollappParams struct { Params *dymint.RollappParams } -// SetHeight sets the height saved in the Store if it is higher than the existing height -// returns OK if the value was updated successfully or did not need to be updated + + func (s *State) SetHeight(height uint64) { s.LastBlockHeight.Store(height) } -// Height returns height of the highest block saved in the Store. + func (s *State) Height() uint64 { return s.LastBlockHeight.Load() } -// NextHeight returns the next height that expected to be stored in store. + func (s *State) NextHeight() uint64 { if s.IsGenesis() { return s.InitialHeight @@ -100,7 +100,7 @@ func (s *State) NextHeight() uint64 { return s.Height() + 1 } -// SetRollappParamsFromGenesis sets the rollapp consensus params from genesis + func (s *State) SetRollappParamsFromGenesis(appState json.RawMessage) error { var objmap map[string]json.RawMessage err := json.Unmarshal(appState, &objmap) diff --git a/types/tx.go b/types/tx.go index 0565c5a47..fe4d1f6fa 100644 --- a/types/tx.go +++ b/types/tx.go @@ -6,20 +6,20 @@ import ( tmbytes "github.com/tendermint/tendermint/libs/bytes" ) -// Tx represents transaction. + type Tx []byte -// Txs represents a slice of transactions. 
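state.go above keeps the height bookkeeping (SetHeight, Height, NextHeight) while dropping the note that LastBlockHeight is 0 at genesis. A small sketch of that genesis fallback, assuming IsGenesis reports true until a height has been stored:

package main

import (
    "fmt"

    "github.com/dymensionxyz/dymint/types"
)

func main() {
    var s types.State
    s.InitialHeight = 1

    // With no block stored yet, NextHeight falls back to InitialHeight.
    fmt.Println(s.NextHeight()) // 1

    // Once that height is stored, the expected next height advances by one.
    s.SetHeight(1)
    fmt.Println(s.NextHeight()) // 2
}
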
+ type Txs []Tx -// Hash computes the TMHASH hash of the wire encoded transaction. + func (tx Tx) Hash() []byte { return tmhash.Sum(tx) } -// Proof returns a simple merkle proof for this node. -// Panics if i < 0 or i >= len(txs) -// TODO: optimize this! + + + func (txs Txs) Proof(i int) TxProof { l := len(txs) bzs := make([][]byte, l) @@ -35,7 +35,7 @@ func (txs Txs) Proof(i int) TxProof { } } -// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. + type TxProof struct { RootHash tmbytes.HexBytes `json:"root_hash"` Data Tx `json:"data"` diff --git a/types/validation.go b/types/validation.go index aa5bedae4..3b7c37f48 100644 --- a/types/validation.go +++ b/types/validation.go @@ -21,7 +21,7 @@ func ValidateProposedTransition(state *State, block *Block, commit *Commit, prop return nil } -// ValidateBasic performs basic validation of a block. + func (b *Block) ValidateBasic() error { err := b.Header.ValidateBasic() if err != nil { @@ -93,7 +93,7 @@ func (b *Block) ValidateWithState(state *State) error { return nil } -// ValidateBasic performs basic validation of a header. + func (h *Header) ValidateBasic() error { if len(h.ProposerAddress) == 0 { return ErrEmptyProposerAddress @@ -102,13 +102,13 @@ func (h *Header) ValidateBasic() error { return nil } -// ValidateBasic performs basic validation of block data. -// Actually it's a placeholder, because nothing is checked. + + func (d *Data) ValidateBasic() error { return nil } -// ValidateBasic performs basic validation of a commit. + func (c *Commit) ValidateBasic() error { if c.Height > 0 { if len(c.Signatures) != 1 { @@ -133,7 +133,7 @@ func (c *Commit) ValidateWithHeader(proposerPubKey tmcrypto.PubKey, header *Head return err } - // commit is validated to have single signature + if !proposerPubKey.VerifySignature(abciHeaderBytes, c.Signatures[0]) { return NewErrInvalidSignatureFraud(ErrInvalidSignature, header, c) } diff --git a/utils/atomic/funcs.go b/utils/atomic/funcs.go index 1812d0959..d6cca097e 100644 --- a/utils/atomic/funcs.go +++ b/utils/atomic/funcs.go @@ -4,12 +4,10 @@ import ( "sync/atomic" ) -/* -TODO: move to sdk-utils -*/ -// Uint64Sub does x := x-y and returns the new value of x + + func Uint64Sub(x *atomic.Uint64, y uint64) uint64 { - // Uses math + return x.Add(^(y - 1)) } diff --git a/utils/channel/funcs.go b/utils/channel/funcs.go index 614414a3f..2513314dd 100644 --- a/utils/channel/funcs.go +++ b/utils/channel/funcs.go @@ -1,7 +1,7 @@ package channel -// DrainForever will drain the channels in separate go routines in a loop forever -// Intended for tests only + + func DrainForever[T any](chs ...<-chan T) { for _, ch := range chs { go func() { @@ -12,17 +12,17 @@ func DrainForever[T any](chs ...<-chan T) { } } -// Nudger can be used to make a goroutine ('A') sleep, and have another goroutine ('B') wake him up -// A will not block if B is not asleep. + + type Nudger struct { - C chan struct{} // Receive on C to sleep + C chan struct{} } func NewNudger() *Nudger { return &Nudger{make(chan struct{})} } -// Nudge wakes up the waiting thread if any. Non blocking. 
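The removed Nudger comments described a non-blocking wake-up between two goroutines: the sleeper receives on C, the nudger sends without ever blocking. A sketch of that pattern (the sleep is only there to let the waiter start; the import path follows the file path in this patch):

package main

import (
    "fmt"
    "time"

    uchannel "github.com/dymensionxyz/dymint/utils/channel"
)

func main() {
    nudger := uchannel.NewNudger()
    done := make(chan struct{})

    // Goroutine A sleeps until somebody nudges it.
    go func() {
        <-nudger.C
        fmt.Println("woken up")
        close(done)
    }()

    time.Sleep(10 * time.Millisecond) // give A time to start waiting
    nudger.Nudge()                    // non-blocking: dropped if nobody is waiting
    <-done
}
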
+ func (w Nudger) Nudge() { select { case w.C <- struct{}{}: diff --git a/utils/errors/err_group.go b/utils/errors/err_group.go index c4d82409a..6ddb5d414 100644 --- a/utils/errors/err_group.go +++ b/utils/errors/err_group.go @@ -5,14 +5,12 @@ import ( "golang.org/x/sync/errgroup" ) -/* -TODO: move to sdk-utils -*/ - -// ErrGroupGoLog calls eg.Go on the errgroup but it will log the error immediately when it occurs -// instead of waiting for all goroutines in the group to finish first. This has the advantage of making sure all -// errors are logged, not just the first one, and it is more immediate. Also, it is guaranteed, in case that -// of the goroutines is not properly context aware. + + + + + + func ErrGroupGoLog(eg *errgroup.Group, logger types.Logger, fn func() error) { eg.Go(func() error { err := fn() diff --git a/utils/event/funcs.go b/utils/event/funcs.go index 8b76b7ce0..000cbf3f0 100644 --- a/utils/event/funcs.go +++ b/utils/event/funcs.go @@ -12,9 +12,9 @@ import ( tmquery "github.com/tendermint/tendermint/libs/pubsub/query" ) -// MustSubscribe subscribes to events and sends back a callback -// clientID is essentially the subscriber id, see https://pkg.go.dev/github.com/tendermint/tendermint/libs/pubsub#pkg-overview -// - will not panic on context cancel or deadline exceeded + + + func MustSubscribe( ctx context.Context, pubsubServer *pubsub.Server, @@ -46,7 +46,7 @@ func MustSubscribe( } } -// MustPublish submits an event or panics - will not panic on context cancel or deadline exceeded + func MustPublish(ctx context.Context, pubsubServer *pubsub.Server, msg interface{}, events map[string][]string) { err := pubsubServer.PublishWithEvents(ctx, msg, events) if err != nil && !errors.Is(err, context.Canceled) { @@ -54,7 +54,7 @@ func MustPublish(ctx context.Context, pubsubServer *pubsub.Server, msg interface } } -// QueryFor returns a query for the given event. + func QueryFor(eventTypeKey, eventType string) tmpubsub.Query { return tmquery.MustParse(fmt.Sprintf("%s='%s'", eventTypeKey, eventType)) } diff --git a/utils/queue/queue.go b/utils/queue/queue.go index 17b760ecd..4600ddd86 100644 --- a/utils/queue/queue.go +++ b/utils/queue/queue.go @@ -5,40 +5,40 @@ import ( "strings" ) -// Queue holds elements in an array-list. -// This implementation is NOT thread-safe! + + type Queue[T any] struct { elements []T } -// FromSlice instantiates a new queue from the given slice. + func FromSlice[T any](s []T) *Queue[T] { return &Queue[T]{elements: s} } -// New instantiates a new empty queue + func New[T any]() *Queue[T] { return &Queue[T]{elements: make([]T, 0)} } -// Enqueue adds a value to the end of the queue + func (q *Queue[T]) Enqueue(values ...T) { q.elements = append(q.elements, values...) } -// DequeueAll returns all queued elements (FIFO order) and cleans the entire queue. + func (q *Queue[T]) DequeueAll() []T { values := q.elements q.elements = make([]T, 0) return values } -// Size returns number of elements within the queue. + func (q *Queue[T]) Size() int { return len(q.elements) } -// String returns a string representation. 
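utils/queue above loses its doc comments; for reference, a minimal usage sketch of the (not thread-safe) FIFO queue:

package main

import (
    "fmt"

    "github.com/dymensionxyz/dymint/utils/queue"
)

func main() {
    q := queue.New[string]()
    q.Enqueue("a", "b")
    q.Enqueue("c")

    fmt.Println(q.Size()) // 3

    // DequeueAll drains the queue in FIFO order and leaves it empty.
    fmt.Println(q.DequeueAll()) // [a b c]
    fmt.Println(q.Size())       // 0
}
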
+ func (q *Queue[T]) String() string { str := "Queue[" values := []string{} diff --git a/utils/retry/backoff.go b/utils/retry/backoff.go index 05d7ac53d..b9276edaa 100644 --- a/utils/retry/backoff.go +++ b/utils/retry/backoff.go @@ -10,14 +10,14 @@ const ( defaultBackoffFactor = 2 ) -// BackoffConfig is a configuration for a backoff, it's used to create new instances + type BackoffConfig struct { InitialDelay time.Duration `json:"initial_delay"` MaxDelay time.Duration `json:"max_delay"` GrowthFactor float64 `json:"growth_factor"` } -// Backoff creates a new Backoff instance with the configuration (starting at 0 attempts made so far) + func (c BackoffConfig) Backoff() Backoff { return Backoff{ delay: c.InitialDelay, @@ -40,16 +40,16 @@ func WithInitialDelay(d time.Duration) BackoffOption { } } -// WithMaxDelay sets the maximum delay for the backoff. The delay will not exceed this value. -// Set 0 to disable the maximum delay. + + func WithMaxDelay(d time.Duration) BackoffOption { return func(b *BackoffConfig) { b.MaxDelay = d } } -// WithGrowthFactor sets the growth factor for the backoff. The delay will be multiplied by this factor on each call to Delay. -// The factor should be greater than 1.0 + + func WithGrowthFactor(x float64) BackoffOption { return func(b *BackoffConfig) { b.GrowthFactor = x @@ -68,7 +68,7 @@ func NewBackoffConfig(opts ...BackoffOption) BackoffConfig { return ret } -// Delay returns the current delay. The subsequent delay will be increased by the growth factor up to the maximum. + func (b *Backoff) Delay() time.Duration { ret := b.delay b.delay = time.Duration(float64(b.delay) * b.growthFactor) @@ -78,7 +78,7 @@ func (b *Backoff) Delay() time.Duration { return ret } -// Sleep sleeps for the current delay. The subsequent delay will be increased by the growth factor up to the maximum. + func (b *Backoff) Sleep() { time.Sleep(b.Delay()) } diff --git a/utils/retry/doc.go b/utils/retry/doc.go index fe69a7266..6d41b0f16 100644 --- a/utils/retry/doc.go +++ b/utils/retry/doc.go @@ -1,4 +1,4 @@ -// Package retry shall be used alongside "github.com/avast/retry-go/v4" for simple retry patterns -// which the avast package makes difficult. -// Methods in here should be simple and not warrant another dependency. 
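retry/backoff.go keeps the exponential backoff helper; a sketch of building a config with the functional options and consuming it (the delay values are arbitrary):

package main

import (
    "fmt"
    "time"

    "github.com/dymensionxyz/dymint/utils/retry"
)

func main() {
    cfg := retry.NewBackoffConfig(
        retry.WithInitialDelay(200*time.Millisecond),
        retry.WithGrowthFactor(2),
        retry.WithMaxDelay(5*time.Second), // 0 would disable the cap
    )

    b := cfg.Backoff() // fresh backoff state, starting at the initial delay
    for i := 0; i < 4; i++ {
        // Delay returns the current delay and grows the next one by the factor, up to the max.
        fmt.Println(b.Delay()) // 200ms, 400ms, 800ms, 1.6s
    }
}
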
+ + + package retry diff --git a/version/version.go b/version/version.go index acbae16e8..d461e5b9b 100644 --- a/version/version.go +++ b/version/version.go @@ -15,5 +15,5 @@ func GetDRSVersion() (uint32, error) { if err != nil { return uint32(0), fmt.Errorf("converting DRS version to int: %v", err) } - return uint32(currentDRS), nil //nolint:gosec // DRS is uint32 + return uint32(currentDRS), nil } From 46319fde5746ca3a51d9cd89abee75659c84cd91 Mon Sep 17 00:00:00 2001 From: danwt <30197399+danwt@users.noreply.github.com> Date: Thu, 12 Dec 2024 18:00:46 +0000 Subject: [PATCH 4/4] fmt --- block/balance.go | 1 - block/block.go | 63 +---- block/block_cache.go | 1 - block/consensus.go | 1 - block/executor.go | 30 +-- block/fork.go | 57 +--- block/fraud.go | 6 - block/initchain.go | 7 +- block/manager.go | 83 +----- block/modes.go | 26 +- block/p2p.go | 10 +- block/produce.go | 77 +----- block/production_test.go | 1 - block/pruning.go | 10 +- block/pruning_test.go | 1 - block/retriever.go | 18 +- block/sequencers.go | 34 +-- block/slvalidator.go | 37 +-- block/slvalidator_test.go | 6 - block/state.go | 29 +- block/submit.go | 64 ++--- block/sync.go | 28 +- block/validate.go | 7 +- cmd/dymint/commands/init.go | 4 - cmd/dymint/commands/root.go | 4 - cmd/dymint/commands/show_node_id.go | 3 +- cmd/dymint/commands/show_sequencer.go | 2 - cmd/dymint/commands/start.go | 6 - cmd/dymint/main.go | 1 - config/config.go | 44 +--- config/defaults.go | 5 - config/flags.go | 6 +- config/p2p.go | 17 +- config/rpc.go | 23 -- config/toml.go | 9 - conv/config.go | 6 +- conv/crypto.go | 1 - da/avail/avail.go | 42 +-- da/celestia/celestia.go | 40 +-- da/celestia/config.go | 4 +- da/celestia/mock/messages.go | 13 +- da/celestia/mock/server.go | 4 - da/celestia/rpc.go | 9 - da/celestia/types/rpc.go | 3 - da/celestia/types/types.go | 34 +-- da/da.go | 87 ++---- da/errors.go | 21 +- da/grpc/grpc.go | 17 +- da/grpc/mockserv/mockserv.go | 1 - da/local/local.go | 23 +- da/registry/registry.go | 3 - indexers/blockindexer/block.go | 7 - indexers/blockindexer/kv/kv.go | 75 +----- indexers/blockindexer/kv/kv_test.go | 2 - indexers/blockindexer/null/null.go | 1 - indexers/blockindexer/query_range.go | 17 +- indexers/txindex/indexer.go | 13 - indexers/txindex/indexer_service.go | 16 -- indexers/txindex/indexer_service_test.go | 33 +-- indexers/txindex/kv/kv.go | 112 +------- indexers/txindex/kv/kv_test.go | 3 +- indexers/txindex/kv/utils.go | 1 - indexers/txindex/null/null.go | 4 - mempool/cache.go | 16 -- mempool/clist/clist.go | 79 ++---- mempool/ids.go | 2 - mempool/mempool.go | 64 ----- mempool/metrics.go | 20 -- mempool/mock/mempool.go | 1 - mempool/tx.go | 6 - mempool/v1/mempool.go | 248 ++---------------- mempool/v1/tx.go | 27 +- .../dymint/block/mock_ExecutorI.go | 66 +---- .../dymint/block/mock_FraudHandler.go | 13 +- .../dymint/da/avail/mock_SubstrateApiI.go | 194 +------------- .../celestia/types/mock_CelestiaRPCClient.go | 51 +--- .../da/mock_DataAvailabilityLayerClient.go | 42 +-- .../dymint/p2p/mock_ProposerGetter.go | 14 +- .../dymint/p2p/mock_StateGetter.go | 14 +- .../settlement/dymension/mock_CosmosClient.go | 53 +--- .../dymint/settlement/mock_ClientI.go | 75 +----- .../dymensionxyz/dymint/store/mock_Store.go | 140 +--------- .../sequencer/types/mock_QueryClient.go | 50 +--- .../dymension/rollapp/mock_QueryClient.go | 68 +---- .../dymension/sequencer/mock_QueryClient.go | 56 +--- .../tendermint/abci/types/mock_Application.go | 59 +---- .../tendermint/proxy/mock_AppConnConsensus.go | 34 +-- 
.../tendermint/proxy/mock_AppConns.go | 51 +--- node/events/types.go | 9 - node/mempool/mempool.go | 12 +- node/node.go | 27 +- node/node_test.go | 1 - p2p/block.go | 13 +- p2p/block_sync.go | 26 +- p2p/block_sync_dag.go | 15 -- p2p/blocks_received.go | 7 +- p2p/client.go | 76 +----- p2p/events.go | 11 - p2p/gossip.go | 13 - p2p/validator.go | 12 +- rpc/client/client.go | 144 ++-------- rpc/client/client_test.go | 2 - rpc/client/utils.go | 7 +- rpc/json/handler.go | 17 +- rpc/json/service.go | 16 +- rpc/json/types.go | 14 - rpc/json/ws.go | 5 +- rpc/middleware/client.go | 4 - rpc/middleware/registry.go | 6 - rpc/middleware/status.go | 2 +- rpc/server.go | 16 +- settlement/config.go | 5 +- settlement/dymension/cosmosclient.go | 5 - settlement/dymension/dymension.go | 62 +---- settlement/dymension/events.go | 14 +- settlement/dymension/options.go | 6 - settlement/dymension/utils.go | 4 - settlement/errors.go | 1 - settlement/events.go | 10 +- settlement/grpc/grpc.go | 25 +- settlement/local/local.go | 34 +-- settlement/registry/registry.go | 9 +- settlement/settlement.go | 53 ++-- store/badger.go | 56 +--- store/prefix.go | 13 - store/pruning.go | 4 - store/store.go | 24 -- store/storeIface.go | 37 +-- test/loadtime/cmd/load/main.go | 12 +- test/loadtime/cmd/report/main.go | 3 - test/loadtime/payload/payload.go | 16 +- test/loadtime/report/report.go | 40 +-- testutil/block.go | 9 +- testutil/logger.go | 13 - testutil/mocks.go | 34 +-- testutil/node.go | 4 +- testutil/p2p.go | 3 - testutil/rpc.go | 3 +- testutil/types.go | 16 +- types/batch.go | 12 +- types/block.go | 41 +-- types/block_source.go | 2 +- types/conv.go | 21 +- types/errors.go | 28 -- types/evidence.go | 18 +- types/hashing.go | 2 - types/instruction.go | 2 +- types/logger.go | 1 - .../dymensionxyz/dymension/rollapp/errors.go | 4 - .../dymensionxyz/dymension/rollapp/events.go | 2 - .../pb/dymensionxyz/dymension/rollapp/keys.go | 5 - .../dymension/rollapp/message_update_state.go | 6 +- .../dymensionxyz/dymension/rollapp/params.go | 1 - .../dymension/sequencer/events.go | 7 - .../dymensionxyz/dymension/sequencer/keys.go | 24 +- .../dymension/sequencer/params.go | 1 - types/pb/dymint/state.pb.go | 2 +- types/rollapp.go | 1 - types/sequencer_set.go | 37 +-- types/serialization.go | 49 +--- types/state.go | 22 +- types/tx.go | 7 - types/validation.go | 6 - utils/atomic/funcs.go | 4 - utils/channel/funcs.go | 7 +- utils/errors/err_group.go | 6 - utils/event/funcs.go | 5 - utils/queue/queue.go | 8 - utils/retry/backoff.go | 8 - utils/retry/doc.go | 3 - version/version.go | 2 +- 171 files changed, 489 insertions(+), 3581 deletions(-) diff --git a/block/balance.go b/block/balance.go index f77b518f3..91c81778b 100644 --- a/block/balance.go +++ b/block/balance.go @@ -14,7 +14,6 @@ import ( const CheckBalancesInterval = 3 * time.Minute - func (m *Manager) MonitorBalances(ctx context.Context) error { ticker := time.NewTicker(CheckBalancesInterval) defer ticker.Stop() diff --git a/block/block.go b/block/block.go index b8a6f3913..1832e3079 100644 --- a/block/block.go +++ b/block/block.go @@ -11,12 +11,11 @@ import ( "github.com/dymensionxyz/dymint/types" ) - func (m *Manager) applyBlockWithFraudHandling(block *types.Block, commit *types.Commit, blockMetaData types.BlockMetaData) error { validateWithFraud := func() error { if err := m.validateBlockBeforeApply(block, commit); err != nil { m.blockCache.Delete(block.Header.Height) - + return fmt.Errorf("block not valid at height %d, dropping it: err:%w", block.Header.Height, err) } @@ -29,27 
+28,15 @@ func (m *Manager) applyBlockWithFraudHandling(block *types.Block, commit *types. err := validateWithFraud() if errors.Is(err, gerrc.ErrFault) { - - - - m.FraudHandler.HandleFault(m.Ctx, err) } return err } - - - - - - func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMetaData types.BlockMetaData) error { var retainHeight int64 - - if block.Header.Height != m.State.NextHeight() { return types.ErrInvalidBlockHeight } @@ -58,13 +45,11 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta m.logger.Debug("Applying block", "height", block.Header.Height, "source", blockMetaData.Source.String()) - isBlockAlreadyApplied, err := m.isHeightAlreadyApplied(block.Header.Height) if err != nil { return fmt.Errorf("check if block is already applied: %w", err) } - - + if isBlockAlreadyApplied { err := m.UpdateStateFromApp(block.Header.Hash()) if err != nil { @@ -73,7 +58,7 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta m.logger.Info("updated state from app commit", "height", block.Header.Height) } else { var appHash []byte - + _, err = m.Store.SaveBlock(block, commit, nil) if err != nil { return fmt.Errorf("save block: %w", err) @@ -104,16 +89,11 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta return fmt.Errorf("add drs version: %w", err) } - appHash, retainHeight, err = m.Executor.Commit(m.State, block, responses) if err != nil { return fmt.Errorf("commit block: %w", err) } - - - - if 0 < retainHeight { select { case m.pruningC <- retainHeight: @@ -121,25 +101,13 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta m.logger.Debug("pruning channel full. skipping pruning", "retainHeight", retainHeight) } } - - + m.Executor.UpdateStateAfterCommit(m.State, responses, appHash, block.Header.Height, block.Header.Hash()) } - m.LastBlockTime.Store(block.Header.GetTimestamp().UTC().UnixNano()) - - - - - - - - - - - + proposer := m.State.GetProposer() if proposer == nil { return fmt.Errorf("logic error: got nil proposer while applying block") @@ -147,28 +115,18 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta batch := m.Store.NewBatch() - - batch, err = m.Store.SaveProposer(block.Header.Height, *proposer, batch) if err != nil { return fmt.Errorf("save proposer: %w", err) } - isProposerUpdated := m.Executor.UpdateProposerFromBlock(m.State, m.Sequencers, block) - batch, err = m.Store.SaveState(m.State, batch) if err != nil { return fmt.Errorf("update state: %w", err) } - - - - - - if len(blockMetaData.SequencerSet) != 0 { batch, err = m.Store.SaveLastBlockSequencerSet(blockMetaData.SequencerSet, batch) if err != nil { @@ -185,16 +143,11 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta m.blockCache.Delete(block.Header.Height) - err = m.ValidateConfigWithRollappParams() if err != nil { return err } - - - - if isProposerUpdated && m.AmIProposerOnRollapp() { panic("I'm the new Proposer now. 
restarting as a proposer") } @@ -202,16 +155,13 @@ func (m *Manager) applyBlock(block *types.Block, commit *types.Commit, blockMeta return nil } - func (m *Manager) isHeightAlreadyApplied(blockHeight uint64) (bool, error) { proxyAppInfo, err := m.Executor.GetAppInfo() if err != nil { return false, errorsmod.Wrap(err, "get app info") } - isBlockAlreadyApplied := uint64(proxyAppInfo.LastBlockHeight) == blockHeight - - + isBlockAlreadyApplied := uint64(proxyAppInfo.LastBlockHeight) == blockHeight return isBlockAlreadyApplied, nil } @@ -240,7 +190,6 @@ func (m *Manager) attemptApplyCachedBlocks() error { return nil } - func (m *Manager) validateBlockBeforeApply(block *types.Block, commit *types.Commit) error { return types.ValidateProposedTransition(m.State, block, commit, m.State.GetProposerPubKey()) } diff --git a/block/block_cache.go b/block/block_cache.go index b74176d9e..4c015c504 100644 --- a/block/block_cache.go +++ b/block/block_cache.go @@ -5,7 +5,6 @@ import ( ) type Cache struct { - cache map[uint64]types.CachedBlock } diff --git a/block/consensus.go b/block/consensus.go index 87cc6c39d..9a6f3d34e 100644 --- a/block/consensus.go +++ b/block/consensus.go @@ -47,7 +47,6 @@ func ConsensusMsgSigner(m proto.Message) (sdk.AccAddress, error) { } } - func ConsensusMsgsOnSequencerSetUpdate(newSequencers []types.Sequencer) ([]proto.Message, error) { msgs := make([]proto.Message, 0, len(newSequencers)) for _, s := range newSequencers { diff --git a/block/executor.go b/block/executor.go index 9f7d72f8b..e48c5be85 100644 --- a/block/executor.go +++ b/block/executor.go @@ -19,7 +19,6 @@ import ( protoutils "github.com/dymensionxyz/dymint/utils/proto" ) - const minBlockMaxBytes = 10000 type ExecutorI interface { @@ -33,15 +32,12 @@ type ExecutorI interface { UpdateStateAfterCommit(s *types.State, resp *tmstate.ABCIResponses, appHash []byte, height uint64, lastHeaderHash [32]byte) UpdateProposerFromBlock(s *types.State, seqSet *types.SequencerSet, block *types.Block) bool - - AddConsensusMsgs(...proto2.Message) GetConsensusMsgs() []proto2.Message } var _ ExecutorI = new(Executor) - type Executor struct { localAddress []byte chainID string @@ -55,8 +51,6 @@ type Executor struct { logger types.Logger } - - func NewExecutor( localAddress []byte, chainID string, @@ -79,23 +73,17 @@ func NewExecutor( return &be, nil } - - func (e *Executor) AddConsensusMsgs(msgs ...proto2.Message) { e.consensusMsgQueue.Add(msgs...) 
} - - func (e *Executor) GetConsensusMsgs() []proto2.Message { return e.consensusMsgQueue.Get() } - func (e *Executor) InitChain(genesis *tmtypes.GenesisDoc, genesisChecksum string, valset []*tmtypes.Validator) (*abci.ResponseInitChain, error) { valUpdates := abci.ValidatorUpdates{} - for _, validator := range valset { tmkey, err := tmcrypto.PubKeyToProto(validator.PubKey) if err != nil { @@ -136,7 +124,6 @@ func (e *Executor) InitChain(genesis *tmtypes.GenesisDoc, genesisChecksum string }) } - func (e *Executor) CreateBlock( height uint64, lastCommit *types.Commit, @@ -144,8 +131,8 @@ func (e *Executor) CreateBlock( state *types.State, maxBlockDataSizeBytes uint64, ) *types.Block { - maxBlockDataSizeBytes = min(maxBlockDataSizeBytes, uint64(max(minBlockMaxBytes, state.ConsensusParams.Block.MaxBytes))) - mempoolTxs := e.mempool.ReapMaxBytesMaxGas(int64(maxBlockDataSizeBytes), state.ConsensusParams.Block.MaxGas) + maxBlockDataSizeBytes = min(maxBlockDataSizeBytes, uint64(max(minBlockMaxBytes, state.ConsensusParams.Block.MaxBytes))) + mempoolTxs := e.mempool.ReapMaxBytesMaxGas(int64(maxBlockDataSizeBytes), state.ConsensusParams.Block.MaxGas) block := &types.Block{ Header: types.Header{ @@ -178,7 +165,6 @@ func (e *Executor) CreateBlock( return block } - func (e *Executor) Commit(state *types.State, block *types.Block, resp *tmstate.ABCIResponses) ([]byte, int64, error) { appHash, retainHeight, err := e.commit(state, block, resp.DeliverTxs) if err != nil { @@ -193,7 +179,6 @@ func (e *Executor) Commit(state *types.State, block *types.Block, resp *tmstate. return appHash, retainHeight, nil } - func (e *Executor) GetAppInfo() (*abci.ResponseInfo, error) { return e.proxyAppQueryConn.InfoSync(abci.RequestInfo{}) } @@ -214,7 +199,7 @@ func (e *Executor) commit(state *types.State, block *types.Block, deliverTxs []* maxBytes := state.ConsensusParams.Block.MaxBytes maxGas := state.ConsensusParams.Block.MaxGas - err = e.mempool.Update(int64(block.Header.Height), fromDymintTxs(block.Data.Txs), deliverTxs) + err = e.mempool.Update(int64(block.Header.Height), fromDymintTxs(block.Data.Txs), deliverTxs) if err != nil { return nil, 0, err } @@ -224,7 +209,6 @@ func (e *Executor) commit(state *types.State, block *types.Block, deliverTxs []* return resp.Data, resp.RetainHeight, err } - func (e *Executor) ExecuteBlock(block *types.Block) (*tmstate.ABCIResponses, error) { abciResponses := new(tmstate.ABCIResponses) abciResponses.DeliverTxs = make([]*abci.ResponseDeliverTx, len(block.Data.Txs)) @@ -273,7 +257,7 @@ func (e *Executor) ExecuteBlock(block *types.Block) (*tmstate.ABCIResponses, err } } - abciResponses.EndBlock, err = e.proxyAppConsensusConn.EndBlockSync(abci.RequestEndBlock{Height: int64(block.Header.Height)}) + abciResponses.EndBlock, err = e.proxyAppConsensusConn.EndBlockSync(abci.RequestEndBlock{Height: int64(block.Header.Height)}) if err != nil { return nil, err } @@ -305,14 +289,14 @@ func (e *Executor) publishEvents(resp *tmstate.ABCIResponses, block *types.Block for _, ev := range abciBlock.Evidence.Evidence { err = multierr.Append(err, e.eventBus.PublishEventNewEvidence(tmtypes.EventDataNewEvidence{ Evidence: ev, - Height: int64(block.Header.Height), + Height: int64(block.Header.Height), })) } for i, dtx := range resp.DeliverTxs { err = multierr.Append(err, e.eventBus.PublishEventTx(tmtypes.EventDataTx{ TxResult: abci.TxResult{ - Height: int64(block.Header.Height), - Index: uint32(i), + Height: int64(block.Header.Height), + Index: uint32(i), Tx: abciBlock.Data.Txs[i], Result: *dtx, }, diff 
--git a/block/fork.go b/block/fork.go index 5f1ff5878..12a9158f5 100644 --- a/block/fork.go +++ b/block/fork.go @@ -20,9 +20,8 @@ const ( ForkMessage = "rollapp fork detected. please rollback to height previous to rollapp_revision_start_height." ) - func (m *Manager) MonitorForkUpdateLoop(ctx context.Context) error { - ticker := time.NewTicker(ForkMonitorInterval) + ticker := time.NewTicker(ForkMonitorInterval) defer ticker.Stop() for { @@ -37,7 +36,6 @@ func (m *Manager) MonitorForkUpdateLoop(ctx context.Context) error { } } - func (m *Manager) checkForkUpdate(msg string) error { defer m.forkMu.Unlock() m.forkMu.Lock() @@ -69,7 +67,6 @@ func (m *Manager) checkForkUpdate(msg string) error { return nil } - func (m *Manager) createInstruction(expectedRevision types.Revision) (types.Instruction, error) { obsoleteDrs, err := m.SLClient.GetObsoleteDrs() if err != nil { @@ -85,11 +82,6 @@ func (m *Manager) createInstruction(expectedRevision types.Revision) (types.Inst return instruction, nil } - - - - - func shouldStopNode( expectedRevision types.Revision, nextHeight uint64, @@ -98,7 +90,6 @@ func shouldStopNode( return nextHeight >= expectedRevision.StartHeight && actualRevisionNumber < expectedRevision.Number } - func (m *Manager) getRevisionFromSL(height uint64) (types.Revision, error) { rollapp, err := m.SLClient.GetRollapp() if err != nil { @@ -107,26 +98,22 @@ func (m *Manager) getRevisionFromSL(height uint64) (types.Revision, error) { return rollapp.GetRevisionForHeight(height), nil } - func (m *Manager) doFork(instruction types.Instruction) error { - if m.State.Height() < instruction.RevisionStartHeight+1 { - + consensusMsgs, err := m.prepareDRSUpgradeMessages(instruction.FaultyDRS) if err != nil { return fmt.Errorf("prepare DRS upgrade messages: %v", err) } - + consensusMsgs = append(consensusMsgs, &sequencers.MsgBumpAccountSequences{Authority: authtypes.NewModuleAddress("sequencers").String()}) - err = m.createForkBlocks(instruction, consensusMsgs) if err != nil { return fmt.Errorf("validate fork blocks: %v", err) } } - if err := m.submitForkBatch(instruction.RevisionStartHeight); err != nil { return fmt.Errorf("submit fork batch: %v", err) } @@ -134,13 +121,6 @@ func (m *Manager) doFork(instruction types.Instruction) error { return nil } - - - - - - - func (m *Manager) prepareDRSUpgradeMessages(obsoleteDRS []uint32) ([]proto.Message, error) { drsVersion, err := version.GetDRSVersion() if err != nil { @@ -161,13 +141,9 @@ func (m *Manager) prepareDRSUpgradeMessages(obsoleteDRS []uint32) ([]proto.Messa }, nil } - - - func (m *Manager) createForkBlocks(instruction types.Instruction, consensusMsgs []proto.Message) error { nextHeight := m.State.NextHeight() - for h := instruction.RevisionStartHeight; h < nextHeight; h++ { b, err := m.Store.LoadBlock(h) if err != nil { @@ -183,7 +159,6 @@ func (m *Manager) createForkBlocks(instruction types.Instruction, consensusMsgs } } - for h := nextHeight; h < instruction.RevisionStartHeight+2; h++ { if h == instruction.RevisionStartHeight { m.Executor.AddConsensusMsgs(consensusMsgs...) 
@@ -201,13 +176,6 @@ func (m *Manager) createForkBlocks(instruction types.Instruction, consensusMsgs return nil } - - - - - - - func (m *Manager) submitForkBatch(height uint64) error { resp, err := m.SLClient.GetBatchAtHeight(height) if err != nil && !errors.Is(err, gerrc.ErrNotFound) { @@ -225,62 +193,51 @@ func (m *Manager) submitForkBatch(height uint64) error { return nil } - func (m *Manager) updateStateForNextRevision() error { - - - nextRevision, err := m.getRevisionFromSL(m.State.NextHeight()) if err != nil { return err } - if nextRevision.StartHeight == m.State.NextHeight() { - + m.State.SetProposer(nil) - + m.State.RevisionStartHeight = nextRevision.StartHeight m.State.SetRevision(nextRevision.Number) - _, err = m.Store.SaveState(m.State, nil) return err } return nil } - func (m *Manager) doForkWhenNewRevision() error { defer m.forkMu.Unlock() m.forkMu.Lock() - expectedRevision, err := m.getRevisionFromSL(m.State.NextHeight()) if err != nil { return err } - if m.LastSettlementHeight.Load() < expectedRevision.StartHeight { instruction, err := m.createInstruction(expectedRevision) if err != nil { return err } - + m.State.SetRevision(instruction.Revision) - + err = m.doFork(instruction) if err != nil { return err } } - if expectedRevision.Number != m.State.GetRevision() { panic("Inconsistent expected revision number from Hub. Unable to fork") } - return types.DeleteInstructionFromDisk(m.RootDir) } diff --git a/block/fraud.go b/block/fraud.go index f543420eb..1f4ab2a99 100644 --- a/block/fraud.go +++ b/block/fraud.go @@ -4,16 +4,10 @@ import ( "context" ) - - type FraudHandler interface { - - HandleFault(ctx context.Context, fault error) } - - type FreezeHandler struct { m *Manager } diff --git a/block/initchain.go b/block/initchain.go index 48fea86a7..cc0daadca 100644 --- a/block/initchain.go +++ b/block/initchain.go @@ -11,8 +11,7 @@ import ( ) func (m *Manager) RunInitChain() error { - - proposer, err := m.SLClient.GetProposerAtHeight(int64(m.State.Height()) + 1) + proposer, err := m.SLClient.GetProposerAtHeight(int64(m.State.Height()) + 1) if err != nil { return fmt.Errorf("get proposer at height: %w", err) } @@ -25,13 +24,11 @@ func (m *Manager) RunInitChain() error { return err } - err = m.ValidateGenesisBridgeData(res.GenesisBridgeDataBytes) if err != nil { return fmt.Errorf("Cannot validate genesis bridge data: %w. 
Please call `$EXECUTABLE dymint unsafe-reset-all` before the next launch to reset this node to genesis state.", err) } - m.Executor.UpdateStateAfterInitChain(m.State, res) m.Executor.UpdateMempoolAfterInitChain(m.State) if _, err := m.Store.SaveState(m.State, nil); err != nil { @@ -41,8 +38,6 @@ func (m *Manager) RunInitChain() error { return nil } - - func (m *Manager) ValidateGenesisBridgeData(dataBytes []byte) error { if len(dataBytes) == 0 { return fmt.Errorf("genesis bridge data is empty in InitChainResponse") diff --git a/block/manager.go b/block/manager.go index 06594e29d..6c20837cc 100644 --- a/block/manager.go +++ b/block/manager.go @@ -36,95 +36,66 @@ import ( ) const ( - RunModeProposer uint = iota - + RunModeFullNode ) - type Manager struct { logger types.Logger - Conf config.BlockManagerConfig Genesis *tmtypes.GenesisDoc GenesisChecksum string LocalKey crypto.PrivKey RootDir string - Store store.Store State *types.State Executor ExecutorI - Sequencers *types.SequencerSet + Sequencers *types.SequencerSet - Pubsub *pubsub.Server P2PClient *p2p.Client DAClient da.DataAvailabilityLayerClient SLClient settlement.ClientI - RunMode uint - Cancel context.CancelFunc Ctx context.Context - LastBlockTimeInSettlement atomic.Int64 - LastBlockTime atomic.Int64 - forkMu sync.Mutex - - - - - + LastSettlementHeight atomic.Uint64 - pruningC chan int64 - IndexerService *txindex.IndexerService - Retriever da.BatchRetriever - - - retrieverMu sync.Mutex - - blockCache *Cache - TargetHeight atomic.Uint64 - FraudHandler FraudHandler - settlementSyncingC chan struct{} - settlementValidationC chan struct{} - syncedFromSettlement *uchannel.Nudger - SettlementValidator *SettlementValidator } - func NewManager( localKey crypto.PrivKey, conf config.NodeConfig, @@ -151,7 +122,7 @@ func NewManager( mempool, proxyApp, eventBus, - NewConsensusMsgQueue(), + NewConsensusMsgQueue(), logger, ) if err != nil { @@ -175,10 +146,10 @@ func NewManager( blockCache: &Cache{ cache: make(map[uint64]types.CachedBlock), }, - pruningC: make(chan int64, 10), - settlementSyncingC: make(chan struct{}, 1), - settlementValidationC: make(chan struct{}, 1), - syncedFromSettlement: uchannel.NewNudger(), + pruningC: make(chan int64, 10), + settlementSyncingC: make(chan struct{}, 1), + settlementValidationC: make(chan struct{}, 1), + syncedFromSettlement: uchannel.NewNudger(), } m.setFraudHandler(NewFreezeHandler(m)) err = m.LoadStateOnInit(store, genesis, logger) @@ -191,13 +162,11 @@ func NewManager( return nil, err } - err = m.updateStateForNextRevision() if err != nil { return nil, err } - err = m.ValidateConfigWithRollappParams() if err != nil { return nil, err @@ -208,10 +177,9 @@ func NewManager( return m, nil } - func (m *Manager) Start(ctx context.Context) error { m.Ctx, m.Cancel = context.WithCancel(ctx) - + if m.State.IsGenesis() { m.logger.Info("Running InitChain") @@ -221,9 +189,6 @@ func (m *Manager) Start(ctx context.Context) error { } } - - - if m.State.GetProposer() == nil { m.logger.Info("No proposer on the rollapp, fallback to the hub proposer, if available") err := m.UpdateProposerFromSL() @@ -236,10 +201,6 @@ func (m *Manager) Start(ctx context.Context) error { } } - - - - amIProposerOnSL, err := m.AmIProposerOnSL() if err != nil { return fmt.Errorf("am i proposer on SL: %w", err) @@ -249,30 +210,25 @@ func (m *Manager) Start(ctx context.Context) error { m.logger.Info("starting block manager", "mode", map[bool]string{true: "proposer", false: "full node"}[amIProposer]) - err = m.updateFromLastSettlementState() if err 
!= nil { return fmt.Errorf("sync block manager from settlement: %w", err) } - m.triggerSettlementSyncing() - + m.triggerSettlementValidation() eg, ctx := errgroup.WithContext(m.Ctx) - uerrors.ErrGroupGoLog(eg, m.logger, func() error { return m.PruningLoop(ctx) }) - uerrors.ErrGroupGoLog(eg, m.logger, func() error { return m.SettlementSyncLoop(ctx) }) - uerrors.ErrGroupGoLog(eg, m.logger, func() error { return m.MonitorSequencerSetUpdates(ctx) }) @@ -285,7 +241,6 @@ func (m *Manager) Start(ctx context.Context) error { return m.MonitorBalances(ctx) }) - if !amIProposer { return m.runAsFullNode(ctx, eg) } @@ -297,26 +252,21 @@ func (m *Manager) NextHeightToSubmit() uint64 { return m.LastSettlementHeight.Load() + 1 } - func (m *Manager) updateFromLastSettlementState() error { - err := m.UpdateSequencerSetFromSL() if err != nil { - m.logger.Error("Cannot fetch sequencer set from the Hub", "error", err) } - latestHeight, err := m.SLClient.GetLatestHeight() if errors.Is(err, gerrc.ErrNotFound) { - + m.logger.Info("No batches for chain found in SL.") - m.LastSettlementHeight.Store(uint64(m.Genesis.InitialHeight - 1)) + m.LastSettlementHeight.Store(uint64(m.Genesis.InitialHeight - 1)) m.LastBlockTimeInSettlement.Store(m.Genesis.GenesisTime.UTC().UnixNano()) return nil } if err != nil { - return err } @@ -327,10 +277,8 @@ func (m *Manager) updateFromLastSettlementState() error { m.LastSettlementHeight.Store(latestHeight) - m.SetLastBlockTimeInSettlementFromHeight(latestHeight) - block, err := m.Store.LoadBlock(m.State.Height()) if err == nil { m.LastBlockTime.Store(block.Header.GetTimestamp().UTC().UnixNano()) @@ -339,7 +287,6 @@ func (m *Manager) updateFromLastSettlementState() error { } func (m *Manager) updateLastFinalizedHeightFromSettlement() error { - height, err := m.SLClient.GetLatestFinalizedHeight() if errors.Is(err, gerrc.ErrNotFound) { m.logger.Info("No finalized batches for chain found in SL.") @@ -368,7 +315,6 @@ func (m *Manager) UpdateTargetHeight(h uint64) { } } - func (m *Manager) ValidateConfigWithRollappParams() error { if da.Client(m.State.RollappParams.Da) != m.DAClient.GetClientType() { return fmt.Errorf("da client mismatch. 
rollapp param: %s da configured: %s", m.State.RollappParams.Da, m.DAClient.GetClientType()) @@ -381,7 +327,6 @@ func (m *Manager) ValidateConfigWithRollappParams() error { return nil } - func (m *Manager) setDA(daconfig string, dalcKV store.KV, logger log.Logger) error { daLayer := m.State.RollappParams.Da dalc := registry.GetClient(daLayer) @@ -402,12 +347,10 @@ func (m *Manager) setDA(daconfig string, dalcKV store.KV, logger log.Logger) err return nil } - func (m *Manager) setFraudHandler(handler *FreezeHandler) { m.FraudHandler = handler } - func (m *Manager) freezeNode(err error) { m.logger.Info("Freezing node", "err", err) if m.Ctx.Err() != nil { @@ -417,11 +360,9 @@ func (m *Manager) freezeNode(err error) { m.Cancel() } - func (m *Manager) SetLastBlockTimeInSettlementFromHeight(lastSettlementHeight uint64) { block, err := m.Store.LoadBlock(lastSettlementHeight) if err != nil { - return } m.LastBlockTimeInSettlement.Store(block.Header.GetTimestamp().UTC().UnixNano()) diff --git a/block/modes.go b/block/modes.go index e8a48d33f..8fa3e5838 100644 --- a/block/modes.go +++ b/block/modes.go @@ -20,43 +20,36 @@ const ( p2pBlocksyncLoop = "applyBlockSyncBlocksLoop" ) - func (m *Manager) runAsFullNode(ctx context.Context, eg *errgroup.Group) error { m.logger.Info("starting block manager", "mode", "full node") m.RunMode = RunModeFullNode - + err := m.updateLastFinalizedHeightFromSettlement() if err != nil { return fmt.Errorf("sync block manager from settlement: %w", err) } - uerrors.ErrGroupGoLog(eg, m.logger, func() error { return m.SettlementValidateLoop(ctx) }) m.subscribeFullNodeEvents(ctx) - return types.DeleteInstructionFromDisk(m.RootDir) } func (m *Manager) runAsProposer(ctx context.Context, eg *errgroup.Group) error { m.logger.Info("starting block manager", "mode", "proposer") m.RunMode = RunModeProposer - + go uevent.MustSubscribe(ctx, m.Pubsub, "updateSubmittedHeightLoop", settlement.EventQueryNewSettlementBatchAccepted, m.UpdateLastSubmittedHeight, m.logger) - + go uevent.MustSubscribe(ctx, m.Pubsub, p2pBlocksyncLoop, p2p.EventQueryNewBlockSyncBlock, m.OnReceivedBlock, m.logger) - - m.DAClient.WaitForSyncing() - m.waitForSettlementSyncing() - amIProposerOnSL, err := m.AmIProposerOnSL() if err != nil { return fmt.Errorf("am i proposer on SL: %w", err) @@ -65,28 +58,24 @@ func (m *Manager) runAsProposer(ctx context.Context, eg *errgroup.Group) error { return fmt.Errorf("the node is no longer the proposer. 
please restart.") } - err = m.UpdateProposerFromSL() if err != nil { return err } - err = m.doForkWhenNewRevision() if err != nil { return err } - shouldRotate, err := m.ShouldRotate() if err != nil { return fmt.Errorf("checking should rotate: %w", err) } if shouldRotate { - m.rotate(ctx) + m.rotate(ctx) } - bytesProducedC := make(chan int) uerrors.ErrGroupGoLog(eg, m.logger, func() error { @@ -94,18 +83,17 @@ func (m *Manager) runAsProposer(ctx context.Context, eg *errgroup.Group) error { }) uerrors.ErrGroupGoLog(eg, m.logger, func() error { - bytesProducedC <- m.GetUnsubmittedBytes() + bytesProducedC <- m.GetUnsubmittedBytes() return m.ProduceBlockLoop(ctx, bytesProducedC) }) - uerrors.ErrGroupGoLog(eg, m.logger, func() error { return m.MonitorProposerRotation(ctx) }) go func() { err = eg.Wait() - + if errors.Is(err, errRotationRequested) { m.rotate(ctx) } else if err != nil { @@ -118,11 +106,9 @@ func (m *Manager) runAsProposer(ctx context.Context, eg *errgroup.Group) error { } func (m *Manager) subscribeFullNodeEvents(ctx context.Context) { - go uevent.MustSubscribe(ctx, m.Pubsub, syncLoop, settlement.EventQueryNewSettlementBatchAccepted, m.onNewStateUpdate, m.logger) go uevent.MustSubscribe(ctx, m.Pubsub, validateLoop, settlement.EventQueryNewSettlementBatchFinalized, m.onNewStateUpdateFinalized, m.logger) - go uevent.MustSubscribe(ctx, m.Pubsub, p2pGossipLoop, p2p.EventQueryNewGossipedBlock, m.OnReceivedBlock, m.logger) go uevent.MustSubscribe(ctx, m.Pubsub, p2pBlocksyncLoop, p2p.EventQueryNewBlockSyncBlock, m.OnReceivedBlock, m.logger) } diff --git a/block/p2p.go b/block/p2p.go index c1c679dd3..8f0683123 100644 --- a/block/p2p.go +++ b/block/p2p.go @@ -9,7 +9,6 @@ import ( "github.com/tendermint/tendermint/libs/pubsub" ) - func (m *Manager) OnReceivedBlock(event pubsub.Message) { eventData, ok := event.Data().(p2p.BlockData) if !ok { @@ -40,9 +39,8 @@ func (m *Manager) OnReceivedBlock(event pubsub.Message) { if block.Header.Height < m.State.NextHeight() { return } - m.retrieverMu.Lock() + m.retrieverMu.Lock() - if m.blockCache.Has(height) { m.retrieverMu.Unlock() return @@ -54,7 +52,7 @@ func (m *Manager) OnReceivedBlock(event pubsub.Message) { m.logger.Debug("Received new block from p2p.", "block height", height, "source", source.String(), "store height", m.State.Height(), "n cachedBlocks", m.blockCache.Size()) m.blockCache.Add(height, &block, &commit, source) - m.retrieverMu.Unlock() + m.retrieverMu.Unlock() err := m.attemptApplyCachedBlocks() if err != nil { @@ -63,7 +61,6 @@ func (m *Manager) OnReceivedBlock(event pubsub.Message) { } } - func (m *Manager) gossipBlock(ctx context.Context, block types.Block, commit types.Commit) error { m.logger.Info("Gossipping block", "height", block.Header.Height) gossipedBlock := p2p.BlockData{Block: block, Commit: commit} @@ -72,15 +69,12 @@ func (m *Manager) gossipBlock(ctx context.Context, block types.Block, commit typ return fmt.Errorf("marshal binary: %w: %w", err, ErrNonRecoverable) } if err := m.P2PClient.GossipBlock(ctx, gossipedBlockBytes); err != nil { - - return fmt.Errorf("p2p gossip block: %w: %w", err, ErrRecoverable) } return nil } - func (m *Manager) saveP2PBlockToBlockSync(block *types.Block, commit *types.Commit) error { gossipedBlock := p2p.BlockData{Block: *block, Commit: *commit} gossipedBlockBytes, err := gossipedBlock.MarshalBinary() diff --git a/block/produce.go b/block/produce.go index a2d4ffa64..867a56711 100644 --- a/block/produce.go +++ b/block/produce.go @@ -20,9 +20,6 @@ import ( 
"github.com/dymensionxyz/dymint/types" ) - - - func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int) error { m.logger.Info("Started block producer loop.") @@ -40,12 +37,11 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int) case <-ctx.Done(): return nil case <-ticker.C: - + if !m.AmIProposerOnRollapp() { continue } - produceEmptyBlock := firstBlock || m.Conf.MaxIdleTime == 0 || nextEmptyBlock.Before(time.Now()) firstBlock = false @@ -54,7 +50,7 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int) m.logger.Error("Produce and gossip: context canceled.", "error", err) return nil } - if errors.Is(err, types.ErrEmptyBlock) { + if errors.Is(err, types.ErrEmptyBlock) { continue } if errors.Is(err, ErrNonRecoverable) { @@ -68,8 +64,6 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int) } nextEmptyBlock = time.Now().Add(m.Conf.MaxIdleTime) if 0 < len(block.Data.Txs) { - - nextEmptyBlock = time.Now().Add(m.Conf.MaxProofTime) } else { m.logger.Info("Produced empty block.") @@ -102,10 +96,9 @@ func (m *Manager) ProduceBlockLoop(ctx context.Context, bytesProducedC chan int) type ProduceBlockOptions struct { AllowEmpty bool MaxData *uint64 - NextProposerHash *[32]byte + NextProposerHash *[32]byte } - func (m *Manager) ProduceApplyGossipLastBlock(ctx context.Context, nextProposerHash [32]byte) (err error) { _, _, err = m.produceApplyGossip(ctx, ProduceBlockOptions{ AllowEmpty: true, @@ -119,22 +112,13 @@ func (m *Manager) ProduceApplyGossipBlock(ctx context.Context, opts ProduceBlock } func (m *Manager) produceApplyGossip(ctx context.Context, opts ProduceBlockOptions) (block *types.Block, commit *types.Commit, err error) { - - - - - - - newSequencerSet, err := m.SnapshotSequencerSet() if err != nil { return nil, nil, fmt.Errorf("snapshot sequencer set: %w", err) } - - + opts.AllowEmpty = opts.AllowEmpty || len(newSequencerSet) > 0 - block, commit, err = m.produceBlock(opts) if err != nil { return nil, nil, fmt.Errorf("produce block: %w", err) @@ -151,50 +135,27 @@ func (m *Manager) produceApplyGossip(ctx context.Context, opts ProduceBlockOptio return block, commit, nil } - - - - - - - - - - - - - - func (m *Manager) SnapshotSequencerSet() (sequencersAfterUpdate types.Sequencers, err error) { - sequencersAfterUpdate = m.Sequencers.GetAll() - lastSequencers, err := m.Store.LoadLastBlockSequencerSet() - - + if err != nil && !errors.Is(err, gerrc.ErrNotFound) { - return nil, fmt.Errorf("load last block sequencer set: %w: %w", err, ErrNonRecoverable) } - newSequencers := types.SequencerListRightOuterJoin(lastSequencers, sequencersAfterUpdate) if len(newSequencers) == 0 { - return nil, nil } - - msgs, err := ConsensusMsgsOnSequencerSetUpdate(newSequencers) if err != nil { return nil, fmt.Errorf("consensus msgs on sequencers set update: %w: %w", err, ErrNonRecoverable) } m.Executor.AddConsensusMsgs(msgs...) 
- return sequencersAfterUpdate, nil } @@ -202,18 +163,15 @@ func (m *Manager) produceBlock(opts ProduceBlockOptions) (*types.Block, *types.C newHeight := m.State.NextHeight() lastHeaderHash, lastCommit, err := m.GetPreviousBlockHashes(newHeight) if err != nil { - return nil, nil, fmt.Errorf("load prev block: %w", err) } var block *types.Block var commit *types.Commit - - pendingBlock, err := m.Store.LoadBlock(newHeight) if err == nil { - + block = pendingBlock commit, err = m.Store.LoadCommit(newHeight) if err != nil { @@ -230,16 +188,14 @@ func (m *Manager) produceBlock(opts ProduceBlockOptions) (*types.Block, *types.C maxBlockDataSize = *opts.MaxData } proposerHashForBlock := [32]byte(m.State.GetProposerHash()) - + if opts.NextProposerHash != nil { maxBlockDataSize = 0 proposerHashForBlock = *opts.NextProposerHash } - block = m.Executor.CreateBlock(newHeight, lastCommit, lastHeaderHash, proposerHashForBlock, m.State, maxBlockDataSize) - - + if !opts.AllowEmpty && len(block.Data.Txs) == 0 { return nil, nil, fmt.Errorf("%w: %w", types.ErrEmptyBlock, ErrRecoverable) } @@ -255,7 +211,6 @@ func (m *Manager) produceBlock(opts ProduceBlockOptions) (*types.Block, *types.C return block, commit, nil } - func (m *Manager) createCommit(block *types.Block) (*types.Commit, error) { abciHeaderPb := types.ToABCIHeaderPB(&block.Header) abciHeaderBytes, err := abciHeaderPb.Marshal() @@ -290,7 +245,7 @@ func (m *Manager) createTMSignature(block *types.Block, proposerAddress []byte, headerHash := block.Header.Hash() vote := tmtypes.Vote{ Type: cmtproto.PrecommitType, - Height: int64(block.Header.Height), + Height: int64(block.Header.Height), Round: 0, Timestamp: voteTimestamp, BlockID: tmtypes.BlockID{Hash: headerHash[:], PartSetHeader: tmtypes.PartSetHeader{ @@ -301,18 +256,17 @@ func (m *Manager) createTMSignature(block *types.Block, proposerAddress []byte, ValidatorIndex: 0, } v := vote.ToProto() - - + rawKey, _ := m.LocalKey.Raw() tmprivkey := tmed25519.PrivKey(rawKey) tmprivkey.PubKey().Bytes() - + tmvalidator := tmtypes.NewMockPVWithParams(tmprivkey, false, false) err := tmvalidator.SignVote(m.State.ChainID, v) if err != nil { return nil, err } - + vote.Signature = v.Signature pubKey := tmprivkey.PubKey() voteSignBytes := tmtypes.VoteSignBytes(m.State.ChainID, v) @@ -322,12 +276,10 @@ func (m *Manager) createTMSignature(block *types.Block, proposerAddress []byte, return vote.Signature, nil } - - func (m *Manager) GetPreviousBlockHashes(forHeight uint64) (lastHeaderHash [32]byte, lastCommit *types.Commit, err error) { - lastHeaderHash, lastCommit, err = getHeaderHashAndCommit(m.Store, forHeight-1) + lastHeaderHash, lastCommit, err = getHeaderHashAndCommit(m.Store, forHeight-1) if err != nil { - if !m.State.IsGenesis() { + if !m.State.IsGenesis() { return [32]byte{}, nil, fmt.Errorf("load prev block: %w: %w", err, ErrNonRecoverable) } lastHeaderHash = [32]byte{} @@ -336,7 +288,6 @@ func (m *Manager) GetPreviousBlockHashes(forHeight uint64) (lastHeaderHash [32]b return lastHeaderHash, lastCommit, nil } - func getHeaderHashAndCommit(store store.Store, height uint64) ([32]byte, *types.Commit, error) { lastCommit, err := store.LoadCommit(height) if err != nil { diff --git a/block/production_test.go b/block/production_test.go index 4edb11934..04637a400 100644 --- a/block/production_test.go +++ b/block/production_test.go @@ -290,7 +290,6 @@ func TestStopBlockProduction(t *testing.T) { } func TestUpdateInitialSequencerSet(t *testing.T) { - require := require.New(t) app := testutil.GetAppMock(testutil.EndBlock) 
ctx := context.Background() diff --git a/block/pruning.go b/block/pruning.go index 9576938d1..913586931 100644 --- a/block/pruning.go +++ b/block/pruning.go @@ -4,9 +4,7 @@ import ( "context" ) - func (m *Manager) Prune(retainHeight uint64) { - logResult := func(err error, source string, retainHeight uint64, pruned uint64) { if err != nil { m.logger.Error("pruning", "from", source, "retain height", retainHeight, "err", err) @@ -15,20 +13,16 @@ func (m *Manager) Prune(retainHeight uint64) { } } - pruned, err := m.P2PClient.RemoveBlocks(context.Background(), retainHeight) logResult(err, "blocksync", retainHeight, pruned) - pruned, err = m.IndexerService.Prune(retainHeight, m.Store) logResult(err, "indexer", retainHeight, pruned) - pruned, err = m.Store.PruneStore(retainHeight, m.logger) logResult(err, "dymint store", retainHeight, pruned) } - func (m *Manager) PruningLoop(ctx context.Context) error { for { select { @@ -36,9 +30,9 @@ func (m *Manager) PruningLoop(ctx context.Context) error { return nil case retainHeight := <-m.pruningC: var pruningHeight uint64 - if m.RunMode == RunModeProposer { + if m.RunMode == RunModeProposer { pruningHeight = min(m.NextHeightToSubmit(), uint64(retainHeight)) - } else { + } else { pruningHeight = min(m.SettlementValidator.NextValidationHeight(), uint64(retainHeight)) } m.Prune(pruningHeight) diff --git a/block/pruning_test.go b/block/pruning_test.go index fa41f7bf6..c2bdcf006 100644 --- a/block/pruning_test.go +++ b/block/pruning_test.go @@ -87,5 +87,4 @@ func TestPruningRetainHeight(t *testing.T) { validatePruning(i, expectedPruned, pruned, err) } - } diff --git a/block/retriever.go b/block/retriever.go index 850a9ed9e..de77fe3ff 100644 --- a/block/retriever.go +++ b/block/retriever.go @@ -22,7 +22,6 @@ func (m *Manager) ApplyBatchFromSL(slBatch *settlement.Batch) error { m.retrieverMu.Lock() defer m.retrieverMu.Unlock() - if m.State.Height() > slBatch.EndHeight { return nil } @@ -30,7 +29,7 @@ func (m *Manager) ApplyBatchFromSL(slBatch *settlement.Batch) error { blockIndex := 0 for _, batch := range batchResp.Batches { for i, block := range batch.Blocks { - + if blockIndex >= len(slBatch.BlockDescriptors) { break } @@ -45,7 +44,6 @@ func (m *Manager) ApplyBatchFromSL(slBatch *settlement.Batch) error { return err } - err := m.applyBlockWithFraudHandling(block, batch.Commits[i], types.BlockMetaData{Source: types.DA, DAHeight: slBatch.MetaData.DA.Height}) if err != nil { return fmt.Errorf("apply block: height: %d: %w", block.Header.Height, err) @@ -55,7 +53,6 @@ func (m *Manager) ApplyBatchFromSL(slBatch *settlement.Batch) error { } } - if m.State.Height() != slBatch.EndHeight { return fmt.Errorf("state height mismatch: state height: %d: batch end height: %d", m.State.Height(), slBatch.EndHeight) } @@ -63,14 +60,6 @@ func (m *Manager) ApplyBatchFromSL(slBatch *settlement.Batch) error { return nil } - - - - - - - - func (m *Manager) applyLocalBlock() error { defer m.retrieverMu.Unlock() m.retrieverMu.Lock() @@ -101,7 +90,6 @@ func (m *Manager) applyLocalBlock() error { } func (m *Manager) fetchBatch(daMetaData *da.DASubmitMetaData) da.ResultRetrieveBatch { - if daMetaData.Client != m.DAClient.GetClientType() { return da.ResultRetrieveBatch{ BaseResult: da.BaseResult{ @@ -112,9 +100,7 @@ func (m *Manager) fetchBatch(daMetaData *da.DASubmitMetaData) da.ResultRetrieveB } } - batchRes := m.Retriever.RetrieveBatches(daMetaData) - - + return batchRes } diff --git a/block/sequencers.go b/block/sequencers.go index ab0597222..159bd24a7 100644 --- 
a/block/sequencers.go +++ b/block/sequencers.go @@ -14,7 +14,7 @@ const ( var errRotationRequested = fmt.Errorf("sequencer rotation started. signal to stop production") func (m *Manager) MonitorProposerRotation(ctx context.Context) error { - ticker := time.NewTicker(ProposerMonitorInterval) + ticker := time.NewTicker(ProposerMonitorInterval) defer ticker.Stop() for { @@ -27,12 +27,11 @@ func (m *Manager) MonitorProposerRotation(ctx context.Context) error { m.logger.Error("Check rotation in progress", "err", err) continue } - + if nextProposer == nil { continue } - m.logger.Info("Sequencer rotation started.", "nextSeqAddr", nextProposer.SettlementAddress) return errRotationRequested } @@ -50,18 +49,15 @@ func (m *Manager) MonitorSequencerSetUpdates(ctx context.Context) error { case <-ticker.C: err := m.UpdateSequencerSetFromSL() if err != nil { - m.logger.Error("Cannot fetch sequencer set from the Hub", "error", err) } } } } - - func (m *Manager) AmIProposerOnSL() (bool, error) { localProposerKeyBytes, _ := m.LocalKey.GetPublic().Raw() - + SLProposer, err := m.SLClient.GetProposerAtHeight(-1) if err != nil { return false, fmt.Errorf("get proposer at height: %w", err) @@ -69,8 +65,6 @@ func (m *Manager) AmIProposerOnSL() (bool, error) { return bytes.Equal(SLProposer.PubKey().Bytes(), localProposerKeyBytes), nil } - - func (m *Manager) AmIProposerOnRollapp() bool { if m.State.GetProposer() == nil { return false @@ -81,8 +75,6 @@ func (m *Manager) AmIProposerOnRollapp() bool { return bytes.Equal(rollappProposer, localProposerKeyBytes) } - - func (m *Manager) ShouldRotate() (bool, error) { nextProposer, err := m.SLClient.GetNextProposer() if err != nil { @@ -91,8 +83,7 @@ func (m *Manager) ShouldRotate() (bool, error) { if nextProposer == nil { return false, nil } - - + amIProposerOnSL, err := m.AmIProposerOnSL() if err != nil { return false, fmt.Errorf("am i proposer on SL: %w", err) @@ -100,13 +91,7 @@ func (m *Manager) ShouldRotate() (bool, error) { return amIProposerOnSL, nil } - - - - - func (m *Manager) rotate(ctx context.Context) { - nextProposer, err := m.SLClient.GetNextProposer() if err != nil || nextProposer == nil { panic(fmt.Sprintf("rotate: fetch next proposer set from Hub: %v", err)) @@ -127,8 +112,6 @@ func (m *Manager) rotate(ctx context.Context) { panic("rotate: sequencer is no longer the proposer. 
restarting as a full node") } - - func (m *Manager) CreateAndPostLastBatch(ctx context.Context, nextSeqHash [32]byte) error { h := m.State.Height() block, err := m.Store.LoadBlock(h) @@ -136,8 +119,6 @@ func (m *Manager) CreateAndPostLastBatch(ctx context.Context, nextSeqHash [32]by return fmt.Errorf("load block: height: %d: %w", h, err) } - - if bytes.Equal(block.Header.NextSequencersHash[:], nextSeqHash[:]) { m.logger.Debug("Last block already produced and applied.") } else { @@ -147,7 +128,6 @@ func (m *Manager) CreateAndPostLastBatch(ctx context.Context, nextSeqHash [32]by } } - for { b, err := m.CreateAndSubmitBatch(m.Conf.BatchSubmitBytes, true) if err != nil { @@ -162,9 +142,6 @@ func (m *Manager) CreateAndPostLastBatch(ctx context.Context, nextSeqHash [32]by return nil } - - - func (m *Manager) UpdateSequencerSetFromSL() error { seqs, err := m.SLClient.GetAllSequencers() if err != nil { @@ -175,9 +152,8 @@ func (m *Manager) UpdateSequencerSetFromSL() error { return nil } - func (m *Manager) UpdateProposerFromSL() error { - SLProposer, err := m.SLClient.GetProposerAtHeight(int64(m.State.NextHeight())) + SLProposer, err := m.SLClient.GetProposerAtHeight(int64(m.State.NextHeight())) if err != nil { return fmt.Errorf("get proposer at height: %w", err) } diff --git a/block/slvalidator.go b/block/slvalidator.go index 700911dc5..1150fd7b6 100644 --- a/block/slvalidator.go +++ b/block/slvalidator.go @@ -13,14 +13,12 @@ import ( "github.com/dymensionxyz/dymint/types" ) - type SettlementValidator struct { logger types.Logger blockManager *Manager lastValidatedHeight atomic.Uint64 } - func NewSettlementValidator(logger types.Logger, blockManager *Manager) *SettlementValidator { lastValidatedHeight, err := blockManager.Store.LoadValidationHeight() if err != nil { @@ -36,13 +34,9 @@ func NewSettlementValidator(logger types.Logger, blockManager *Manager) *Settlem return validator } - - - func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrieveBatch) error { v.logger.Debug("validating state update", "start height", batch.StartHeight, "end height", batch.EndHeight) - p2pBlocks := make(map[uint64]*types.Block) for height := batch.StartHeight; height <= batch.EndHeight; height++ { source, err := v.blockManager.Store.LoadBlockSource(height) @@ -51,7 +45,6 @@ func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrie continue } - if source != types.Gossiped && source != types.BlockSync { continue } @@ -64,7 +57,6 @@ func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrie p2pBlocks[block.Header.Height] = block } - var daBatch da.ResultRetrieveBatch for { daBatch = v.blockManager.Retriever.RetrieveBatches(batch.MetaData.DA) @@ -72,18 +64,15 @@ func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrie break } - if errors.Is(daBatch.BaseResult.Error, da.ErrBlobNotParsed) { return types.NewErrStateUpdateBlobCorruptedFraud(batch.StateIndex, string(batch.MetaData.DA.Client), batch.MetaData.DA.Height, hex.EncodeToString(batch.MetaData.DA.Commitment)) } - checkBatchResult := v.blockManager.Retriever.CheckBatchAvailability(batch.MetaData.DA) if errors.Is(checkBatchResult.Error, da.ErrBlobNotIncluded) { return types.NewErrStateUpdateBlobNotAvailableFraud(batch.StateIndex, string(batch.MetaData.DA.Client), batch.MetaData.DA.Height, hex.EncodeToString(batch.MetaData.DA.Commitment)) } - continue } @@ -93,18 +82,15 @@ func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrie 
types.LastReceivedDAHeightGauge.Set(float64(batch.EndHeight())) } - err := v.ValidateDaBlocks(batch, daBlocks) if err != nil { return err } - if len(p2pBlocks) == 0 { return nil } - err = v.ValidateP2PBlocks(daBlocks, p2pBlocks) if err != nil { return err @@ -113,10 +99,7 @@ func (v *SettlementValidator) ValidateStateUpdate(batch *settlement.ResultRetrie return nil } - - func (v *SettlementValidator) ValidateP2PBlocks(daBlocks []*types.Block, p2pBlocks map[uint64]*types.Block) error { - for _, daBlock := range daBlocks { p2pBlock, ok := p2pBlocks[daBlock.Header.Height] @@ -140,9 +123,7 @@ func (v *SettlementValidator) ValidateP2PBlocks(daBlocks []*types.Block, p2pBloc return nil } - func (v *SettlementValidator) ValidateDaBlocks(slBatch *settlement.ResultRetrieveBatch, daBlocks []*types.Block) error { - numSlBDs := uint64(len(slBatch.BlockDescriptors)) numSLBlocks := slBatch.NumBlocks numDABlocks := uint64(len(daBlocks)) @@ -150,36 +131,28 @@ func (v *SettlementValidator) ValidateDaBlocks(slBatch *settlement.ResultRetriev return types.NewErrStateUpdateNumBlocksNotMatchingFraud(slBatch.EndHeight, numSLBlocks, numSLBlocks, numDABlocks) } - for i, bd := range slBatch.BlockDescriptors { - + if bd.Height != daBlocks[i].Header.Height { return types.NewErrStateUpdateHeightNotMatchingFraud(slBatch.StateIndex, slBatch.BlockDescriptors[0].Height, daBlocks[0].Header.Height, slBatch.BlockDescriptors[len(slBatch.BlockDescriptors)-1].Height, daBlocks[len(daBlocks)-1].Header.Height) } - + if !bytes.Equal(bd.StateRoot, daBlocks[i].Header.AppHash[:]) { return types.NewErrStateUpdateStateRootNotMatchingFraud(slBatch.StateIndex, bd.Height, bd.StateRoot, daBlocks[i].Header.AppHash[:]) } - if !bd.Timestamp.Equal(daBlocks[i].Header.GetTimestamp()) { return types.NewErrStateUpdateTimestampNotMatchingFraud(slBatch.StateIndex, bd.Height, bd.Timestamp, daBlocks[i].Header.GetTimestamp()) } - err := v.validateDRS(slBatch.StateIndex, bd.Height, bd.DrsVersion) if err != nil { return err } } - - - - lastDABlock := daBlocks[numSlBDs-1] - if v.blockManager.State.RevisionStartHeight-1 == lastDABlock.Header.Height { v.logger.Debug("DA blocks, previous to fork, validated successfully", "start height", daBlocks[0].Header.Height, "end height", daBlocks[len(daBlocks)-1].Header.Height) return nil @@ -202,8 +175,6 @@ func (v *SettlementValidator) ValidateDaBlocks(slBatch *settlement.ResultRetriev return nil } - - func (v *SettlementValidator) UpdateLastValidatedHeight(height uint64) { for { curr := v.lastValidatedHeight.Load() @@ -217,17 +188,14 @@ func (v *SettlementValidator) UpdateLastValidatedHeight(height uint64) { } } - func (v *SettlementValidator) GetLastValidatedHeight() uint64 { return v.lastValidatedHeight.Load() } - func (v *SettlementValidator) NextValidationHeight() uint64 { return v.lastValidatedHeight.Load() + 1 } - func (v *SettlementValidator) validateDRS(stateIndex uint64, height uint64, version uint32) error { drs, err := v.blockManager.Store.LoadDRSVersion(height) if err != nil { @@ -240,7 +208,6 @@ func (v *SettlementValidator) validateDRS(stateIndex uint64, height uint64, vers return nil } - func blockHash(block *types.Block) ([]byte, error) { blockBytes, err := block.MarshalBinary() if err != nil { diff --git a/block/slvalidator_test.go b/block/slvalidator_test.go index 827ce680d..5c2efb564 100644 --- a/block/slvalidator_test.go +++ b/block/slvalidator_test.go @@ -146,7 +146,6 @@ func TestStateUpdateValidator_ValidateStateUpdate(t *testing.T) { } for _, tc := range testCases { t.Run(tc.name, func(t 
*testing.T) { - // Create manager manager, err := testutil.GetManagerWithProposerKey(testutil.GetManagerConfig(), proposerKey, nil, 1, 1, 0, proxyApp, nil) require.NoError(t, err) @@ -258,14 +257,11 @@ func TestStateUpdateValidator_ValidateStateUpdate(t *testing.T) { } else { require.Equal(t, reflect.ValueOf(tc.expectedErrType).Type(), reflect.TypeOf(err)) } - }) } - } func TestStateUpdateValidator_ValidateDAFraud(t *testing.T) { - // Init app app := testutil.GetAppMock(testutil.EndBlock) app.On("EndBlock", mock.Anything).Return(abci.ResponseEndBlock{ @@ -329,7 +325,6 @@ func TestStateUpdateValidator_ValidateDAFraud(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - // Create manager manager, err := testutil.GetManagerWithProposerKey(testutil.GetManagerConfig(), proposerKey, nil, 1, 1, 0, proxyApp, nil) require.NoError(t, err) @@ -396,7 +391,6 @@ func TestStateUpdateValidator_ValidateDAFraud(t *testing.T) { } }) } - } func getBlockDescriptors(batch *types.Batch) ([]rollapp.BlockDescriptor, error) { diff --git a/block/state.go b/block/state.go index 2d052de06..e297588e1 100644 --- a/block/state.go +++ b/block/state.go @@ -19,7 +19,6 @@ import ( "github.com/dymensionxyz/dymint/types" ) - func (m *Manager) LoadStateOnInit(store store.Store, genesis *tmtypes.GenesisDoc, logger types.Logger) error { s, err := store.LoadState() if errors.Is(err, types.ErrNoStateFound) { @@ -36,18 +35,12 @@ func (m *Manager) LoadStateOnInit(store store.Store, genesis *tmtypes.GenesisDoc return nil } - - func NewStateFromGenesis(genDoc *tmtypes.GenesisDoc) (*types.State, error) { err := genDoc.ValidateAndComplete() if err != nil { return nil, fmt.Errorf("in genesis doc: %w", err) } - - - - InitStateVersion := tmstate.Version{ Consensus: tmversion.Consensus{ Block: version.BlockProtocol, @@ -59,7 +52,7 @@ func NewStateFromGenesis(genDoc *tmtypes.GenesisDoc) (*types.State, error) { s := types.State{ Version: InitStateVersion, ChainID: genDoc.ChainID, - InitialHeight: uint64(genDoc.InitialHeight), + InitialHeight: uint64(genDoc.InitialHeight), ConsensusParams: *genDoc.ConsensusParams, } s.SetHeight(0) @@ -73,29 +66,24 @@ func NewStateFromGenesis(genDoc *tmtypes.GenesisDoc) (*types.State, error) { return &s, nil } - func (m *Manager) UpdateStateFromApp(blockHeaderHash [32]byte) error { proxyAppInfo, err := m.Executor.GetAppInfo() if err != nil { return errorsmod.Wrap(err, "get app info") } - appHeight := uint64(proxyAppInfo.LastBlockHeight) + appHeight := uint64(proxyAppInfo.LastBlockHeight) resp, err := m.Store.LoadBlockResponses(appHeight) if err != nil { return errorsmod.Wrap(err, "load block responses") } - m.Executor.UpdateStateAfterCommit(m.State, resp, proxyAppInfo.LastBlockAppHash, appHeight, blockHeaderHash) return nil } func (e *Executor) UpdateStateAfterInitChain(s *types.State, res *abci.ResponseInitChain) { - - - if len(res.AppHash) > 0 { copy(s.AppHash[:], res.AppHash) } @@ -106,7 +94,7 @@ func (e *Executor) UpdateStateAfterInitChain(s *types.State, res *abci.ResponseI s.ConsensusParams.Block.MaxGas = params.Block.MaxGas } } - + copy(s.LastResultsHash[:], merkle.HashFromByteSlices(nil)) } @@ -115,7 +103,6 @@ func (e *Executor) UpdateMempoolAfterInitChain(s *types.State) { e.mempool.SetPostCheckFn(mempool.PostCheckMaxGas(s.ConsensusParams.Block.MaxGas)) } - func (e *Executor) UpdateStateAfterCommit(s *types.State, resp *tmstate.ABCIResponses, appHash []byte, height uint64, lastHeaderHash [32]byte) { copy(s.AppHash[:], appHash[:]) copy(s.LastResultsHash[:], 
tmtypes.NewResults(resp.DeliverTxs).Hash()) @@ -132,26 +119,18 @@ func (e *Executor) UpdateStateAfterCommit(s *types.State, resp *tmstate.ABCIResp } } - - - func (e *Executor) UpdateProposerFromBlock(s *types.State, seqSet *types.SequencerSet, block *types.Block) bool { - if bytes.Equal(block.Header.SequencerHash[:], block.Header.NextSequencersHash[:]) { return false } if block.Header.NextSequencersHash == [32]byte{} { - - + e.logger.Info("rollapp left with no proposer. chain is halted") s.SetProposer(nil) return true } - - - seq, found := seqSet.GetByHash(block.Header.NextSequencersHash[:]) if !found { e.logger.Error("cannot find proposer by hash") diff --git a/block/submit.go b/block/submit.go index 87150c3c9..89677b346 100644 --- a/block/submit.go +++ b/block/submit.go @@ -17,11 +17,6 @@ import ( uchannel "github.com/dymensionxyz/dymint/utils/channel" ) - - - - - func (m *Manager) SubmitLoop(ctx context.Context, bytesProduced chan int, ) (err error) { @@ -39,41 +34,37 @@ func (m *Manager) SubmitLoop(ctx context.Context, ) } - func SubmitLoopInner( ctx context.Context, logger types.Logger, - bytesProduced chan int, - maxSkewTime time.Duration, - unsubmittedBlocksNum func() uint64, - unsubmittedBlocksBytes func() int, - batchSkewTime func() time.Duration, - maxBatchSubmitTime time.Duration, - maxBatchSubmitBytes uint64, + bytesProduced chan int, + maxSkewTime time.Duration, + unsubmittedBlocksNum func() uint64, + unsubmittedBlocksBytes func() int, + batchSkewTime func() time.Duration, + maxBatchSubmitTime time.Duration, + maxBatchSubmitBytes uint64, createAndSubmitBatch func(maxSizeBytes uint64) (bytes uint64, err error), ) error { eg, ctx := errgroup.WithContext(ctx) pendingBytes := atomic.Uint64{} - trigger := uchannel.NewNudger() - submitter := uchannel.NewNudger() + trigger := uchannel.NewNudger() + submitter := uchannel.NewNudger() eg.Go(func() error { - - for { select { case <-ctx.Done(): return nil case n := <-bytesProduced: - pendingBytes.Add(uint64(n)) + pendingBytes.Add(uint64(n)) logger.Debug("Added bytes produced to bytes pending submission counter.", "bytes added", n, "pending", pendingBytes.Load()) } submitter.Nudge() - if maxSkewTime < batchSkewTime() { select { case <-ctx.Done(): @@ -86,7 +77,6 @@ func SubmitLoopInner( }) eg.Go(func() error { - ticker := time.NewTicker(maxBatchSubmitTime) for { select { @@ -98,7 +88,6 @@ func SubmitLoopInner( pending := pendingBytes.Load() - for { done := ctx.Err() != nil nothingToSubmit := pending == 0 @@ -119,22 +108,21 @@ func SubmitLoopInner( logger.Error("Create and submit batch", "err", err, "pending", pending) panic(err) } - - + if errors.Is(err, gerrc.ErrAlreadyExists) { logger.Debug("Batch already accepted", "err", err, "pending", pending) panic(err) } return err } - pending = uint64(unsubmittedBlocksBytes()) - + pending = uint64(unsubmittedBlocksBytes()) + if batchSkewTime() < maxSkewTime { trigger.Nudge() } logger.Debug("Submitted a batch to both sub-layers.", "n bytes consumed from pending", nConsumed, "pending after", pending, "skew time", batchSkewTime()) } - + pendingBytes.Store(pending) } }) @@ -142,25 +130,19 @@ func SubmitLoopInner( return eg.Wait() } - - - func (m *Manager) CreateAndSubmitBatchGetSizeBlocksCommits(maxSize uint64) (uint64, error) { b, err := m.CreateAndSubmitBatch(maxSize, false) if b == nil { return 0, err } - return uint64(b.SizeBlockAndCommitBytes()), err + return uint64(b.SizeBlockAndCommitBytes()), err } - - func (m *Manager) CreateAndSubmitBatch(maxSizeBytes uint64, lastBatch bool) (*types.Batch, 
error) { startHeight := m.NextHeightToSubmit() endHeightInclusive := m.State.Height() if endHeightInclusive < startHeight { - return nil, fmt.Errorf( "next height to submit is greater than last block height, create and submit batch should not have been called: start height: %d: end height inclusive: %d: %w", startHeight, @@ -173,7 +155,7 @@ func (m *Manager) CreateAndSubmitBatch(maxSizeBytes uint64, lastBatch bool) (*ty if err != nil { return nil, fmt.Errorf("create batch: %w", err) } - + if lastBatch && b.EndHeight() == endHeightInclusive { b.LastBatch = true } @@ -187,8 +169,6 @@ func (m *Manager) CreateAndSubmitBatch(maxSizeBytes uint64, lastBatch bool) (*ty return b, nil } - - func (m *Manager) CreateBatch(maxBatchSize uint64, startHeight uint64, endHeightInclusive uint64) (*types.Batch, error) { batchSize := endHeightInclusive - startHeight + 1 batch := &types.Batch{ @@ -211,7 +191,6 @@ func (m *Manager) CreateBatch(maxBatchSize uint64, startHeight uint64, endHeight return nil, fmt.Errorf("load drs version: h: %d: %w", h, err) } - if len(batch.Blocks) > 0 && batch.Blocks[len(batch.Blocks)-1].GetRevision() != block.GetRevision() { return nil, fmt.Errorf("create batch: batch includes blocks with different revisions: %w", gerrc.ErrInternal) } @@ -221,9 +200,8 @@ func (m *Manager) CreateBatch(maxBatchSize uint64, startHeight uint64, endHeight batch.DRSVersion = append(batch.DRSVersion, drsVersion) totalSize := batch.SizeBytes() - if maxBatchSize < uint64(totalSize) { + if maxBatchSize < uint64(totalSize) { - batch.Blocks = batch.Blocks[:len(batch.Blocks)-1] batch.Commits = batch.Commits[:len(batch.Commits)-1] batch.DRSVersion = batch.DRSVersion[:len(batch.DRSVersion)-1] @@ -256,17 +234,14 @@ func (m *Manager) SubmitBatch(batch *types.Batch) error { types.RollappHubHeightGauge.Set(float64(batch.EndHeight())) m.LastSettlementHeight.Store(batch.EndHeight()) - m.LastBlockTimeInSettlement.Store(batch.Blocks[len(batch.Blocks)-1].Header.GetTimestamp().UTC().UnixNano()) return err } - - func (m *Manager) GetUnsubmittedBytes() int { total := 0 - + currH := m.State.Height() for h := m.NextHeightToSubmit(); h <= currH; h++ { @@ -294,8 +269,6 @@ func (m *Manager) GetUnsubmittedBlocks() uint64 { return m.State.Height() - m.LastSettlementHeight.Load() } - - func (m *Manager) UpdateLastSubmittedHeight(event pubsub.Message) { eventData, ok := event.Data().(*settlement.EventDataNewBatch) if !ok { @@ -312,7 +285,6 @@ func (m *Manager) UpdateLastSubmittedHeight(event pubsub.Message) { } } - func (m *Manager) GetBatchSkewTime() time.Duration { lastProducedTime := time.Unix(0, m.LastBlockTime.Load()) lastSubmittedTime := time.Unix(0, m.LastBlockTimeInSettlement.Load()) diff --git a/block/sync.go b/block/sync.go index bef64587e..99fef18d6 100644 --- a/block/sync.go +++ b/block/sync.go @@ -12,7 +12,6 @@ import ( "github.com/dymensionxyz/dymint/settlement" ) - func (m *Manager) onNewStateUpdate(event pubsub.Message) { eventData, ok := event.Data().(*settlement.EventDataNewBatch) if !ok { @@ -20,32 +19,23 @@ func (m *Manager) onNewStateUpdate(event pubsub.Message) { return } - m.LastSettlementHeight.Store(eventData.EndHeight) - err := m.UpdateSequencerSetFromSL() if err != nil { - m.logger.Error("Cannot fetch sequencer set from the Hub", "error", err) } if eventData.EndHeight > m.State.Height() { - + m.triggerSettlementSyncing() - + m.UpdateTargetHeight(eventData.EndHeight) } else { - m.triggerSettlementValidation() } } - - - - - func (m *Manager) SettlementSyncLoop(ctx context.Context) error { for { select { @@ 
-55,12 +45,11 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error { m.logger.Info("syncing to target height", "targetHeight", m.LastSettlementHeight.Load()) for currH := m.State.NextHeight(); currH <= m.LastSettlementHeight.Load(); currH = m.State.NextHeight() { - + if ctx.Err() != nil { return nil } - - + err := m.applyLocalBlock() if err == nil { m.logger.Info("Synced from local", "store height", m.State.Height(), "target height", m.LastSettlementHeight.Load()) @@ -76,12 +65,10 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error { } m.logger.Info("Retrieved state update from SL.", "state_index", settlementBatch.StateIndex) - m.LastBlockTimeInSettlement.Store(settlementBatch.BlockDescriptors[len(settlementBatch.BlockDescriptors)-1].GetTimestamp().UTC().UnixNano()) err = m.ApplyBatchFromSL(settlementBatch.Batch) - if errors.Is(err, da.ErrRetrieval) { continue } @@ -91,7 +78,6 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error { m.logger.Info("Synced from DA", "store height", m.State.Height(), "target height", m.LastSettlementHeight.Load()) - m.triggerSettlementValidation() err = m.attemptApplyCachedBlocks() @@ -101,10 +87,9 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error { } - if m.State.Height() >= m.LastSettlementHeight.Load() { m.logger.Info("Synced.", "current height", m.State.Height(), "last submitted height", m.LastSettlementHeight.Load()) - + m.syncedFromSettlement.Nudge() } @@ -112,14 +97,12 @@ func (m *Manager) SettlementSyncLoop(ctx context.Context) error { } } - func (m *Manager) waitForSettlementSyncing() { if m.State.Height() < m.LastSettlementHeight.Load() { <-m.syncedFromSettlement.C } } - func (m *Manager) triggerSettlementSyncing() { select { case m.settlementSyncingC <- struct{}{}: @@ -128,7 +111,6 @@ func (m *Manager) triggerSettlementSyncing() { } } - func (m *Manager) triggerSettlementValidation() { select { case m.settlementValidationC <- struct{}{}: diff --git a/block/validate.go b/block/validate.go index e4078fe8a..ad370450d 100644 --- a/block/validate.go +++ b/block/validate.go @@ -11,8 +11,6 @@ import ( "github.com/tendermint/tendermint/libs/pubsub" ) - - func (m *Manager) onNewStateUpdateFinalized(event pubsub.Message) { eventData, ok := event.Data().(*settlement.EventDataNewBatch) if !ok { @@ -22,7 +20,6 @@ func (m *Manager) onNewStateUpdateFinalized(event pubsub.Message) { m.SettlementValidator.UpdateLastValidatedHeight(eventData.EndHeight) } - func (m *Manager) SettlementValidateLoop(ctx context.Context) error { for { select { @@ -33,14 +30,13 @@ func (m *Manager) SettlementValidateLoop(ctx context.Context) error { m.logger.Info("validating state updates to target height", "targetHeight", targetValidationHeight) for currH := m.SettlementValidator.NextValidationHeight(); currH <= targetValidationHeight; currH = m.SettlementValidator.NextValidationHeight() { - + batch, err := m.SLClient.GetBatchAtHeight(currH) if err != nil { uevent.MustPublish(ctx, m.Pubsub, &events.DataHealthStatus{Error: err}, events.HealthStatusList) return err } - err = m.SettlementValidator.ValidateStateUpdate(batch) if err != nil { if errors.Is(err, gerrc.ErrFault) { @@ -51,7 +47,6 @@ func (m *Manager) SettlementValidateLoop(ctx context.Context) error { return err } - m.SettlementValidator.UpdateLastValidatedHeight(batch.EndHeight) m.logger.Debug("state info validated", "lastValidatedHeight", m.SettlementValidator.GetLastValidatedHeight()) diff --git a/cmd/dymint/commands/init.go b/cmd/dymint/commands/init.go index 
ce3ee91e3..92618a034 100644 --- a/cmd/dymint/commands/init.go +++ b/cmd/dymint/commands/init.go @@ -14,7 +14,6 @@ import ( tmtime "github.com/tendermint/tendermint/types/time" ) - var InitFilesCmd = &cobra.Command{ Use: "init", Short: "Initialize Dymint", @@ -25,9 +24,7 @@ func initFiles(cmd *cobra.Command, args []string) error { return InitFilesWithConfig(tmconfig) } - func InitFilesWithConfig(config *cfg.Config) error { - privValKeyFile := config.PrivValidatorKeyFile() privValStateFile := config.PrivValidatorStateFile() var pv *privval.FilePV @@ -52,7 +49,6 @@ func InitFilesWithConfig(config *cfg.Config) error { logger.Info("Generated node key", "path", nodeKeyFile) } - genFile := config.GenesisFile() if tmos.FileExists(genFile) { logger.Info("Found genesis file", "path", genFile) diff --git a/cmd/dymint/commands/root.go b/cmd/dymint/commands/root.go index 8db70aedc..49755b8c1 100644 --- a/cmd/dymint/commands/root.go +++ b/cmd/dymint/commands/root.go @@ -28,8 +28,6 @@ func registerFlagsRootCmd(cmd *cobra.Command) { cmd.PersistentFlags().String("log_level", tmconfig.LogLevel, "log level") } - - func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) { conf := cfg.DefaultConfig() err := viper.Unmarshal(conf) @@ -60,14 +58,12 @@ func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) { return conf, nil } - var RootCmd = &cobra.Command{ Use: "dymint", Short: "ABCI-client implementation for dymension's autonomous rollapps", PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) { v := viper.GetViper() - if err := v.BindPFlags(cmd.Flags()); err != nil { return err } diff --git a/cmd/dymint/commands/show_node_id.go b/cmd/dymint/commands/show_node_id.go index 30d3c9e87..cb4d7eb74 100644 --- a/cmd/dymint/commands/show_node_id.go +++ b/cmd/dymint/commands/show_node_id.go @@ -10,7 +10,6 @@ import ( "github.com/tendermint/tendermint/p2p" ) - var ShowNodeIDCmd = &cobra.Command{ Use: "show-node-id", Aliases: []string{"show_node_id"}, @@ -27,7 +26,7 @@ func showNodeID(cmd *cobra.Command, args []string) error { if err != nil { return err } - + host, err := libp2p.New(libp2p.Identity(signingKey)) if err != nil { return err diff --git a/cmd/dymint/commands/show_sequencer.go b/cmd/dymint/commands/show_sequencer.go index 2faff6840..84304ad73 100644 --- a/cmd/dymint/commands/show_sequencer.go +++ b/cmd/dymint/commands/show_sequencer.go @@ -9,13 +9,11 @@ import ( "github.com/tendermint/tendermint/privval" ) - var ShowSequencer = &cobra.Command{ Use: "show-sequencer", Aliases: []string{"show_sequencer"}, Short: "Show this node's sequencer info", RunE: showSequencer, - } func showSequencer(cmd *cobra.Command, args []string) error { diff --git a/cmd/dymint/commands/start.go b/cmd/dymint/commands/start.go index 1615ff2cd..8d9d99763 100644 --- a/cmd/dymint/commands/start.go +++ b/cmd/dymint/commands/start.go @@ -32,8 +32,6 @@ import ( var genesisHash []byte - - func NewRunNodeCmd() *cobra.Command { cmd := &cobra.Command{ Use: "start", @@ -125,7 +123,6 @@ func startInProcess(config *cfg.NodeConfig, tmConfig *tmcfg.Config, logger log.L logger.Info("Started dymint node") - tmos.TrapSignal(logger, func() { logger.Info("Caught SIGTERM. 
Exiting...") if dymintNode.IsRunning() { @@ -135,7 +132,6 @@ func startInProcess(config *cfg.NodeConfig, tmConfig *tmcfg.Config, logger log.L } }) - select {} } @@ -148,7 +144,6 @@ func checkGenesisHash(config *tmcfg.Config) error { return nil } - f, err := os.Open(config.GenesisFile()) if err != nil { return fmt.Errorf("can't open genesis file: %w", err) @@ -164,7 +159,6 @@ func checkGenesisHash(config *tmcfg.Config) error { } actualHash := h.Sum(nil) - if !bytes.Equal(genesisHash, actualHash) { return fmt.Errorf( "--genesis_hash=%X does not match %s hash: %X", diff --git a/cmd/dymint/main.go b/cmd/dymint/main.go index 200c33f82..df07c4306 100644 --- a/cmd/dymint/main.go +++ b/cmd/dymint/main.go @@ -20,7 +20,6 @@ func main() { cli.NewCompletionCmd(rootCmd, true), ) - rootCmd.AddCommand(commands.NewRunNodeCmd()) cmd := cli.PrepareBaseCmd(rootCmd, "DM", os.ExpandEnv(filepath.Join("$HOME", config.DefaultDymintDir))) diff --git a/config/config.go b/config/config.go index 65b9e09e3..cbf8cd6c1 100644 --- a/config/config.go +++ b/config/config.go @@ -14,7 +14,6 @@ import ( ) const ( - DefaultDymintDir = ".dymint" DefaultConfigDirName = "config" DefaultConfigFileName = "dymint.toml" @@ -23,63 +22,54 @@ const ( MaxBatchSubmitTime = 1 * time.Hour ) - type NodeConfig struct { - RootDir string DBPath string RPC RPCConfig MempoolConfig tmcfg.MempoolConfig - BlockManagerConfig `mapstructure:",squash"` DAConfig string `mapstructure:"da_config"` SettlementLayer string `mapstructure:"settlement_layer"` SettlementConfig settlement.Config `mapstructure:",squash"` Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"` - + DAGrpc grpc.Config `mapstructure:",squash"` - + P2PConfig `mapstructure:",squash"` - + DBConfig `mapstructure:"db"` } - type BlockManagerConfig struct { - BlockTime time.Duration `mapstructure:"block_time"` - + MaxIdleTime time.Duration `mapstructure:"max_idle_time"` - + MaxProofTime time.Duration `mapstructure:"max_proof_time"` - + BatchSubmitTime time.Duration `mapstructure:"batch_submit_time"` - + MaxSkewTime time.Duration `mapstructure:"max_skew_time"` - + BatchSubmitBytes uint64 `mapstructure:"batch_submit_bytes"` - + SequencerSetUpdateInterval time.Duration `mapstructure:"sequencer_update_interval"` } - func (nc *NodeConfig) GetViperConfig(cmd *cobra.Command, homeDir string) error { v := viper.GetViper() - EnsureRoot(homeDir, nil) v.SetConfigName("dymint") - v.AddConfigPath(homeDir) - v.AddConfigPath(filepath.Join(homeDir, DefaultConfigDirName)) + v.AddConfigPath(homeDir) + v.AddConfigPath(filepath.Join(homeDir, DefaultConfigDirName)) - err := BindDymintFlags(cmd, v) if err != nil { return err } - err = v.ReadInConfig() if err != nil { return err @@ -126,7 +116,6 @@ func (nc NodeConfig) Validate() error { return nil } - func (c BlockManagerConfig) Validate() error { if c.BlockTime < MinBlockTime { return fmt.Errorf("block_time cannot be less than %s", MinBlockTime) @@ -139,7 +128,7 @@ func (c BlockManagerConfig) Validate() error { if c.MaxIdleTime < 0 { return fmt.Errorf("max_idle_time must be positive or zero to disable") } - + if c.MaxIdleTime != 0 { if c.MaxIdleTime <= c.BlockTime || c.MaxIdleTime > MaxBatchSubmitTime { return fmt.Errorf("max_idle_time must be greater than block_time and not greater than %s", MaxBatchSubmitTime) @@ -203,14 +192,9 @@ func (nc NodeConfig) validateInstrumentation() error { return nc.Instrumentation.Validate() } - type InstrumentationConfig struct { - - - Prometheus bool `mapstructure:"prometheus"` - PrometheusListenAddr string 
`mapstructure:"prometheus_listen_addr"` } @@ -222,11 +206,9 @@ func (ic InstrumentationConfig) Validate() error { return nil } - type DBConfig struct { - SyncWrites bool `mapstructure:"sync_writes"` - + InMemory bool `mapstructure:"in_memory"` } diff --git a/config/defaults.go b/config/defaults.go index 0a75b14a6..3b88927d1 100644 --- a/config/defaults.go +++ b/config/defaults.go @@ -9,7 +9,6 @@ import ( ) const ( - DefaultListenAddress = "/ip4/0.0.0.0/tcp/26656" DefaultHomeDir = "sequencer_keys" @@ -17,10 +16,8 @@ const ( DefaultSequencerSetUpdateInterval = 3 * time.Minute ) - var DefaultNodeConfig = *DefaultConfig("") - func DefaultConfig(home string) *NodeConfig { cfg := &NodeConfig{ BlockManagerConfig: BlockManagerConfig{ @@ -57,7 +54,6 @@ func DefaultConfig(home string) *NodeConfig { } keyringDir := filepath.Join(home, DefaultHomeDir) - defaultSlGrpcConfig := settlement.GrpcConfig{ Host: "127.0.0.1", Port: 7981, @@ -79,7 +75,6 @@ func DefaultConfig(home string) *NodeConfig { } cfg.SettlementConfig = defaultSLconfig - defaultDAGrpc := grpc.Config{ Host: "127.0.0.1", Port: 7980, diff --git a/config/flags.go b/config/flags.go index d476c39f2..1ba58dd0a 100644 --- a/config/flags.go +++ b/config/flags.go @@ -32,11 +32,7 @@ const ( FlagP2PBootstrapRetryTime = "dymint.p2p_config.bootstrap_retry_time" ) - - - func AddNodeFlags(cmd *cobra.Command) { - tmcmd.AddNodeFlags(cmd) def := DefaultNodeConfig @@ -58,7 +54,7 @@ func AddNodeFlags(cmd *cobra.Command) { cmd.Flags().String(FlagP2PListenAddress, def.P2PConfig.ListenAddress, "P2P listen address") cmd.Flags().String(FlagP2PBootstrapNodes, def.P2PConfig.BootstrapNodes, "P2P bootstrap nodes") cmd.Flags().Duration(FlagP2PBootstrapRetryTime, def.P2PConfig.BootstrapRetryTime, "P2P bootstrap time") - cmd.Flags().Uint64(FlagP2PGossipCacheSize, uint64(def.P2PConfig.GossipSubCacheSize), "P2P Gossiped blocks cache size") + cmd.Flags().Uint64(FlagP2PGossipCacheSize, uint64(def.P2PConfig.GossipSubCacheSize), "P2P Gossiped blocks cache size") } func BindDymintFlags(cmd *cobra.Command, v *viper.Viper) error { diff --git a/config/p2p.go b/config/p2p.go index 71b18b180..f029a8877 100644 --- a/config/p2p.go +++ b/config/p2p.go @@ -5,27 +5,24 @@ import ( "time" ) - type P2PConfig struct { - ListenAddress string `mapstructure:"p2p_listen_address"` - + BootstrapNodes string `mapstructure:"p2p_bootstrap_nodes"` - + PersistentNodes string `mapstructure:"p2p_persistent_nodes"` - + GossipSubCacheSize int `mapstructure:"p2p_gossip_cache_size"` - + BootstrapRetryTime time.Duration `mapstructure:"p2p_bootstrap_retry_time"` - + BlockSyncEnabled bool `mapstructure:"p2p_blocksync_enabled"` - + BlockSyncRequestIntervalTime time.Duration `mapstructure:"p2p_blocksync_block_request_interval"` - + AdvertisingEnabled bool `mapstructure:"p2p_advertising_enabled"` } - func (c P2PConfig) Validate() error { if c.GossipSubCacheSize < 0 { return fmt.Errorf("gossipsub cache size cannot be negative") diff --git a/config/rpc.go b/config/rpc.go index baa5e8e7b..2625e7d91 100644 --- a/config/rpc.go +++ b/config/rpc.go @@ -1,38 +1,15 @@ package config - type RPCConfig struct { ListenAddress string - CORSAllowedOrigins []string CORSAllowedMethods []string CORSAllowedHeaders []string - - - - - - - MaxOpenConnections int - - - - - - - - - TLSCertFile string `mapstructure:"tls-cert-file"` - - - - - TLSKeyFile string `mapstructure:"tls-key-file"` } diff --git a/config/toml.go b/config/toml.go index 4bf51e276..7bf2022e3 100644 --- a/config/toml.go +++ b/config/toml.go @@ -9,7 +9,6 @@ import ( tmos 
"github.com/tendermint/tendermint/libs/os" ) - const DefaultDirPerm = 0o700 var configTemplate *template.Template @@ -24,10 +23,6 @@ func init() { } } - - - - func EnsureRoot(rootDir string, defaultConfig *NodeConfig) { if err := tmos.EnsureDir(rootDir, DefaultDirPerm); err != nil { panic(err.Error()) @@ -42,13 +37,11 @@ func EnsureRoot(rootDir string, defaultConfig *NodeConfig) { configFilePath := filepath.Join(rootDir, DefaultConfigDirName, DefaultConfigFileName) - if !tmos.FileExists(configFilePath) { WriteConfigFile(configFilePath, defaultConfig) } } - func WriteConfigFile(configFilePath string, config *NodeConfig) { var buffer bytes.Buffer @@ -59,8 +52,6 @@ func WriteConfigFile(configFilePath string, config *NodeConfig) { tmos.MustWriteFile(configFilePath, buffer.Bytes(), 0o644) } - - const defaultConfigTemplate = ` ####################################################### ### Dymint Configuration Options ### diff --git a/conv/config.go b/conv/config.go index ec9b9e7f4..d255c0611 100644 --- a/conv/config.go +++ b/conv/config.go @@ -8,10 +8,6 @@ import ( "github.com/dymensionxyz/dymint/config" ) - - - - func GetNodeConfig(nodeConf *config.NodeConfig, tmConf *tmcfg.Config) error { if tmConf == nil { return errors.New("tendermint config is nil but required to populate Dymint config") @@ -31,7 +27,7 @@ func GetNodeConfig(nodeConf *config.NodeConfig, tmConf *tmcfg.Config) error { if tmConf.Mempool == nil { return errors.New("tendermint mempool config is nil but required to populate Dymint config") } - + nodeConf.MempoolConfig = *tmConf.Mempool return nil diff --git a/conv/crypto.go b/conv/crypto.go index 4f04470fa..8c8f5077b 100644 --- a/conv/crypto.go +++ b/conv/crypto.go @@ -8,7 +8,6 @@ import ( "github.com/tendermint/tendermint/p2p" ) - func GetNodeKey(nodeKey *p2p.NodeKey) (crypto.PrivKey, error) { if nodeKey == nil || nodeKey.PrivKey == nil { return nil, ErrNilKey diff --git a/da/avail/avail.go b/da/avail/avail.go index 3d375b000..21b58f9f3 100644 --- a/da/avail/avail.go +++ b/da/avail/avail.go @@ -34,7 +34,7 @@ const ( DataCallMethod = "submit_data" DataCallSectionIndex = 29 DataCallMethodIndex = 1 - maxBlobSize = 2097152 + maxBlobSize = 2097152 ) type SubstrateApiI interface { @@ -74,35 +74,30 @@ var ( _ da.BatchRetriever = &DataAvailabilityLayerClient{} ) - func WithClient(client SubstrateApiI) da.Option { return func(dalc da.DataAvailabilityLayerClient) { dalc.(*DataAvailabilityLayerClient).client = client } } - func WithTxInclusionTimeout(timeout time.Duration) da.Option { return func(dalc da.DataAvailabilityLayerClient) { dalc.(*DataAvailabilityLayerClient).txInclusionTimeout = timeout } } - func WithBatchRetryDelay(delay time.Duration) da.Option { return func(dalc da.DataAvailabilityLayerClient) { dalc.(*DataAvailabilityLayerClient).batchRetryDelay = delay } } - func WithBatchRetryAttempts(attempts uint) da.Option { return func(dalc da.DataAvailabilityLayerClient) { dalc.(*DataAvailabilityLayerClient).batchRetryAttempts = attempts } } - func (c *DataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.Server, _ store.KV, logger types.Logger, options ...da.Option) error { c.logger = logger c.synced = make(chan struct{}, 1) @@ -114,18 +109,15 @@ func (c *DataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.S } } - c.pubsubServer = pubsubServer c.txInclusionTimeout = defaultTxInculsionTimeout c.batchRetryDelay = defaultBatchRetryDelay c.batchRetryAttempts = defaultBatchRetryAttempts - for _, apply := range options { apply(c) } - if c.client == nil { 
substrateApiClient, err := gsrpc.NewSubstrateAPI(c.config.ApiURL) if err != nil { @@ -144,32 +136,26 @@ func (c *DataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.S return nil } - func (c *DataAvailabilityLayerClient) Start() error { c.synced <- struct{}{} return nil } - func (c *DataAvailabilityLayerClient) Stop() error { c.cancel() close(c.synced) return nil } - func (m *DataAvailabilityLayerClient) WaitForSyncing() { <-m.synced } - func (c *DataAvailabilityLayerClient) GetClientType() da.Client { return da.Avail } - func (c *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMetaData) da.ResultRetrieveBatch { - blockHash, err := c.client.GetBlockHash(daMetaData.Height) if err != nil { return da.ResultRetrieveBatch{ @@ -190,10 +176,9 @@ func (c *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMet }, } } - + var batches []*types.Batch for _, ext := range block.Block.Extrinsics { - if ext.Signature.AppID.Int64() == c.config.AppID && ext.Method.CallIndex.SectionIndex == DataCallSectionIndex && ext.Method.CallIndex.MethodIndex == DataCallMethodIndex { @@ -206,16 +191,16 @@ func (c *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMet c.logger.Error("unmarshal batch", "daHeight", daMetaData.Height, "error", err) continue } - + batch := &types.Batch{} err = batch.FromProto(&pbBatch) if err != nil { c.logger.Error("batch from proto", "daHeight", daMetaData.Height, "error", err) continue } - + batches = append(batches, batch) - + data = data[proto.Size(&pbBatch):] } @@ -233,7 +218,6 @@ func (c *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMet } } - func (c *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultSubmitBatch { blob, err := batch.MarshalBinary() if err != nil { @@ -250,8 +234,6 @@ func (c *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS return c.submitBatchLoop(blob) } - - func (c *DataAvailabilityLayerClient) submitBatchLoop(dataBlob []byte) da.ResultSubmitBatch { for { select { @@ -318,8 +300,6 @@ func (c *DataAvailabilityLayerClient) submitBatchLoop(dataBlob []byte) da.Result } } - - func (c *DataAvailabilityLayerClient) broadcastTx(tx []byte) (uint64, error) { meta, err := c.client.GetMetadataLatest() if err != nil { @@ -329,7 +309,7 @@ func (c *DataAvailabilityLayerClient) broadcastTx(tx []byte) (uint64, error) { if err != nil { return 0, fmt.Errorf("%w: %s", da.ErrTxBroadcastConfigError, err) } - + ext := availtypes.NewExtrinsic(newCall) genesisHash, err := c.client.GetBlockHash(0) if err != nil { @@ -343,7 +323,7 @@ func (c *DataAvailabilityLayerClient) broadcastTx(tx []byte) (uint64, error) { if err != nil { return 0, fmt.Errorf("%w: %s", da.ErrTxBroadcastConfigError, err) } - + key, err := availtypes.CreateStorageKey(meta, "System", "Account", keyringPair.PublicKey) if err != nil { return 0, fmt.Errorf("%w: %s", da.ErrTxBroadcastConfigError, err) @@ -364,16 +344,14 @@ func (c *DataAvailabilityLayerClient) broadcastTx(tx []byte) (uint64, error) { SpecVersion: rv.SpecVersion, Tip: availtypes.NewUCompactFromUInt(c.config.Tip), TransactionVersion: rv.TransactionVersion, - AppID: availtypes.NewUCompactFromUInt(uint64(c.config.AppID)), + AppID: availtypes.NewUCompactFromUInt(uint64(c.config.AppID)), } - err = ext.Sign(keyringPair, options) if err != nil { return 0, fmt.Errorf("%w: %s", da.ErrTxBroadcastConfigError, err) } - sub, err := c.client.SubmitAndWatchExtrinsic(ext) if err != nil { return 0, fmt.Errorf("%w: %s", 
da.ErrTxBroadcastNetworkError, err) @@ -419,7 +397,6 @@ func (c *DataAvailabilityLayerClient) broadcastTx(tx []byte) (uint64, error) { } } - func (c *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASubmitMetaData) da.ResultCheckBatch { return da.ResultCheckBatch{ BaseResult: da.BaseResult{ @@ -429,7 +406,6 @@ func (c *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASu } } - func (c *DataAvailabilityLayerClient) getHeightFromHash(hash availtypes.Hash) (uint64, error) { c.logger.Debug("Getting block height from hash", "hash", hash) header, err := c.client.GetHeader(hash) @@ -439,12 +415,10 @@ func (c *DataAvailabilityLayerClient) getHeightFromHash(hash availtypes.Hash) (u return uint64(header.Number), nil } - func (d *DataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { return maxBlobSize } - func (c *DataAvailabilityLayerClient) GetSignerBalance() (da.Balance, error) { return da.Balance{}, nil } diff --git a/da/celestia/celestia.go b/da/celestia/celestia.go index 6eda30bf5..ae947ffc5 100644 --- a/da/celestia/celestia.go +++ b/da/celestia/celestia.go @@ -26,7 +26,6 @@ import ( uretry "github.com/dymensionxyz/dymint/utils/retry" ) - type DataAvailabilityLayerClient struct { rpc celtypes.CelestiaRPCClient @@ -43,35 +42,30 @@ var ( _ da.BatchRetriever = &DataAvailabilityLayerClient{} ) - func WithRPCClient(rpc celtypes.CelestiaRPCClient) da.Option { return func(daLayerClient da.DataAvailabilityLayerClient) { daLayerClient.(*DataAvailabilityLayerClient).rpc = rpc } } - func WithRPCRetryDelay(delay time.Duration) da.Option { return func(daLayerClient da.DataAvailabilityLayerClient) { daLayerClient.(*DataAvailabilityLayerClient).config.RetryDelay = delay } } - func WithRPCAttempts(attempts int) da.Option { return func(daLayerClient da.DataAvailabilityLayerClient) { daLayerClient.(*DataAvailabilityLayerClient).config.RetryAttempts = &attempts } } - func WithSubmitBackoff(c uretry.BackoffConfig) da.Option { return func(daLayerClient da.DataAvailabilityLayerClient) { daLayerClient.(*DataAvailabilityLayerClient).config.Backoff = c } } - func (c *DataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.Server, _ store.KV, logger types.Logger, options ...da.Option) error { c.logger = logger c.synced = make(chan struct{}, 1) @@ -85,7 +79,6 @@ func (c *DataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.S c.pubsubServer = pubsubServer - for _, apply := range options { apply(c) } @@ -113,8 +106,6 @@ func createConfig(bz []byte) (c Config, err error) { return c, errors.New("gas prices must be set") } - - if c.RetryDelay == 0 { c.RetryDelay = defaultRpcRetryDelay } @@ -128,11 +119,9 @@ func createConfig(bz []byte) (c Config, err error) { return c, nil } - func (c *DataAvailabilityLayerClient) Start() (err error) { c.logger.Info("Starting Celestia Data Availability Layer Client.") - if c.rpc != nil { c.logger.Info("Celestia-node client already set.") return nil @@ -150,7 +139,6 @@ func (c *DataAvailabilityLayerClient) Start() (err error) { return } - func (c *DataAvailabilityLayerClient) Stop() error { c.logger.Info("Stopping Celestia Data Availability Layer Client.") err := c.pubsubServer.Stop() @@ -162,17 +150,14 @@ func (c *DataAvailabilityLayerClient) Stop() error { return nil } - func (m *DataAvailabilityLayerClient) WaitForSyncing() { <-m.synced } - func (c *DataAvailabilityLayerClient) GetClientType() da.Client { return da.Celestia } - func (c *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) 
da.ResultSubmitBatch { data, err := batch.MarshalBinary() if err != nil { @@ -204,10 +189,9 @@ func (c *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS return da.ResultSubmitBatch{} default: - height, commitment, err := c.submit(data) if errors.Is(err, gerrc.ErrInternal) { - + err = fmt.Errorf("submit: %w", err) return da.ResultSubmitBatch{ BaseResult: da.BaseResult{ @@ -273,7 +257,7 @@ func (c *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMet resultRetrieveBatch = c.retrieveBatches(daMetaData) return resultRetrieveBatch.Error }, - retry.Attempts(uint(*c.config.RetryAttempts)), + retry.Attempts(uint(*c.config.RetryAttempts)), retry.DelayType(retry.FixedDelay), retry.Delay(c.config.RetryDelay), ) @@ -368,7 +352,7 @@ func (c *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASu return nil }, - retry.Attempts(uint(*c.config.RetryAttempts)), + retry.Attempts(uint(*c.config.RetryAttempts)), retry.DelayType(retry.FixedDelay), retry.Delay(c.config.RetryDelay), ) @@ -392,7 +376,6 @@ func (c *DataAvailabilityLayerClient) checkBatchAvailability(daMetaData *da.DASu dah, err := c.getDataAvailabilityHeaders(daMetaData.Height) if err != nil { - return da.ResultCheckBatch{ BaseResult: da.BaseResult{ Code: da.StatusError, @@ -407,10 +390,6 @@ func (c *DataAvailabilityLayerClient) checkBatchAvailability(daMetaData *da.DASu proof, err := c.getProof(daMetaData) if err != nil || proof == nil { - - - - return da.ResultCheckBatch{ BaseResult: da.BaseResult{ Code: da.StatusError, @@ -433,9 +412,6 @@ func (c *DataAvailabilityLayerClient) checkBatchAvailability(daMetaData *da.DASu if daMetaData.Index > 0 && daMetaData.Length > 0 { if index != daMetaData.Index || shares != daMetaData.Length { - - - return da.ResultCheckBatch{ CheckMetaData: DACheckMetaData, BaseResult: da.BaseResult{ @@ -449,9 +425,7 @@ func (c *DataAvailabilityLayerClient) checkBatchAvailability(daMetaData *da.DASu } included, err = c.validateProof(daMetaData, proof) - - - + if err != nil { return da.ResultCheckBatch{ BaseResult: da.BaseResult{ @@ -485,7 +459,6 @@ func (c *DataAvailabilityLayerClient) checkBatchAvailability(daMetaData *da.DASu } } - func (c *DataAvailabilityLayerClient) submit(daBlob da.Blob) (uint64, da.Commitment, error) { blobs, commitments, err := c.blobsAndCommitments(daBlob) if err != nil { @@ -554,7 +527,6 @@ func (c *DataAvailabilityLayerClient) getDataAvailabilityHeaders(height uint64) return headers.DAH, nil } - func (c *DataAvailabilityLayerClient) sync(rpc *openrpc.Client) { sync := func() error { done := make(chan error, 1) @@ -579,7 +551,7 @@ func (c *DataAvailabilityLayerClient) sync(rpc *openrpc.Client) { } err := retry.Do(sync, - retry.Attempts(0), + retry.Attempts(0), retry.Delay(10*time.Second), retry.LastErrorOnly(true), retry.DelayType(retry.FixedDelay), @@ -596,12 +568,10 @@ func (c *DataAvailabilityLayerClient) sync(rpc *openrpc.Client) { } } - func (d *DataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { return maxBlobSizeBytes } - func (d *DataAvailabilityLayerClient) GetSignerBalance() (da.Balance, error) { ctx, cancel := context.WithTimeout(d.ctx, d.config.Timeout) defer cancel() diff --git a/da/celestia/config.go b/da/celestia/config.go index a1f764d4d..aa6c595a7 100644 --- a/da/celestia/config.go +++ b/da/celestia/config.go @@ -24,7 +24,6 @@ var defaultSubmitBackoff = uretry.NewBackoffConfig( uretry.WithMaxDelay(time.Second*6), ) - type Config struct { BaseURL string `json:"base_url,omitempty"` AppNodeURL string 
`json:"app_node_url,omitempty"` @@ -60,13 +59,12 @@ func (c *Config) InitNamespaceID() error { if c.NamespaceIDStr == "" { c.NamespaceIDStr = generateRandNamespaceID() } - + namespaceBytes, err := hex.DecodeString(c.NamespaceIDStr) if err != nil { return fmt.Errorf("decode string: %w", err) } - if len(namespaceBytes) != openrpcns.NamespaceVersionZeroIDSize { return fmt.Errorf("wrong length: got: %v: expect %v", len(namespaceBytes), openrpcns.NamespaceVersionZeroIDSize) } diff --git a/da/celestia/mock/messages.go b/da/celestia/mock/messages.go index d0140a084..1f55be06c 100644 --- a/da/celestia/mock/messages.go +++ b/da/celestia/mock/messages.go @@ -5,17 +5,12 @@ import ( "encoding/binary" ) - - - const ( shareSize = 256 namespaceSize = 8 msgShareSize = shareSize - namespaceSize ) - - func splitMessage(rawData []byte, nid []byte) []NamespacedShare { shares := make([]NamespacedShare, 0) firstRawShare := append(append( @@ -40,10 +35,8 @@ func splitMessage(rawData []byte, nid []byte) []NamespacedShare { return shares } - type Share []byte - type NamespacedShare struct { Share ID []byte @@ -68,8 +61,6 @@ func zeroPadIfNecessary(share []byte, width int) []byte { return share } - - func marshalDelimited(data []byte) ([]byte, error) { lenBuf := make([]byte, binary.MaxVarintLen64) length := uint64(len(data)) @@ -77,8 +68,6 @@ func marshalDelimited(data []byte) ([]byte, error) { return append(lenBuf[:n], data...), nil } - - func appendToShares(shares []NamespacedShare, nid []byte, rawData []byte) []NamespacedShare { if len(rawData) <= msgShareSize { rawShare := append(append( @@ -89,7 +78,7 @@ func appendToShares(shares []NamespacedShare, nid []byte, rawData []byte) []Name paddedShare := zeroPadIfNecessary(rawShare, shareSize) share := NamespacedShare{paddedShare, nid} shares = append(shares, share) - } else { + } else { shares = append(shares, splitMessage(rawData, nid)...) 
} return shares diff --git a/da/celestia/mock/server.go b/da/celestia/mock/server.go index 98434285a..22b64d569 100644 --- a/da/celestia/mock/server.go +++ b/da/celestia/mock/server.go @@ -20,7 +20,6 @@ import ( "github.com/dymensionxyz/dymint/types" ) - type Server struct { da *local.DataAvailabilityLayerClient blockTime time.Duration @@ -28,7 +27,6 @@ type Server struct { logger types.Logger } - func NewServer(blockTime time.Duration, logger types.Logger) *Server { return &Server{ da: new(local.DataAvailabilityLayerClient), @@ -37,7 +35,6 @@ func NewServer(blockTime time.Duration, logger types.Logger) *Server { } } - func (s *Server) Start(listener net.Listener) error { err := s.da.Init([]byte(s.blockTime.String()), pubsub.NewServer(), store.NewDefaultInMemoryKVStore(), s.logger) if err != nil { @@ -56,7 +53,6 @@ func (s *Server) Start(listener net.Listener) error { return nil } - func (s *Server) Stop() { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() diff --git a/da/celestia/rpc.go b/da/celestia/rpc.go index be0265f1f..c79cc53d4 100644 --- a/da/celestia/rpc.go +++ b/da/celestia/rpc.go @@ -14,49 +14,40 @@ import ( var _ types.CelestiaRPCClient = &OpenRPC{} - type OpenRPC struct { rpc *openrpc.Client } - func NewOpenRPC(rpc *openrpc.Client) *OpenRPC { return &OpenRPC{ rpc: rpc, } } - func (c *OpenRPC) GetAll(ctx context.Context, height uint64, namespaces []share.Namespace) ([]*blob.Blob, error) { return c.rpc.Blob.GetAll(ctx, height, namespaces) } - func (c *OpenRPC) Submit(ctx context.Context, blobs []*blob.Blob, options *blob.SubmitOptions) (uint64, error) { return c.rpc.Blob.Submit(ctx, blobs, options) } - func (c *OpenRPC) GetProof(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Proof, error) { return c.rpc.Blob.GetProof(ctx, height, namespace, commitment) } - func (c *OpenRPC) Get(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Blob, error) { return c.rpc.Blob.Get(ctx, height, namespace, commitment) } - func (c *OpenRPC) GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { return c.rpc.Header.GetByHeight(ctx, height) } - func (c *OpenRPC) Included(ctx context.Context, height uint64, namespace share.Namespace, proof *blob.Proof, commitment blob.Commitment) (bool, error) { return c.rpc.Blob.Included(ctx, height, namespace, proof, commitment) } - func (c *OpenRPC) GetSignerBalance(ctx context.Context) (*state.Balance, error) { return c.rpc.State.Balance(ctx) } diff --git a/da/celestia/types/rpc.go b/da/celestia/types/rpc.go index 8fded2362..0c0ce106d 100644 --- a/da/celestia/types/rpc.go +++ b/da/celestia/types/rpc.go @@ -10,16 +10,13 @@ import ( ) type CelestiaRPCClient interface { - Get(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Blob, error) GetAll(context.Context, uint64, []share.Namespace) ([]*blob.Blob, error) GetProof(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Proof, error) Included(ctx context.Context, height uint64, namespace share.Namespace, proof *blob.Proof, commitment blob.Commitment) (bool, error) Submit(ctx context.Context, blobs []*blob.Blob, options *blob.SubmitOptions) (uint64, error) - GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) - GetSignerBalance(ctx context.Context) (*state.Balance, error) } diff --git a/da/celestia/types/types.go 
b/da/celestia/types/types.go index 52be192a6..3b2217add 100644 --- a/da/celestia/types/types.go +++ b/da/celestia/types/types.go @@ -4,74 +4,42 @@ import ( "math" ) - - - - const ( - NamespaceVersionSize = 1 - - + NamespaceVersionMaxValue = math.MaxUint8 - NamespaceIDSize = 28 - NamespaceSize = NamespaceVersionSize + NamespaceIDSize - ShareSize = 512 - - ShareInfoBytes = 1 - - SequenceLenBytes = 4 - ShareVersionZero = uint8(0) - - DefaultShareVersion = ShareVersionZero - - CompactShareReservedBytes = 4 - - FirstCompactShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - SequenceLenBytes - CompactShareReservedBytes - - ContinuationCompactShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - CompactShareReservedBytes - - FirstSparseShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - SequenceLenBytes - - ContinuationSparseShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - MinSquareSize = 1 - - MinShareCount = MinSquareSize * MinSquareSize - MaxShareVersion = 127 - DefaultGovMaxSquareSize = 64 - DefaultMaxBytes = DefaultGovMaxSquareSize * DefaultGovMaxSquareSize * ContinuationSparseShareContentSize ) diff --git a/da/da.go b/da/da.go index cd85c0e7d..7f948dfc8 100644 --- a/da/da.go +++ b/da/da.go @@ -15,30 +15,20 @@ import ( "github.com/dymensionxyz/dymint/types" ) - - - - - type StatusCode int32 - type Commitment = []byte - type Blob = []byte - const ( StatusUnknown StatusCode = iota StatusSuccess StatusError ) - type Client string - const ( Mock Client = "mock" Celestia Client = "celestia" @@ -46,34 +36,29 @@ const ( Grpc Client = "grpc" ) - type Option func(DataAvailabilityLayerClient) - type BaseResult struct { - Code StatusCode - + Message string - + Error error } - type DASubmitMetaData struct { - Height uint64 - + Namespace []byte - + Client Client - + Commitment Commitment - + Index int - + Length int - + Root []byte } @@ -84,9 +69,7 @@ type Balance struct { const PathSeparator = "|" - func (d *DASubmitMetaData) ToPath() string { - if d.Commitment != nil { commitment := hex.EncodeToString(d.Commitment) dataroot := hex.EncodeToString(d.Root) @@ -109,7 +92,6 @@ func (d *DASubmitMetaData) ToPath() string { } } - func (d *DASubmitMetaData) FromPath(path string) (*DASubmitMetaData, error) { pathParts := strings.FieldsFunc(path, func(r rune) bool { return r == rune(PathSeparator[0]) }) if len(pathParts) < 2 { @@ -125,7 +107,7 @@ func (d *DASubmitMetaData) FromPath(path string) (*DASubmitMetaData, error) { Height: height, Client: Client(pathParts[0]), } - + if len(pathParts) == 7 { submitData.Index, err = strconv.Atoi(pathParts[2]) if err != nil { @@ -152,93 +134,72 @@ func (d *DASubmitMetaData) FromPath(path string) (*DASubmitMetaData, error) { return submitData, nil } - type DACheckMetaData struct { - Height uint64 - + Client Client - + SLIndex uint64 - + Namespace []byte - + Commitment Commitment - + Index int - + Length int - + Proofs []*blob.Proof - + NMTRoots []byte - + RowProofs []*merkle.Proof - + Root []byte } - type ResultSubmitBatch struct { BaseResult - + SubmitMetaData *DASubmitMetaData } - type ResultCheckBatch struct { BaseResult - + CheckMetaData *DACheckMetaData } - type ResultRetrieveBatch struct { BaseResult - - + Batches []*types.Batch - + CheckMetaData *DACheckMetaData } - - type DataAvailabilityLayerClient interface { - Init(config []byte, pubsubServer *pubsub.Server, kvStore store.KV, logger types.Logger, options ...Option) error - Start() error - Stop() error - - - SubmitBatch(batch *types.Batch) ResultSubmitBatch 
GetClientType() Client - CheckBatchAvailability(daMetaData *DASubmitMetaData) ResultCheckBatch - WaitForSyncing() - GetMaxBlobSizeBytes() uint32 - GetSignerBalance() (Balance, error) } - - type BatchRetriever interface { - RetrieveBatches(daMetaData *DASubmitMetaData) ResultRetrieveBatch - + CheckBatchAvailability(daMetaData *DASubmitMetaData) ResultCheckBatch } diff --git a/da/errors.go b/da/errors.go index dca7871b7..02c040efa 100644 --- a/da/errors.go +++ b/da/errors.go @@ -7,26 +7,25 @@ import ( ) var ( - ErrTxBroadcastConfigError = errors.New("failed building tx") - + ErrTxBroadcastNetworkError = errors.New("failed broadcasting tx") - + ErrTxBroadcastTimeout = errors.New("broadcast timeout error") - + ErrUnableToGetProof = errors.New("unable to get proof") - + ErrRetrieval = errors.New("retrieval failed") - + ErrBlobNotFound = errors.New("blob not found") - + ErrBlobNotIncluded = errors.New("blob not included") - + ErrBlobNotParsed = errors.New("unable to parse blob to batch") - + ErrProofNotMatching = errors.New("proof not matching") - + ErrNameSpace = errors.New("namespace not matching") - + ErrDAMismatch = gerrc.ErrInvalidArgument.Wrap("DA in config not matching DA path") ) diff --git a/da/grpc/grpc.go b/da/grpc/grpc.go index 7daa0c667..3c18f9328 100644 --- a/da/grpc/grpc.go +++ b/da/grpc/grpc.go @@ -16,8 +16,7 @@ import ( "github.com/tendermint/tendermint/libs/pubsub" ) -const maxBlobSize = 2097152 - +const maxBlobSize = 2097152 type DataAvailabilityLayerClient struct { config Config @@ -28,14 +27,11 @@ type DataAvailabilityLayerClient struct { logger types.Logger } - type Config struct { - Host string `json:"host"` Port int `json:"port"` } - var DefaultConfig = Config{ Host: "127.0.0.1", Port: 7980, @@ -46,7 +42,6 @@ var ( _ da.BatchRetriever = &DataAvailabilityLayerClient{} ) - func (d *DataAvailabilityLayerClient) Init(config []byte, _ *pubsub.Server, _ store.KV, logger types.Logger, options ...da.Option) error { d.logger = logger d.synced = make(chan struct{}, 1) @@ -57,14 +52,13 @@ func (d *DataAvailabilityLayerClient) Init(config []byte, _ *pubsub.Server, _ st return json.Unmarshal(config, &d.config) } - func (d *DataAvailabilityLayerClient) Start() error { d.logger.Info("starting GRPC DALC", "host", d.config.Host, "port", d.config.Port) d.synced <- struct{}{} var err error var opts []grpc.DialOption - + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) d.conn, err = grpc.Dial(d.config.Host+":"+strconv.Itoa(d.config.Port), opts...) 
if err != nil { @@ -75,23 +69,19 @@ func (d *DataAvailabilityLayerClient) Start() error { return nil } - func (d *DataAvailabilityLayerClient) Stop() error { d.logger.Info("stopping GRPC DALC") return d.conn.Close() } - func (m *DataAvailabilityLayerClient) WaitForSyncing() { <-m.synced } - func (d *DataAvailabilityLayerClient) GetClientType() da.Client { return da.Grpc } - func (d *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultSubmitBatch { resp, err := d.client.SubmitBatch(context.TODO(), &dalc.SubmitBatchRequest{Batch: batch.ToProto()}) if err != nil { @@ -111,7 +101,6 @@ func (d *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS } } - func (d *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASubmitMetaData) da.ResultCheckBatch { resp, err := d.client.CheckBatchAvailability(context.TODO(), &dalc.CheckBatchAvailabilityRequest{DataLayerHeight: daMetaData.Height}) if err != nil { @@ -122,12 +111,10 @@ func (d *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASu } } - func (d *DataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { return maxBlobSize } - func (d *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMetaData) da.ResultRetrieveBatch { resp, err := d.client.RetrieveBatches(context.TODO(), &dalc.RetrieveBatchesRequest{DataLayerHeight: daMetaData.Height}) if err != nil { diff --git a/da/grpc/mockserv/mockserv.go b/da/grpc/mockserv/mockserv.go index e303e2901..47a6fceda 100644 --- a/da/grpc/mockserv/mockserv.go +++ b/da/grpc/mockserv/mockserv.go @@ -17,7 +17,6 @@ import ( "github.com/tendermint/tendermint/libs/pubsub" ) - func GetServer(kv store.KV, conf grpcda.Config, mockConfig []byte) *grpc.Server { logger := tmlog.NewTMLogger(os.Stdout) diff --git a/da/local/local.go b/da/local/local.go index 009beaab8..bc7ad96ef 100644 --- a/da/local/local.go +++ b/da/local/local.go @@ -1,7 +1,7 @@ package local import ( - "crypto/sha1" + "crypto/sha1" "encoding/binary" "math/rand" "sync/atomic" @@ -14,8 +14,6 @@ import ( "github.com/tendermint/tendermint/libs/pubsub" ) - - type DataAvailabilityLayerClient struct { logger types.Logger dalcKV store.KV @@ -26,7 +24,7 @@ type DataAvailabilityLayerClient struct { const ( defaultBlockTime = 3 * time.Second - maxBlobSize = 2097152 + maxBlobSize = 2097152 ) type config struct { @@ -38,7 +36,6 @@ var ( _ da.BatchRetriever = &DataAvailabilityLayerClient{} ) - func (m *DataAvailabilityLayerClient) Init(config []byte, _ *pubsub.Server, dalcKV store.KV, logger types.Logger, options ...da.Option) error { m.logger = logger m.dalcKV = dalcKV @@ -56,7 +53,6 @@ func (m *DataAvailabilityLayerClient) Init(config []byte, _ *pubsub.Server, dalc return nil } - func (m *DataAvailabilityLayerClient) Start() error { m.logger.Debug("Mock Data Availability Layer Client starting") m.synced <- struct{}{} @@ -70,26 +66,20 @@ func (m *DataAvailabilityLayerClient) Start() error { return nil } - func (m *DataAvailabilityLayerClient) Stop() error { m.logger.Debug("Mock Data Availability Layer Client stopped") close(m.synced) return nil } - func (m *DataAvailabilityLayerClient) WaitForSyncing() { <-m.synced } - func (m *DataAvailabilityLayerClient) GetClientType() da.Client { return da.Mock } - - - func (m *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultSubmitBatch { daHeight := m.daHeight.Load() @@ -99,7 +89,7 @@ func (m *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS if err != nil { return da.ResultSubmitBatch{BaseResult: 
da.BaseResult{Code: da.StatusError, Message: err.Error(), Error: err}} } - hash := sha1.Sum(uint64ToBinary(batch.EndHeight())) + hash := sha1.Sum(uint64ToBinary(batch.EndHeight())) err = m.dalcKV.Set(getKey(daHeight, batch.StartHeight()), hash[:]) if err != nil { return da.ResultSubmitBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: err.Error(), Error: err}} @@ -109,7 +99,7 @@ func (m *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS return da.ResultSubmitBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: err.Error(), Error: err}} } - m.daHeight.Store(daHeight + 1) + m.daHeight.Store(daHeight + 1) return da.ResultSubmitBatch{ BaseResult: da.BaseResult{ @@ -123,13 +113,11 @@ func (m *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultS } } - func (m *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASubmitMetaData) da.ResultCheckBatch { batchesRes := m.RetrieveBatches(daMetaData) return da.ResultCheckBatch{BaseResult: da.BaseResult{Code: batchesRes.Code, Message: batchesRes.Message, Error: batchesRes.Error}, CheckMetaData: batchesRes.CheckMetaData} } - func (m *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMetaData) da.ResultRetrieveBatch { if daMetaData.Height >= m.daHeight.Load() { return da.ResultRetrieveBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: "batch not found", Error: da.ErrBlobNotFound}} @@ -174,11 +162,10 @@ func getKey(daHeight uint64, height uint64) []byte { } func (m *DataAvailabilityLayerClient) updateDAHeight() { - blockStep := rand.Uint64()%10 + 1 + blockStep := rand.Uint64()%10 + 1 m.daHeight.Add(blockStep) } - func (d *DataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { return maxBlobSize } diff --git a/da/registry/registry.go b/da/registry/registry.go index 4779e2ba0..f4725c3a8 100644 --- a/da/registry/registry.go +++ b/da/registry/registry.go @@ -8,7 +8,6 @@ import ( "github.com/dymensionxyz/dymint/da/local" ) - var clients = map[string]func() da.DataAvailabilityLayerClient{ "mock": func() da.DataAvailabilityLayerClient { return &local.DataAvailabilityLayerClient{} }, "grpc": func() da.DataAvailabilityLayerClient { return &grpc.DataAvailabilityLayerClient{} }, @@ -16,7 +15,6 @@ var clients = map[string]func() da.DataAvailabilityLayerClient{ "avail": func() da.DataAvailabilityLayerClient { return &avail.DataAvailabilityLayerClient{} }, } - func GetClient(name string) da.DataAvailabilityLayerClient { f, ok := clients[name] if !ok { @@ -25,7 +23,6 @@ func GetClient(name string) da.DataAvailabilityLayerClient { return f() } - func RegisteredClients() []string { registered := make([]string, 0, len(clients)) for name := range clients { diff --git a/indexers/blockindexer/block.go b/indexers/blockindexer/block.go index 0ac87ba8f..61f16c7ab 100644 --- a/indexers/blockindexer/block.go +++ b/indexers/blockindexer/block.go @@ -8,19 +8,12 @@ import ( "github.com/tendermint/tendermint/types" ) - type BlockIndexer interface { - - Has(height int64) (bool, error) - Index(types.EventDataNewBlockHeader) error - - Search(ctx context.Context, q *query.Query) ([]int64, error) - Prune(from, to uint64, logger log.Logger) (uint64, error) } diff --git a/indexers/blockindexer/kv/kv.go b/indexers/blockindexer/kv/kv.go index d2b1b813a..7b943a454 100644 --- a/indexers/blockindexer/kv/kv.go +++ b/indexers/blockindexer/kv/kv.go @@ -27,9 +27,6 @@ import ( var _ indexer.BlockIndexer = (*BlockerIndexer)(nil) - - - type BlockerIndexer struct { store store.KV } @@ -40,8 
+37,6 @@ func New(store store.KV) *BlockerIndexer { } } - - func (idx *BlockerIndexer) Has(height int64) (bool, error) { key, err := heightKey(height) if err != nil { @@ -55,18 +50,12 @@ func (idx *BlockerIndexer) Has(height int64) (bool, error) { return err == nil, err } - - - - - - func (idx *BlockerIndexer) Index(bh tmtypes.EventDataNewBlockHeader) error { batch := idx.store.NewBatch() defer batch.Discard() height := bh.Header.Height - + key, err := heightKey(height) if err != nil { return fmt.Errorf("create block height index key: %w", err) @@ -75,18 +64,16 @@ func (idx *BlockerIndexer) Index(bh tmtypes.EventDataNewBlockHeader) error { return err } - beginKeys, err := idx.indexEvents(batch, bh.ResultBeginBlock.Events, "begin_block", height) if err != nil { return fmt.Errorf("index BeginBlock events: %w", err) } - + endKeys, err := idx.indexEvents(batch, bh.ResultEndBlock.Events, "end_block", height) if err != nil { return fmt.Errorf("index EndBlock events: %w", err) } - err = idx.addEventKeys(height, &beginKeys, &endKeys, batch) if err != nil { return err @@ -94,11 +81,6 @@ func (idx *BlockerIndexer) Index(bh tmtypes.EventDataNewBlockHeader) error { return batch.Commit() } - - - - - func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, error) { results := make([]int64, 0) select { @@ -113,8 +95,6 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, return nil, fmt.Errorf("parse query conditions: %w", err) } - - height, ok := lookForHeight(conditions) if ok { ok, err := idx.Has(height) @@ -132,11 +112,8 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, var heightsInitialized bool filteredHeights := make(map[string][]byte) - skipIndexes := make([]int, 0) - - ranges, rangeIndexes := indexer.LookForRanges(conditions) if len(ranges) > 0 { skipIndexes = append(skipIndexes, rangeIndexes...) 
@@ -155,8 +132,6 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, heightsInitialized = true - - if len(filteredHeights) == 0 { break } @@ -169,7 +144,6 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, } } - for i, c := range conditions { if intInSlice(i, skipIndexes) { continue @@ -188,8 +162,6 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, heightsInitialized = true - - if len(filteredHeights) == 0 { break } @@ -201,7 +173,6 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, } } - results = make([]int64, 0, len(filteredHeights)) for _, hBz := range filteredHeights { cont := true @@ -232,12 +203,6 @@ func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, return results, nil } - - - - - - func (idx *BlockerIndexer) matchRange( ctx context.Context, qr indexer.QueryRange, @@ -245,8 +210,6 @@ func (idx *BlockerIndexer) matchRange( filteredHeights map[string][]byte, firstRun bool, ) (map[string][]byte, error) { - - if !firstRun && len(filteredHeights) == 0 { return filteredHeights, nil } @@ -314,18 +277,9 @@ LOOP: } if len(tmpHeights) == 0 || firstRun { - - - - - - - return tmpHeights, nil } - - for k := range filteredHeights { cont := true @@ -348,12 +302,6 @@ LOOP: return filteredHeights, nil } - - - - - - func (idx *BlockerIndexer) match( ctx context.Context, c query.Condition, @@ -361,8 +309,6 @@ func (idx *BlockerIndexer) match( filteredHeights map[string][]byte, firstRun bool, ) (map[string][]byte, error) { - - if !firstRun && len(filteredHeights) == 0 { return filteredHeights, nil } @@ -457,18 +403,9 @@ func (idx *BlockerIndexer) match( } if len(tmpHeights) == 0 || firstRun { - - - - - - - return tmpHeights, nil } - - for k := range filteredHeights { cont := true @@ -495,7 +432,7 @@ func (idx *BlockerIndexer) indexEvents(batch store.KVBatch, events []abci.Event, heightBz := int64ToBytes(height) keys := dmtypes.EventKeys{} for _, event := range events { - + if len(event.Type) == 0 { continue } @@ -505,7 +442,6 @@ func (idx *BlockerIndexer) indexEvents(batch store.KVBatch, events []abci.Event, continue } - compositeKey := fmt.Sprintf("%s.%s", event.Type, string(attr.Key)) if compositeKey == tmtypes.BlockHeightKey { return dmtypes.EventKeys{}, fmt.Errorf("event type and attribute key \"%s\" is reserved; please use a different key", compositeKey) @@ -546,9 +482,8 @@ func (idx *BlockerIndexer) pruneBlocks(from, to uint64, logger log.Logger) (uint return nil } - for h := int64(from); h < int64(to); h++ { + for h := int64(from); h < int64(to); h++ { - if toFlush > 1000 { err := flush(batch, h) if err != nil { @@ -592,7 +527,7 @@ func (idx *BlockerIndexer) pruneBlocks(from, to uint64, logger log.Logger) (uint } - err := flush(batch, int64(to)) + err := flush(batch, int64(to)) if err != nil { return 0, err } diff --git a/indexers/blockindexer/kv/kv_test.go b/indexers/blockindexer/kv/kv_test.go index ff017616e..63d68c840 100644 --- a/indexers/blockindexer/kv/kv_test.go +++ b/indexers/blockindexer/kv/kv_test.go @@ -148,7 +148,6 @@ func TestBlockIndexer(t *testing.T) { } func TestBlockIndexerPruning(t *testing.T) { - // init the block indexer prefixStore := store.NewPrefixKV(store.NewDefaultInMemoryKVStore(), []byte("block_events")) indexer := blockidxkv.New(prefixStore) @@ -184,7 +183,6 @@ func TestBlockIndexerPruning(t *testing.T) { results, err = indexer.Search(context.Background(), q) require.NoError(t, err) require.Equal(t, 0, 
len(results)) - } func getBeginBlock() abci.ResponseBeginBlock { diff --git a/indexers/blockindexer/null/null.go b/indexers/blockindexer/null/null.go index ab80fa5a9..8727e081d 100644 --- a/indexers/blockindexer/null/null.go +++ b/indexers/blockindexer/null/null.go @@ -13,7 +13,6 @@ import ( var _ indexer.BlockIndexer = (*BlockerIndexer)(nil) - type BlockerIndexer struct{} func (idx *BlockerIndexer) Has(height int64) (bool, error) { diff --git a/indexers/blockindexer/query_range.go b/indexers/blockindexer/query_range.go index 9b2798524..537e645cf 100644 --- a/indexers/blockindexer/query_range.go +++ b/indexers/blockindexer/query_range.go @@ -6,21 +6,16 @@ import ( "github.com/tendermint/tendermint/libs/pubsub/query" ) - - - type QueryRanges map[string]QueryRange - type QueryRange struct { - LowerBound interface{} - UpperBound interface{} + LowerBound interface{} + UpperBound interface{} Key string IncludeLowerBound bool IncludeUpperBound bool } - func (qr QueryRange) AnyBound() interface{} { if qr.LowerBound != nil { return qr.LowerBound @@ -29,8 +24,6 @@ func (qr QueryRange) AnyBound() interface{} { return qr.UpperBound } - - func (qr QueryRange) LowerBoundValue() interface{} { if qr.LowerBound == nil { return nil @@ -52,8 +45,6 @@ func (qr QueryRange) LowerBoundValue() interface{} { } } - - func (qr QueryRange) UpperBoundValue() interface{} { if qr.UpperBound == nil { return nil @@ -75,8 +66,6 @@ func (qr QueryRange) UpperBoundValue() interface{} { } } - - func LookForRanges(conditions []query.Condition) (ranges QueryRanges, indexes []int) { ranges = make(QueryRanges) for i, c := range conditions { @@ -110,8 +99,6 @@ func LookForRanges(conditions []query.Condition) (ranges QueryRanges, indexes [] return ranges, indexes } - - func IsRangeOperation(op query.Operator) bool { switch op { case query.OpGreater, query.OpGreaterEqual, query.OpLess, query.OpLessEqual: diff --git a/indexers/txindex/indexer.go b/indexers/txindex/indexer.go index 6e275a021..b744ec250 100644 --- a/indexers/txindex/indexer.go +++ b/indexers/txindex/indexer.go @@ -10,33 +10,23 @@ import ( "github.com/tendermint/tendermint/libs/pubsub/query" ) - type TxIndexer interface { - AddBatch(b *Batch) error - Index(result *abci.TxResult) error - - Get(hash []byte) (*abci.TxResult, error) - Search(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) - Prune(from, to uint64, logger log.Logger) (uint64, error) } - - type Batch struct { Height int64 Ops []*abci.TxResult } - func NewBatch(n int64, height int64) *Batch { return &Batch{ Height: height, @@ -44,16 +34,13 @@ func NewBatch(n int64, height int64) *Batch { } } - func (b *Batch) Add(result *abci.TxResult) error { b.Ops[result.Index] = result return nil } - func (b *Batch) Size() int { return len(b.Ops) } - var ErrorEmptyHash = errors.New("transaction hash cannot be empty") diff --git a/indexers/txindex/indexer_service.go b/indexers/txindex/indexer_service.go index 16e022f92..20e96b1a4 100644 --- a/indexers/txindex/indexer_service.go +++ b/indexers/txindex/indexer_service.go @@ -11,14 +11,10 @@ import ( "github.com/tendermint/tendermint/types" ) - - const ( subscriber = "IndexerService" ) - - type IndexerService struct { service.BaseService @@ -27,7 +23,6 @@ type IndexerService struct { eventBus *types.EventBus } - func NewIndexerService( txIdxr TxIndexer, blockIdxr indexer.BlockIndexer, @@ -38,12 +33,7 @@ func NewIndexerService( return is } - - func (is *IndexerService) OnStart() error { - - - blockHeadersSub, err := is.eventBus.Subscribe( context.Background(), 
subscriber, @@ -94,16 +84,13 @@ func (is *IndexerService) OnStart() error { return nil } - func (is *IndexerService) OnStop() { if is.eventBus.IsRunning() { _ = is.eventBus.UnsubscribeAll(context.Background(), subscriber) } } - func (is *IndexerService) Prune(to uint64, s store.Store) (uint64, error) { - indexerBaseHeight, err := s.LoadIndexerBaseHeight() if errors.Is(err, gerrc.ErrNotFound) { @@ -112,19 +99,16 @@ func (is *IndexerService) Prune(to uint64, s store.Store) (uint64, error) { return 0, err } - blockPruned, err := is.blockIdxr.Prune(indexerBaseHeight, to, is.Logger) if err != nil { return blockPruned, err } - txPruned, err := is.txIdxr.Prune(indexerBaseHeight, to, is.Logger) if err != nil { return txPruned, err } - err = s.SaveIndexerBaseHeight(to) if err != nil { is.Logger.Error("saving indexer base height", "err", err) diff --git a/indexers/txindex/indexer_service_test.go b/indexers/txindex/indexer_service_test.go index 9c08b3d63..087b785b8 100644 --- a/indexers/txindex/indexer_service_test.go +++ b/indexers/txindex/indexer_service_test.go @@ -55,16 +55,18 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { Tx: types.Tx("foo"), Result: abci.ResponseDeliverTx{ Code: 0, - Events: []abci.Event{{Type: "test_event", - Attributes: []abci.EventAttribute{ - { - Key: []byte("foo"), - Value: []byte("100"), - Index: true, + Events: []abci.Event{ + { + Type: "test_event", + Attributes: []abci.EventAttribute{ + { + Key: []byte("foo"), + Value: []byte("100"), + Index: true, + }, }, }, }, - }, }, } @@ -76,16 +78,18 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { Tx: types.Tx("bar"), Result: abci.ResponseDeliverTx{ Code: 0, - Events: []abci.Event{{Type: "test_event", - Attributes: []abci.EventAttribute{ - { - Key: []byte("foo"), - Value: []byte("100"), - Index: true, + Events: []abci.Event{ + { + Type: "test_event", + Attributes: []abci.EventAttribute{ + { + Key: []byte("foo"), + Value: []byte("100"), + Index: true, + }, }, }, }, - }, }, } err = eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult2}) @@ -115,5 +119,4 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { // 2 indexed tx + indexed 2 events = 4 pruned expectedPruned := uint64(4) require.Equal(t, expectedPruned, pruned) - } diff --git a/indexers/txindex/kv/kv.go b/indexers/txindex/kv/kv.go index 485ba01ea..c98f8d385 100644 --- a/indexers/txindex/kv/kv.go +++ b/indexers/txindex/kv/kv.go @@ -29,20 +29,16 @@ const ( var _ txindex.TxIndexer = (*TxIndex)(nil) - type TxIndex struct { store store.KV } - func NewTxIndex(store store.KV) *TxIndex { return &TxIndex{ store: store, } } - - func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) { if len(hash) == 0 { return nil, txindex.ErrorEmptyHash @@ -65,10 +61,6 @@ func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) { return txResult, nil } - - - - func (txi *TxIndex) AddBatch(b *txindex.Batch) error { storeBatch := txi.store.NewBatch() defer storeBatch.Discard() @@ -77,13 +69,12 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { for _, result := range b.Ops { hash := types.Tx(result.Tx).Hash() - eventKeys, err := txi.indexEvents(result, hash, storeBatch) if err != nil { return err } eventKeysBatch.Keys = append(eventKeysBatch.Keys, eventKeys.Keys...) 
- + err = storeBatch.Set(keyForHeight(result), hash) if err != nil { return err @@ -93,7 +84,7 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { if err != nil { return err } - + err = storeBatch.Set(hash, rawBytes) if err != nil { return err @@ -108,29 +99,22 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { return storeBatch.Commit() } - - - - func (txi *TxIndex) Index(result *abci.TxResult) error { b := txi.store.NewBatch() defer b.Discard() hash := types.Tx(result.Tx).Hash() - eventKeys, err := txi.indexEvents(result, hash, b) if err != nil { return err } - err = txi.addEventKeys(result.Height, &eventKeys, b) if err != nil { return nil } - err = b.Set(keyForHeight(result), hash) if err != nil { return err @@ -140,7 +124,7 @@ func (txi *TxIndex) Index(result *abci.TxResult) error { if err != nil { return err } - + err = b.Set(hash, rawBytes) if err != nil { return err @@ -152,7 +136,7 @@ func (txi *TxIndex) Index(result *abci.TxResult) error { func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store store.KVBatch) (dmtypes.EventKeys, error) { eventKeys := dmtypes.EventKeys{} for _, event := range result.Result.Events { - + if len(event.Type) == 0 { continue } @@ -162,7 +146,6 @@ func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store store. continue } - compositeTag := fmt.Sprintf("%s.%s", event.Type, string(attr.Key)) if attr.GetIndex() { err := store.Set(keyForEvent(compositeTag, attr.Value, result), hash) @@ -177,17 +160,6 @@ func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store store. return eventKeys, nil } - - - - - - - - - - - func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) { select { case <-ctx.Done(): @@ -199,13 +171,11 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul var hashesInitialized bool filteredHashes := make(map[string][]byte) - conditions, err := q.Conditions() if err != nil { return nil, fmt.Errorf("during parsing conditions from query: %w", err) } - hash, ok, err := lookForHash(conditions) if err != nil { return nil, fmt.Errorf("during searching for a hash in the query: %w", err) @@ -221,12 +191,8 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul } } - skipIndexes := make([]int, 0) - - - ranges, rangeIndexes := indexer.LookForRanges(conditions) if len(ranges) > 0 { skipIndexes = append(skipIndexes, rangeIndexes...) 
@@ -236,8 +202,6 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul filteredHashes = txi.matchRange(ctx, qr, startKey(qr.Key), filteredHashes, true) hashesInitialized = true - - if len(filteredHashes) == 0 { break } @@ -247,10 +211,8 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul } } - height := lookForHeight(conditions) - for i, c := range conditions { if intInSlice(i, skipIndexes) { continue @@ -260,8 +222,6 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul filteredHashes = txi.match(ctx, c, startKeyForCondition(c, height), filteredHashes, true) hashesInitialized = true - - if len(filteredHashes) == 0 { break } @@ -283,7 +243,6 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResul } results = append(results, res) - select { case <-ctx.Done(): cont = false @@ -308,7 +267,6 @@ func lookForHash(conditions []query.Condition) (hash []byte, ok bool, err error) return } - func lookForHeight(conditions []query.Condition) (height int64) { for _, c := range conditions { if c.CompositeKey == tmtypes.TxHeightKey && c.Op == query.OpEqual { @@ -318,11 +276,6 @@ func lookForHeight(conditions []query.Condition) (height int64) { return 0 } - - - - - func (txi *TxIndex) match( ctx context.Context, c query.Condition, @@ -330,8 +283,6 @@ func (txi *TxIndex) match( filteredHashes map[string][]byte, firstRun bool, ) map[string][]byte { - - if !firstRun && len(filteredHashes) == 0 { return filteredHashes } @@ -348,7 +299,6 @@ func (txi *TxIndex) match( tmpHashes[string(it.Value())] = it.Value() - select { case <-ctx.Done(): cont = false @@ -364,8 +314,7 @@ func (txi *TxIndex) match( } case c.Op == query.OpExists: - - + it := txi.store.PrefixIterator(startKey(c.CompositeKey)) defer it.Discard() @@ -374,7 +323,6 @@ func (txi *TxIndex) match( tmpHashes[string(it.Value())] = it.Value() - select { case <-ctx.Done(): cont = false @@ -390,9 +338,7 @@ func (txi *TxIndex) match( } case c.Op == query.OpContains: - - - + it := txi.store.PrefixIterator(startKey(c.CompositeKey)) defer it.Discard() @@ -407,7 +353,6 @@ func (txi *TxIndex) match( tmpHashes[string(it.Value())] = it.Value() } - select { case <-ctx.Done(): cont = false @@ -426,25 +371,15 @@ func (txi *TxIndex) match( } if len(tmpHashes) == 0 || firstRun { - - - - - - - return tmpHashes } - - for k := range filteredHashes { cont := true if tmpHashes[k] == nil { delete(filteredHashes, k) - select { case <-ctx.Done(): cont = false @@ -460,11 +395,6 @@ func (txi *TxIndex) match( return filteredHashes } - - - - - func (txi *TxIndex) matchRange( ctx context.Context, qr indexer.QueryRange, @@ -472,8 +402,6 @@ func (txi *TxIndex) matchRange( filteredHashes map[string][]byte, firstRun bool, ) map[string][]byte { - - if !firstRun && len(filteredHashes) == 0 { return filteredHashes } @@ -512,15 +440,8 @@ LOOP: tmpHashes[string(it.Value())] = it.Value() } - - - - - - } - select { case <-ctx.Done(): cont = false @@ -536,25 +457,15 @@ LOOP: } if len(tmpHashes) == 0 || firstRun { - - - - - - - return tmpHashes } - - for k := range filteredHashes { cont := true if tmpHashes[k] == nil { delete(filteredHashes, k) - select { case <-ctx.Done(): cont = false @@ -592,9 +503,8 @@ func (txi *TxIndex) pruneTxsAndEvents(from, to uint64, logger log.Logger) (uint6 return nil } - for h := int64(from); h < int64(to); h++ { + for h := int64(from); h < int64(to); h++ { - if toFlush > 1000 { err := flush(batch, h) if err != nil { @@ -605,7 +515,6 @@ func (txi 
*TxIndex) pruneTxsAndEvents(from, to uint64, logger log.Logger) (uint6 toFlush = 0 } - prunedEvents, err := txi.pruneEvents(h, batch) pruned += prunedEvents toFlush += prunedEvents @@ -614,10 +523,8 @@ func (txi *TxIndex) pruneTxsAndEvents(from, to uint64, logger log.Logger) (uint6 continue } - it := txi.store.PrefixIterator(prefixForHeight(h)) - for ; it.Valid(); it.Next() { toFlush++ if err := batch.Delete(it.Key()); err != nil { @@ -635,7 +542,7 @@ func (txi *TxIndex) pruneTxsAndEvents(from, to uint64, logger log.Logger) (uint6 } - err := flush(batch, int64(to)) + err := flush(batch, int64(to)) if err != nil { return 0, err } @@ -669,7 +576,6 @@ func (txi *TxIndex) pruneEvents(height int64, batch store.KVBatch) (uint64, erro } func (txi *TxIndex) addEventKeys(height int64, eventKeys *dymint.EventKeys, batch store.KVBatch) error { - eventKeyHeight, err := eventHeightKey(height) if err != nil { return err @@ -684,8 +590,6 @@ func (txi *TxIndex) addEventKeys(height int64, eventKeys *dymint.EventKeys, batc return nil } - - func isTagKey(key []byte) bool { return strings.Count(string(key), tagKeySeparator) == 3 } diff --git a/indexers/txindex/kv/kv_test.go b/indexers/txindex/kv/kv_test.go index abba1995d..394dd9595 100644 --- a/indexers/txindex/kv/kv_test.go +++ b/indexers/txindex/kv/kv_test.go @@ -315,7 +315,6 @@ func TestTxSearchMultipleTxs(t *testing.T) { } func TestTxIndexerPruning(t *testing.T) { - // init the block indexer indexer := NewTxIndex(store.NewDefaultInMemoryKVStore()) numBlocks := uint64(100) @@ -357,7 +356,6 @@ func TestTxIndexerPruning(t *testing.T) { results := indexer.match(context.Background(), c, startKeyForCondition(c, 0), nil, true) require.Equal(t, 0, len(results)) } - } func txResultWithEvents(events []abci.Event) *abci.TxResult { @@ -374,6 +372,7 @@ func txResultWithEvents(events []abci.Event) *abci.TxResult { }, } } + func getRandomTxResult(height int64, events []abci.Event) *abci.TxResult { tx := types.Tx(randomTxHash()) return &abci.TxResult{ diff --git a/indexers/txindex/kv/utils.go b/indexers/txindex/kv/utils.go index 05cb12c90..4f8699cc4 100644 --- a/indexers/txindex/kv/utils.go +++ b/indexers/txindex/kv/utils.go @@ -4,7 +4,6 @@ import "github.com/google/orderedcode" const TxEventHeightKey = "txevent.height" - func intInSlice(a int, list []int) bool { for _, b := range list { if b == a { diff --git a/indexers/txindex/null/null.go b/indexers/txindex/null/null.go index 7d2167389..0a6163db3 100644 --- a/indexers/txindex/null/null.go +++ b/indexers/txindex/null/null.go @@ -13,20 +13,16 @@ import ( var _ txindex.TxIndexer = (*TxIndex)(nil) - type TxIndex struct{} - func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) { return nil, errors.New(`indexing is disabled (set 'tx_index = "kv"' in config)`) } - func (txi *TxIndex) AddBatch(batch *txindex.Batch) error { return nil } - func (txi *TxIndex) Index(result *abci.TxResult) error { return nil } diff --git a/mempool/cache.go b/mempool/cache.go index fdb11ea5b..288e21a4e 100644 --- a/mempool/cache.go +++ b/mempool/cache.go @@ -7,31 +7,18 @@ import ( "github.com/tendermint/tendermint/types" ) - - - - - type TxCache interface { - Reset() - - Push(tx types.Tx) bool - Remove(tx types.Tx) - - Has(tx types.Tx) bool } var _ TxCache = (*LRUTxCache)(nil) - - type LRUTxCache struct { mtx sync.Mutex size int @@ -47,8 +34,6 @@ func NewLRUTxCache(cacheSize int) *LRUTxCache { } } - - func (c *LRUTxCache) GetList() *list.List { return c.list } @@ -109,7 +94,6 @@ func (c *LRUTxCache) Has(tx types.Tx) bool { return ok } - 
type NopTxCache struct{} var _ TxCache = (*NopTxCache)(nil) diff --git a/mempool/clist/clist.go b/mempool/clist/clist.go index ff94a4b49..8ed48a0bb 100644 --- a/mempool/clist/clist.go +++ b/mempool/clist/clist.go @@ -1,7 +1,5 @@ package clist - - import ( "fmt" "sync" @@ -9,12 +7,8 @@ import ( tmsync "github.com/tendermint/tendermint/libs/sync" ) - - - const MaxLength = int(^uint(0) >> 1) - type CElement struct { mtx tmsync.RWMutex prev *CElement @@ -25,11 +19,9 @@ type CElement struct { nextWaitCh chan struct{} removed bool - Value interface{} + Value interface{} } - - func (e *CElement) NextWait() *CElement { for { e.mtx.RLock() @@ -43,13 +35,10 @@ func (e *CElement) NextWait() *CElement { } nextWg.Wait() - - + } } - - func (e *CElement) PrevWait() *CElement { for { e.mtx.RLock() @@ -66,8 +55,6 @@ func (e *CElement) PrevWait() *CElement { } } - - func (e *CElement) PrevWaitChan() <-chan struct{} { e.mtx.RLock() defer e.mtx.RUnlock() @@ -75,8 +62,6 @@ func (e *CElement) PrevWaitChan() <-chan struct{} { return e.prevWaitCh } - - func (e *CElement) NextWaitChan() <-chan struct{} { e.mtx.RLock() defer e.mtx.RUnlock() @@ -84,7 +69,6 @@ func (e *CElement) NextWaitChan() <-chan struct{} { return e.nextWaitCh } - func (e *CElement) Next() *CElement { e.mtx.RLock() val := e.next @@ -92,7 +76,6 @@ func (e *CElement) Next() *CElement { return val } - func (e *CElement) Prev() *CElement { e.mtx.RLock() prev := e.prev @@ -127,20 +110,14 @@ func (e *CElement) DetachPrev() { e.mtx.Unlock() } - - func (e *CElement) SetNext(newNext *CElement) { e.mtx.Lock() oldNext := e.next e.next = newNext if oldNext != nil && newNext == nil { - - - - - - e.nextWg = waitGroup1() + + e.nextWg = waitGroup1() e.nextWaitCh = make(chan struct{}) } if oldNext == nil && newNext != nil { @@ -150,15 +127,13 @@ func (e *CElement) SetNext(newNext *CElement) { e.mtx.Unlock() } - - func (e *CElement) SetPrev(newPrev *CElement) { e.mtx.Lock() oldPrev := e.prev e.prev = newPrev if oldPrev != nil && newPrev == nil { - e.prevWg = waitGroup1() + e.prevWg = waitGroup1() e.prevWaitCh = make(chan struct{}) } if oldPrev == nil && newPrev != nil { @@ -173,7 +148,6 @@ func (e *CElement) SetRemoved() { e.removed = true - if e.prev == nil { e.prevWg.Done() close(e.prevWaitCh) @@ -185,20 +159,14 @@ func (e *CElement) SetRemoved() { e.mtx.Unlock() } - - - - - - type CList struct { mtx tmsync.RWMutex wg *sync.WaitGroup waitCh chan struct{} - head *CElement - tail *CElement - len int - maxLen int + head *CElement + tail *CElement + len int + maxLen int } func (l *CList) Init() *CList { @@ -213,11 +181,8 @@ func (l *CList) Init() *CList { return l } - func New() *CList { return newWithMax(MaxLength) } - - func newWithMax(maxLength int) *CList { l := new(CList) l.maxLen = maxLength @@ -239,7 +204,6 @@ func (l *CList) Front() *CElement { } func (l *CList) FrontWait() *CElement { - for { l.mtx.RLock() head := l.head @@ -250,7 +214,7 @@ func (l *CList) FrontWait() *CElement { return head } wg.Wait() - + } } @@ -272,13 +236,10 @@ func (l *CList) BackWait() *CElement { return tail } wg.Wait() - - + } } - - func (l *CList) WaitChan() <-chan struct{} { l.mtx.Lock() defer l.mtx.Unlock() @@ -286,11 +247,9 @@ func (l *CList) WaitChan() <-chan struct{} { return l.waitCh } - func (l *CList) PushBack(v interface{}) *CElement { l.mtx.Lock() - e := &CElement{ prev: nil, prevWg: waitGroup1(), @@ -302,7 +261,6 @@ func (l *CList) PushBack(v interface{}) *CElement { Value: v, } - if l.len == 0 { l.wg.Done() close(l.waitCh) @@ -312,21 +270,18 @@ func (l *CList) 
PushBack(v interface{}) *CElement { } l.len++ - if l.tail == nil { l.head = e l.tail = e } else { - e.SetPrev(l.tail) - l.tail.SetNext(e) - l.tail = e + e.SetPrev(l.tail) + l.tail.SetNext(e) + l.tail = e } l.mtx.Unlock() return e } - - func (l *CList) Remove(e *CElement) interface{} { l.mtx.Lock() @@ -346,16 +301,13 @@ func (l *CList) Remove(e *CElement) interface{} { panic("Remove(e) with false tail") } - if l.len == 1 { - l.wg = waitGroup1() + l.wg = waitGroup1() l.waitCh = make(chan struct{}) } - l.len-- - if prev == nil { l.head = next } else { @@ -367,7 +319,6 @@ func (l *CList) Remove(e *CElement) interface{} { next.SetPrev(prev) } - e.SetRemoved() l.mtx.Unlock() diff --git a/mempool/ids.go b/mempool/ids.go index 5afb3bc92..6a26ef9f2 100644 --- a/mempool/ids.go +++ b/mempool/ids.go @@ -1,3 +1 @@ package mempool - - diff --git a/mempool/mempool.go b/mempool/mempool.go index dbbec0e02..d7235e05d 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -13,107 +13,53 @@ import ( const ( MempoolChannel = byte(0x30) - PeerCatchupSleepIntervalMS = 100 - - UnknownPeerID uint16 = 0 MaxActiveIDs = math.MaxUint16 ) - - - - type Mempool interface { - - CheckTx(tx types.Tx, callback func(*abci.Response), txInfo TxInfo) error - - RemoveTxByKey(txKey types.TxKey) error - - - - - - ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs - - - ReapMaxTxs(max int) types.Txs - - Lock() - Unlock() - - - - - - Update( blockHeight int64, blockTxs types.Txs, deliverTxResponses []*abci.ResponseDeliverTx, ) error - SetPreCheckFn(fn PreCheckFunc) - SetPostCheckFn(fn PostCheckFunc) - - - - - FlushAppConn() error - Flush() - - - - - TxsAvailable() <-chan struct{} - - EnableTxsAvailable() - Size() int - SizeBytes() int64 } - - - type PreCheckFunc func(types.Tx) error - - - type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error - - func PreCheckMaxBytes(maxBytes int64) PreCheckFunc { return func(tx types.Tx) error { txSize := types.ComputeProtoSizeForTxs([]types.Tx{tx}) @@ -126,8 +72,6 @@ func PreCheckMaxBytes(maxBytes int64) PreCheckFunc { } } - - func PostCheckMaxGas(maxGas int64) PostCheckFunc { return func(tx types.Tx, res *abci.ResponseCheckTx) error { if maxGas == -1 { @@ -146,14 +90,10 @@ func PostCheckMaxGas(maxGas int64) PostCheckFunc { } } - var ErrTxInCache = errors.New("tx already exists in cache") - type TxKey [sha256.Size]byte - - type ErrTxTooLarge struct { Max int Actual int @@ -163,8 +103,6 @@ func (e ErrTxTooLarge) Error() string { return fmt.Sprintf("Tx too large. 
Max size is %d, but got %d", e.Max, e.Actual) } - - type ErrMempoolIsFull struct { NumTxs int MaxTxs int @@ -182,7 +120,6 @@ func (e ErrMempoolIsFull) Error() string { ) } - type ErrPreCheck struct { Reason error } @@ -191,7 +128,6 @@ func (e ErrPreCheck) Error() string { return e.Reason.Error() } - func IsPreCheckError(err error) bool { return errors.As(err, &ErrPreCheck{}) } diff --git a/mempool/metrics.go b/mempool/metrics.go index 613715038..c2e82adcd 100644 --- a/mempool/metrics.go +++ b/mempool/metrics.go @@ -8,42 +8,23 @@ import ( ) const ( - - MetricsSubsystem = "mempool" ) - - type Metrics struct { - Size metrics.Gauge - TxSizeBytes metrics.Histogram - FailedTxs metrics.Counter - - - - RejectedTxs metrics.Counter - - - - EvictedTxs metrics.Counter - RecheckTimes metrics.Counter } - - - func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { labels := []string{} for i := 0; i < len(labelsAndValues); i += 2 { @@ -95,7 +76,6 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { } } - func NopMetrics() *Metrics { return &Metrics{ Size: discard.NewGauge(), diff --git a/mempool/mock/mempool.go b/mempool/mock/mempool.go index 014816a9e..eb0cd3ab7 100644 --- a/mempool/mock/mempool.go +++ b/mempool/mock/mempool.go @@ -7,7 +7,6 @@ import ( "github.com/tendermint/tendermint/types" ) - type Mempool struct{} var _ mempool.Mempool = Mempool{} diff --git a/mempool/tx.go b/mempool/tx.go index 191f1cbc0..6bff5f98b 100644 --- a/mempool/tx.go +++ b/mempool/tx.go @@ -4,14 +4,8 @@ import ( "github.com/tendermint/tendermint/p2p" ) - - type TxInfo struct { - - - SenderID uint16 - SenderP2PID p2p.ID } diff --git a/mempool/v1/mempool.go b/mempool/v1/mempool.go index a543b64ab..39ce4178c 100644 --- a/mempool/v1/mempool.go +++ b/mempool/v1/mempool.go @@ -20,45 +20,30 @@ import ( var _ mempool.Mempool = (*TxMempool)(nil) - type TxMempoolOption func(*TxMempool) - - - - - - - - - type TxMempool struct { - logger log.Logger config *config.MempoolConfig proxyAppConn proxy.AppConnMempool metrics *mempool.Metrics - cache mempool.TxCache + cache mempool.TxCache - - txsBytes int64 - txRecheck int64 + txsBytes int64 + txRecheck int64 - mtx *sync.RWMutex notifiedTxsAvailable bool - txsAvailable chan struct{} + txsAvailable chan struct{} preCheck mempool.PreCheckFunc postCheck mempool.PostCheckFunc - height int64 + height int64 - txs *clist.CList + txs *clist.CList txByKey map[types.TxKey]*clist.CElement - txBySender map[string]*clist.CElement + txBySender map[string]*clist.CElement } - - func NewTxMempool( logger log.Logger, cfg *config.MempoolConfig, @@ -91,59 +76,33 @@ func NewTxMempool( return txmp } - - - func WithPreCheck(f mempool.PreCheckFunc) TxMempoolOption { return func(txmp *TxMempool) { txmp.preCheck = f } } - - - func WithPostCheck(f mempool.PostCheckFunc) TxMempoolOption { return func(txmp *TxMempool) { txmp.postCheck = f } } - func WithMetrics(metrics *mempool.Metrics) TxMempoolOption { return func(txmp *TxMempool) { txmp.metrics = metrics } } - - func (txmp *TxMempool) Lock() { txmp.mtx.Lock() } - func (txmp *TxMempool) Unlock() { txmp.mtx.Unlock() } - - func (txmp *TxMempool) Size() int { return txmp.txs.Len() } - - func (txmp *TxMempool) SizeBytes() int64 { return atomic.LoadInt64(&txmp.txsBytes) } - - - - func (txmp *TxMempool) FlushAppConn() error { - - - - - - txmp.mtx.Unlock() defer txmp.mtx.Lock() return txmp.proxyAppConn.FlushSync() } - - func (txmp *TxMempool) EnableTxsAvailable() { txmp.mtx.Lock() defer txmp.mtx.Unlock() @@ -151,60 +110,31 @@ 
func (txmp *TxMempool) EnableTxsAvailable() { txmp.txsAvailable = make(chan struct{}, 1) } - - func (txmp *TxMempool) TxsAvailable() <-chan struct{} { return txmp.txsAvailable } - - - - - - - - - - - - - - - - - - - - func (txmp *TxMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo mempool.TxInfo) error { - - - height, err := func() (int64, error) { txmp.mtx.RLock() defer txmp.mtx.RUnlock() - if len(tx) > txmp.config.MaxTxBytes { return 0, mempool.ErrTxTooLarge{Max: txmp.config.MaxTxBytes, Actual: len(tx)} } - if txmp.preCheck != nil { if err := txmp.preCheck(tx); err != nil { return 0, mempool.ErrPreCheck{Reason: err} } } - if err := txmp.proxyAppConn.Error(); err != nil { return 0, err } txKey := tx.Key() - if !txmp.cache.Push(tx) { - + if elt, ok := txmp.txByKey[txKey]; ok { w, _ := elt.Value.(*WrappedTx) w.SetPeer(txInfo.SenderID) @@ -217,13 +147,6 @@ func (txmp *TxMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo memp return err } - - - - - - - reqRes := txmp.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{Tx: tx}) if err := txmp.proxyAppConn.FlushSync(); err != nil { return err @@ -244,17 +167,12 @@ func (txmp *TxMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo memp return nil } - - - func (txmp *TxMempool) RemoveTxByKey(txKey types.TxKey) error { txmp.mtx.Lock() defer txmp.mtx.Unlock() return txmp.removeTxByKey(txKey) } - - func (txmp *TxMempool) removeTxByKey(key types.TxKey) error { if elt, ok := txmp.txByKey[key]; ok { w, _ := elt.Value.(*WrappedTx) @@ -269,8 +187,6 @@ func (txmp *TxMempool) removeTxByKey(key types.TxKey) error { return fmt.Errorf("transaction %x not found", key) } - - func (txmp *TxMempool) removeTxByElement(elt *clist.CElement) { w, _ := elt.Value.(*WrappedTx) delete(txmp.txByKey, w.tx.Key()) @@ -281,14 +197,10 @@ func (txmp *TxMempool) removeTxByElement(elt *clist.CElement) { atomic.AddInt64(&txmp.txsBytes, -w.Size()) } - - func (txmp *TxMempool) Flush() { txmp.mtx.Lock() defer txmp.mtx.Unlock() - - cur := txmp.txs.Front() for cur != nil { next := cur.Next() @@ -297,14 +209,9 @@ func (txmp *TxMempool) Flush() { } txmp.cache.Reset() - - atomic.StoreInt64(&txmp.txRecheck, 0) } - - - func (txmp *TxMempool) allEntriesSorted() []*WrappedTx { txmp.mtx.RLock() defer txmp.mtx.RUnlock() @@ -317,28 +224,17 @@ func (txmp *TxMempool) allEntriesSorted() []*WrappedTx { if all[i].priority == all[j].priority { return all[i].timestamp.Before(all[j].timestamp) } - return all[i].priority > all[j].priority + return all[i].priority > all[j].priority }) return all } - - - - - - - - - - func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { var totalGas, totalBytes int64 - var keep []types.Tx + var keep []types.Tx for _, w := range txmp.allEntriesSorted() { - - + totalGas += w.gasWanted totalBytes += types.ComputeProtoSizeForTxs([]types.Tx{w.tx}) if (maxGas >= 0 && totalGas > maxGas) || (maxBytes >= 0 && totalBytes > maxBytes) { @@ -349,24 +245,12 @@ func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { return keep } - - func (txmp *TxMempool) TxsWaitChan() <-chan struct{} { return txmp.txs.WaitChan() } - - func (txmp *TxMempool) TxsFront() *clist.CElement { return txmp.txs.Front() } - - - - - - - - func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs { - var keep []types.Tx + var keep []types.Tx for _, w := range txmp.allEntriesSorted() { if max >= 0 && len(keep) >= max { @@ -377,28 +261,16 @@ func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs { return keep } - - - - - - - - - - - func (txmp 
*TxMempool) Update( blockHeight int64, blockTxs types.Txs, deliverTxResponses []*abci.ResponseDeliverTx, ) error { - if txmp.mtx.TryLock() { txmp.mtx.Unlock() panic("mempool: Update caller does not hold the lock") } - + if len(blockTxs) != len(deliverTxResponses) { panic(fmt.Sprintf("mempool: got %d transactions but %d DeliverTx responses", len(blockTxs), len(deliverTxResponses))) @@ -408,24 +280,18 @@ func (txmp *TxMempool) Update( txmp.notifiedTxsAvailable = false for i, tx := range blockTxs { - - - + if deliverTxResponses[i].Code == abci.CodeTypeOK { _ = txmp.cache.Push(tx) } else if !txmp.config.KeepInvalidTxsInCache { txmp.cache.Remove(tx) } - _ = txmp.removeTxByKey(tx.Key()) } txmp.purgeExpiredTxs(blockHeight) - - - size := txmp.Size() txmp.metrics.Size.Set(float64(size)) if size > 0 { @@ -446,19 +312,6 @@ func (txmp *TxMempool) SetPostCheckFn(fn mempool.PostCheckFunc) { txmp.postCheck = fn } - - - - - - - - - - - - - func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { checkTxRes, ok := res.Value.(*abci.Response_CheckTx) if !ok { @@ -490,14 +343,10 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { txmp.metrics.FailedTxs.Add(1) - - if !txmp.config.KeepInvalidTxsInCache { txmp.cache.Remove(wtx.tx) } - - if err != nil { checkTxRes.CheckTx.MempoolError = err.Error() } @@ -507,9 +356,6 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { priority := checkTxRes.CheckTx.Priority sender := checkTxRes.CheckTx.Sender - - - if sender != "" { elt, ok := txmp.txBySender[sender] if ok { @@ -526,15 +372,9 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { } } - - - - - - if err := txmp.canAddTx(wtx); err != nil { - var victims []*clist.CElement - var victimBytes int64 + var victims []*clist.CElement + var victimBytes int64 for cur := txmp.txs.Front(); cur != nil; cur = cur.Next() { cw := cur.Value.(*WrappedTx) if cw.priority < priority { @@ -543,9 +383,6 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { } } - - - if len(victims) == 0 || victimBytes < wtx.Size() { txmp.cache.Remove(wtx.tx) txmp.logger.Error( @@ -564,8 +401,6 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { "new_priority", priority, ) - - sort.Slice(victims, func(i, j int) bool { iw := victims[i].Value.(*WrappedTx) jw := victims[j].Value.(*WrappedTx) @@ -575,7 +410,6 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { return iw.Priority() < jw.Priority() }) - var evictedBytes int64 for _, vic := range victims { w := vic.Value.(*WrappedTx) @@ -589,8 +423,6 @@ func (txmp *TxMempool) initialTxCallback(wtx *WrappedTx, res *abci.Response) { txmp.cache.Remove(w.tx) txmp.metrics.EvictedTxs.Add(1) - - evictedBytes += w.Size() if evictedBytes >= wtx.Size() { break @@ -625,26 +457,15 @@ func (txmp *TxMempool) insertTx(wtx *WrappedTx) { atomic.AddInt64(&txmp.txsBytes, wtx.Size()) } - - - - - - func (txmp *TxMempool) recheckTxCallback(req *abci.Request, res *abci.Response) { checkTxRes, ok := res.Value.(*abci.Response_CheckTx) if !ok { - - return } - - - numLeft := atomic.AddInt64(&txmp.txRecheck, -1) if numLeft == 0 { - defer txmp.notifyTxsAvailable() + defer txmp.notifyTxsAvailable() } else if numLeft < 0 { return } @@ -655,16 +476,12 @@ func (txmp *TxMempool) recheckTxCallback(req *abci.Request, res *abci.Response) txmp.mtx.Lock() defer txmp.mtx.Unlock() - - - elt, ok := txmp.txByKey[tx.Key()] if !ok { return } wtx := 
elt.Value.(*WrappedTx) - var err error if txmp.postCheck != nil { err = txmp.postCheck(tx, checkTxRes.CheckTx) @@ -672,7 +489,7 @@ func (txmp *TxMempool) recheckTxCallback(req *abci.Request, res *abci.Response) if checkTxRes.CheckTx.Code == abci.CodeTypeOK && err == nil { wtx.SetPriority(checkTxRes.CheckTx.Priority) - return + return } txmp.logger.Debug( @@ -690,12 +507,6 @@ func (txmp *TxMempool) recheckTxCallback(req *abci.Request, res *abci.Response) txmp.metrics.Size.Set(float64(txmp.Size())) } - - - - - - func (txmp *TxMempool) recheckTransactions() { if txmp.Size() == 0 { panic("mempool: cannot run recheck on an empty mempool") @@ -705,10 +516,7 @@ func (txmp *TxMempool) recheckTransactions() { "num_txs", txmp.Size(), "height", txmp.height, ) - - - - + txmp.mtx.Unlock() defer txmp.mtx.Lock() @@ -716,7 +524,6 @@ func (txmp *TxMempool) recheckTransactions() { for e := txmp.txs.Front(); e != nil; e = e.Next() { wtx := e.Value.(*WrappedTx) - _ = txmp.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{ Tx: wtx.tx, Type: abci.CheckTxType_Recheck, @@ -730,9 +537,6 @@ func (txmp *TxMempool) recheckTransactions() { txmp.proxyAppConn.FlushAsync() } - - - func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error { numTxs := txmp.Size() txBytes := txmp.SizeBytes() @@ -749,21 +553,15 @@ func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error { return nil } - - - - - func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) { if txmp.config.TTLNumBlocks == 0 && txmp.config.TTLDuration == 0 { - return + return } now := time.Now() cur := txmp.txs.Front() for cur != nil { - - + next := cur.Next() w := cur.Value.(*WrappedTx) @@ -782,11 +580,11 @@ func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) { func (txmp *TxMempool) notifyTxsAvailable() { if txmp.Size() == 0 { - return + return } if txmp.txsAvailable != nil && !txmp.notifiedTxsAvailable { - + txmp.notifiedTxsAvailable = true select { diff --git a/mempool/v1/tx.go b/mempool/v1/tx.go index 88134c052..6c1c7ad35 100644 --- a/mempool/v1/tx.go +++ b/mempool/v1/tx.go @@ -7,25 +7,21 @@ import ( "github.com/tendermint/tendermint/types" ) - - type WrappedTx struct { - tx types.Tx - hash types.TxKey - height int64 - timestamp time.Time + tx types.Tx + hash types.TxKey + height int64 + timestamp time.Time mtx sync.Mutex - gasWanted int64 - priority int64 - sender string - peers map[uint16]bool + gasWanted int64 + priority int64 + sender string + peers map[uint16]bool } - func (w *WrappedTx) Size() int64 { return int64(len(w.tx)) } - func (w *WrappedTx) SetPeer(id uint16) { w.mtx.Lock() defer w.mtx.Unlock() @@ -36,7 +32,6 @@ func (w *WrappedTx) SetPeer(id uint16) { } } - func (w *WrappedTx) HasPeer(id uint16) bool { w.mtx.Lock() defer w.mtx.Unlock() @@ -44,42 +39,36 @@ func (w *WrappedTx) HasPeer(id uint16) bool { return ok } - func (w *WrappedTx) SetGasWanted(gas int64) { w.mtx.Lock() defer w.mtx.Unlock() w.gasWanted = gas } - func (w *WrappedTx) GasWanted() int64 { w.mtx.Lock() defer w.mtx.Unlock() return w.gasWanted } - func (w *WrappedTx) SetSender(sender string) { w.mtx.Lock() defer w.mtx.Unlock() w.sender = sender } - func (w *WrappedTx) Sender() string { w.mtx.Lock() defer w.mtx.Unlock() return w.sender } - func (w *WrappedTx) SetPriority(p int64) { w.mtx.Lock() defer w.mtx.Unlock() w.priority = p } - func (w *WrappedTx) Priority() int64 { w.mtx.Lock() defer w.mtx.Unlock() diff --git a/mocks/github.com/dymensionxyz/dymint/block/mock_ExecutorI.go b/mocks/github.com/dymensionxyz/dymint/block/mock_ExecutorI.go index 6098f6c98..54b0c0c24 100644 --- 
a/mocks/github.com/dymensionxyz/dymint/block/mock_ExecutorI.go +++ b/mocks/github.com/dymensionxyz/dymint/block/mock_ExecutorI.go @@ -1,5 +1,3 @@ - - package block import ( @@ -16,7 +14,6 @@ import ( types "github.com/dymensionxyz/dymint/types" ) - type MockExecutorI struct { mock.Mock } @@ -29,7 +26,6 @@ func (_m *MockExecutorI) EXPECT() *MockExecutorI_Expecter { return &MockExecutorI_Expecter{mock: &_m.Mock} } - func (_m *MockExecutorI) AddConsensusMsgs(_a0 ...proto.Message) { _va := make([]interface{}, len(_a0)) for _i := range _a0 { @@ -40,13 +36,10 @@ func (_m *MockExecutorI) AddConsensusMsgs(_a0 ...proto.Message) { _m.Called(_ca...) } - type MockExecutorI_AddConsensusMsgs_Call struct { *mock.Call } - - func (_e *MockExecutorI_Expecter) AddConsensusMsgs(_a0 ...interface{}) *MockExecutorI_AddConsensusMsgs_Call { return &MockExecutorI_AddConsensusMsgs_Call{Call: _e.mock.On("AddConsensusMsgs", append([]interface{}{}, _a0...)...)} @@ -75,7 +68,6 @@ func (_c *MockExecutorI_AddConsensusMsgs_Call) RunAndReturn(run func(...proto.Me return _c } - func (_m *MockExecutorI) Commit(_a0 *types.State, _a1 *types.Block, resp *state.ABCIResponses) ([]byte, int64, error) { ret := _m.Called(_a0, _a1, resp) @@ -112,15 +104,10 @@ func (_m *MockExecutorI) Commit(_a0 *types.State, _a1 *types.Block, resp *state. return r0, r1, r2 } - type MockExecutorI_Commit_Call struct { *mock.Call } - - - - func (_e *MockExecutorI_Expecter) Commit(_a0 interface{}, _a1 interface{}, resp interface{}) *MockExecutorI_Commit_Call { return &MockExecutorI_Commit_Call{Call: _e.mock.On("Commit", _a0, _a1, resp)} } @@ -142,7 +129,6 @@ func (_c *MockExecutorI_Commit_Call) RunAndReturn(run func(*types.State, *types. return _c } - func (_m *MockExecutorI) CreateBlock(height uint64, lastCommit *types.Commit, lastHeaderHash [32]byte, nextSeqHash [32]byte, _a4 *types.State, maxBlockDataSizeBytes uint64) *types.Block { ret := _m.Called(height, lastCommit, lastHeaderHash, nextSeqHash, _a4, maxBlockDataSizeBytes) @@ -162,18 +148,10 @@ func (_m *MockExecutorI) CreateBlock(height uint64, lastCommit *types.Commit, la return r0 } - type MockExecutorI_CreateBlock_Call struct { *mock.Call } - - - - - - - func (_e *MockExecutorI_Expecter) CreateBlock(height interface{}, lastCommit interface{}, lastHeaderHash interface{}, nextSeqHash interface{}, _a4 interface{}, maxBlockDataSizeBytes interface{}) *MockExecutorI_CreateBlock_Call { return &MockExecutorI_CreateBlock_Call{Call: _e.mock.On("CreateBlock", height, lastCommit, lastHeaderHash, nextSeqHash, _a4, maxBlockDataSizeBytes)} } @@ -195,7 +173,6 @@ func (_c *MockExecutorI_CreateBlock_Call) RunAndReturn(run func(uint64, *types.C return _c } - func (_m *MockExecutorI) ExecuteBlock(_a0 *types.Block) (*state.ABCIResponses, error) { ret := _m.Called(_a0) @@ -225,13 +202,10 @@ func (_m *MockExecutorI) ExecuteBlock(_a0 *types.Block) (*state.ABCIResponses, e return r0, r1 } - type MockExecutorI_ExecuteBlock_Call struct { *mock.Call } - - func (_e *MockExecutorI_Expecter) ExecuteBlock(_a0 interface{}) *MockExecutorI_ExecuteBlock_Call { return &MockExecutorI_ExecuteBlock_Call{Call: _e.mock.On("ExecuteBlock", _a0)} } @@ -253,7 +227,6 @@ func (_c *MockExecutorI_ExecuteBlock_Call) RunAndReturn(run func(*types.Block) ( return _c } - func (_m *MockExecutorI) GetAppInfo() (*abcitypes.ResponseInfo, error) { ret := _m.Called() @@ -283,12 +256,10 @@ func (_m *MockExecutorI) GetAppInfo() (*abcitypes.ResponseInfo, error) { return r0, r1 } - type MockExecutorI_GetAppInfo_Call struct { *mock.Call } - func (_e 
*MockExecutorI_Expecter) GetAppInfo() *MockExecutorI_GetAppInfo_Call { return &MockExecutorI_GetAppInfo_Call{Call: _e.mock.On("GetAppInfo")} } @@ -310,7 +281,6 @@ func (_c *MockExecutorI_GetAppInfo_Call) RunAndReturn(run func() (*abcitypes.Res return _c } - func (_m *MockExecutorI) GetConsensusMsgs() []proto.Message { ret := _m.Called() @@ -330,12 +300,10 @@ func (_m *MockExecutorI) GetConsensusMsgs() []proto.Message { return r0 } - type MockExecutorI_GetConsensusMsgs_Call struct { *mock.Call } - func (_e *MockExecutorI_Expecter) GetConsensusMsgs() *MockExecutorI_GetConsensusMsgs_Call { return &MockExecutorI_GetConsensusMsgs_Call{Call: _e.mock.On("GetConsensusMsgs")} } @@ -357,7 +325,6 @@ func (_c *MockExecutorI_GetConsensusMsgs_Call) RunAndReturn(run func() []proto.M return _c } - func (_m *MockExecutorI) InitChain(genesis *tenderminttypes.GenesisDoc, genesisChecksum string, valset []*tenderminttypes.Validator) (*abcitypes.ResponseInitChain, error) { ret := _m.Called(genesis, genesisChecksum, valset) @@ -387,15 +354,10 @@ func (_m *MockExecutorI) InitChain(genesis *tenderminttypes.GenesisDoc, genesisC return r0, r1 } - type MockExecutorI_InitChain_Call struct { *mock.Call } - - - - func (_e *MockExecutorI_Expecter) InitChain(genesis interface{}, genesisChecksum interface{}, valset interface{}) *MockExecutorI_InitChain_Call { return &MockExecutorI_InitChain_Call{Call: _e.mock.On("InitChain", genesis, genesisChecksum, valset)} } @@ -417,18 +379,14 @@ func (_c *MockExecutorI_InitChain_Call) RunAndReturn(run func(*tenderminttypes.G return _c } - func (_m *MockExecutorI) UpdateMempoolAfterInitChain(s *types.State) { _m.Called(s) } - type MockExecutorI_UpdateMempoolAfterInitChain_Call struct { *mock.Call } - - func (_e *MockExecutorI_Expecter) UpdateMempoolAfterInitChain(s interface{}) *MockExecutorI_UpdateMempoolAfterInitChain_Call { return &MockExecutorI_UpdateMempoolAfterInitChain_Call{Call: _e.mock.On("UpdateMempoolAfterInitChain", s)} } @@ -450,7 +408,6 @@ func (_c *MockExecutorI_UpdateMempoolAfterInitChain_Call) RunAndReturn(run func( return _c } - func (_m *MockExecutorI) UpdateProposerFromBlock(s *types.State, seqSet *types.SequencerSet, _a2 *types.Block) bool { ret := _m.Called(s, seqSet, _a2) @@ -468,15 +425,10 @@ func (_m *MockExecutorI) UpdateProposerFromBlock(s *types.State, seqSet *types.S return r0 } - type MockExecutorI_UpdateProposerFromBlock_Call struct { *mock.Call } - - - - func (_e *MockExecutorI_Expecter) UpdateProposerFromBlock(s interface{}, seqSet interface{}, _a2 interface{}) *MockExecutorI_UpdateProposerFromBlock_Call { return &MockExecutorI_UpdateProposerFromBlock_Call{Call: _e.mock.On("UpdateProposerFromBlock", s, seqSet, _a2)} } @@ -498,22 +450,14 @@ func (_c *MockExecutorI_UpdateProposerFromBlock_Call) RunAndReturn(run func(*typ return _c } - func (_m *MockExecutorI) UpdateStateAfterCommit(s *types.State, resp *state.ABCIResponses, appHash []byte, height uint64, lastHeaderHash [32]byte) { _m.Called(s, resp, appHash, height, lastHeaderHash) } - type MockExecutorI_UpdateStateAfterCommit_Call struct { *mock.Call } - - - - - - func (_e *MockExecutorI_Expecter) UpdateStateAfterCommit(s interface{}, resp interface{}, appHash interface{}, height interface{}, lastHeaderHash interface{}) *MockExecutorI_UpdateStateAfterCommit_Call { return &MockExecutorI_UpdateStateAfterCommit_Call{Call: _e.mock.On("UpdateStateAfterCommit", s, resp, appHash, height, lastHeaderHash)} } @@ -535,19 +479,14 @@ func (_c *MockExecutorI_UpdateStateAfterCommit_Call) RunAndReturn(run func(*type 
return _c } - func (_m *MockExecutorI) UpdateStateAfterInitChain(s *types.State, res *abcitypes.ResponseInitChain) { _m.Called(s, res) } - type MockExecutorI_UpdateStateAfterInitChain_Call struct { *mock.Call } - - - func (_e *MockExecutorI_Expecter) UpdateStateAfterInitChain(s interface{}, res interface{}) *MockExecutorI_UpdateStateAfterInitChain_Call { return &MockExecutorI_UpdateStateAfterInitChain_Call{Call: _e.mock.On("UpdateStateAfterInitChain", s, res)} } @@ -569,12 +508,11 @@ func (_c *MockExecutorI_UpdateStateAfterInitChain_Call) RunAndReturn(run func(*t return _c } - - func NewMockExecutorI(t interface { mock.TestingT Cleanup(func()) -}) *MockExecutorI { +}, +) *MockExecutorI { mock := &MockExecutorI{} mock.Mock.Test(t) diff --git a/mocks/github.com/dymensionxyz/dymint/block/mock_FraudHandler.go b/mocks/github.com/dymensionxyz/dymint/block/mock_FraudHandler.go index 54b9098d2..77397e045 100644 --- a/mocks/github.com/dymensionxyz/dymint/block/mock_FraudHandler.go +++ b/mocks/github.com/dymensionxyz/dymint/block/mock_FraudHandler.go @@ -1,5 +1,3 @@ - - package block import ( @@ -8,7 +6,6 @@ import ( mock "github.com/stretchr/testify/mock" ) - type MockFraudHandler struct { mock.Mock } @@ -21,19 +18,14 @@ func (_m *MockFraudHandler) EXPECT() *MockFraudHandler_Expecter { return &MockFraudHandler_Expecter{mock: &_m.Mock} } - func (_m *MockFraudHandler) HandleFault(ctx context.Context, fault error) { _m.Called(ctx, fault) } - type MockFraudHandler_HandleFault_Call struct { *mock.Call } - - - func (_e *MockFraudHandler_Expecter) HandleFault(ctx interface{}, fault interface{}) *MockFraudHandler_HandleFault_Call { return &MockFraudHandler_HandleFault_Call{Call: _e.mock.On("HandleFault", ctx, fault)} } @@ -55,12 +47,11 @@ func (_c *MockFraudHandler_HandleFault_Call) RunAndReturn(run func(context.Conte return _c } - - func NewMockFraudHandler(t interface { mock.TestingT Cleanup(func()) -}) *MockFraudHandler { +}, +) *MockFraudHandler { mock := &MockFraudHandler{} mock.Mock.Test(t) diff --git a/mocks/github.com/dymensionxyz/dymint/da/avail/mock_SubstrateApiI.go b/mocks/github.com/dymensionxyz/dymint/da/avail/mock_SubstrateApiI.go index b591d3572..8813ef9f4 100644 --- a/mocks/github.com/dymensionxyz/dymint/da/avail/mock_SubstrateApiI.go +++ b/mocks/github.com/dymensionxyz/dymint/da/avail/mock_SubstrateApiI.go @@ -1,5 +1,3 @@ - - package avail import ( @@ -14,7 +12,6 @@ import ( types "github.com/centrifuge/go-substrate-rpc-client/v4/types" ) - type MockSubstrateApiI struct { mock.Mock } @@ -27,7 +24,6 @@ func (_m *MockSubstrateApiI) EXPECT() *MockSubstrateApiI_Expecter { return &MockSubstrateApiI_Expecter{mock: &_m.Mock} } - func (_m *MockSubstrateApiI) GetBlock(blockHash types.Hash) (*types.SignedBlock, error) { ret := _m.Called(blockHash) @@ -57,13 +53,10 @@ func (_m *MockSubstrateApiI) GetBlock(blockHash types.Hash) (*types.SignedBlock, return r0, r1 } - type MockSubstrateApiI_GetBlock_Call struct { *mock.Call } - - func (_e *MockSubstrateApiI_Expecter) GetBlock(blockHash interface{}) *MockSubstrateApiI_GetBlock_Call { return &MockSubstrateApiI_GetBlock_Call{Call: _e.mock.On("GetBlock", blockHash)} } @@ -85,7 +78,6 @@ func (_c *MockSubstrateApiI_GetBlock_Call) RunAndReturn(run func(types.Hash) (*t return _c } - func (_m *MockSubstrateApiI) GetBlockHash(blockNumber uint64) (types.Hash, error) { ret := _m.Called(blockNumber) @@ -115,13 +107,10 @@ func (_m *MockSubstrateApiI) GetBlockHash(blockNumber uint64) (types.Hash, error return r0, r1 } - type MockSubstrateApiI_GetBlockHash_Call struct 
{ *mock.Call } - - func (_e *MockSubstrateApiI_Expecter) GetBlockHash(blockNumber interface{}) *MockSubstrateApiI_GetBlockHash_Call { return &MockSubstrateApiI_GetBlockHash_Call{Call: _e.mock.On("GetBlockHash", blockNumber)} } @@ -143,7 +132,6 @@ func (_c *MockSubstrateApiI_GetBlockHash_Call) RunAndReturn(run func(uint64) (ty return _c } - func (_m *MockSubstrateApiI) GetBlockHashLatest() (types.Hash, error) { ret := _m.Called() @@ -173,12 +161,10 @@ func (_m *MockSubstrateApiI) GetBlockHashLatest() (types.Hash, error) { return r0, r1 } - type MockSubstrateApiI_GetBlockHashLatest_Call struct { *mock.Call } - func (_e *MockSubstrateApiI_Expecter) GetBlockHashLatest() *MockSubstrateApiI_GetBlockHashLatest_Call { return &MockSubstrateApiI_GetBlockHashLatest_Call{Call: _e.mock.On("GetBlockHashLatest")} } @@ -200,7 +186,6 @@ func (_c *MockSubstrateApiI_GetBlockHashLatest_Call) RunAndReturn(run func() (ty return _c } - func (_m *MockSubstrateApiI) GetBlockLatest() (*types.SignedBlock, error) { ret := _m.Called() @@ -230,12 +215,10 @@ func (_m *MockSubstrateApiI) GetBlockLatest() (*types.SignedBlock, error) { return r0, r1 } - type MockSubstrateApiI_GetBlockLatest_Call struct { *mock.Call } - func (_e *MockSubstrateApiI_Expecter) GetBlockLatest() *MockSubstrateApiI_GetBlockLatest_Call { return &MockSubstrateApiI_GetBlockLatest_Call{Call: _e.mock.On("GetBlockLatest")} } @@ -257,7 +240,6 @@ func (_c *MockSubstrateApiI_GetBlockLatest_Call) RunAndReturn(run func() (*types return _c } - func (_m *MockSubstrateApiI) GetChildKeys(childStorageKey types.StorageKey, prefix types.StorageKey, blockHash types.Hash) ([]types.StorageKey, error) { ret := _m.Called(childStorageKey, prefix, blockHash) @@ -287,15 +269,10 @@ func (_m *MockSubstrateApiI) GetChildKeys(childStorageKey types.StorageKey, pref return r0, r1 } - type MockSubstrateApiI_GetChildKeys_Call struct { *mock.Call } - - - - func (_e *MockSubstrateApiI_Expecter) GetChildKeys(childStorageKey interface{}, prefix interface{}, blockHash interface{}) *MockSubstrateApiI_GetChildKeys_Call { return &MockSubstrateApiI_GetChildKeys_Call{Call: _e.mock.On("GetChildKeys", childStorageKey, prefix, blockHash)} } @@ -317,7 +294,6 @@ func (_c *MockSubstrateApiI_GetChildKeys_Call) RunAndReturn(run func(types.Stora return _c } - func (_m *MockSubstrateApiI) GetChildKeysLatest(childStorageKey types.StorageKey, prefix types.StorageKey) ([]types.StorageKey, error) { ret := _m.Called(childStorageKey, prefix) @@ -347,14 +323,10 @@ func (_m *MockSubstrateApiI) GetChildKeysLatest(childStorageKey types.StorageKey return r0, r1 } - type MockSubstrateApiI_GetChildKeysLatest_Call struct { *mock.Call } - - - func (_e *MockSubstrateApiI_Expecter) GetChildKeysLatest(childStorageKey interface{}, prefix interface{}) *MockSubstrateApiI_GetChildKeysLatest_Call { return &MockSubstrateApiI_GetChildKeysLatest_Call{Call: _e.mock.On("GetChildKeysLatest", childStorageKey, prefix)} } @@ -376,7 +348,6 @@ func (_c *MockSubstrateApiI_GetChildKeysLatest_Call) RunAndReturn(run func(types return _c } - func (_m *MockSubstrateApiI) GetChildStorage(childStorageKey types.StorageKey, key types.StorageKey, target interface{}, blockHash types.Hash) (bool, error) { ret := _m.Called(childStorageKey, key, target, blockHash) @@ -404,16 +375,10 @@ func (_m *MockSubstrateApiI) GetChildStorage(childStorageKey types.StorageKey, k return r0, r1 } - type MockSubstrateApiI_GetChildStorage_Call struct { *mock.Call } - - - - - func (_e *MockSubstrateApiI_Expecter) GetChildStorage(childStorageKey interface{}, key 
interface{}, target interface{}, blockHash interface{}) *MockSubstrateApiI_GetChildStorage_Call { return &MockSubstrateApiI_GetChildStorage_Call{Call: _e.mock.On("GetChildStorage", childStorageKey, key, target, blockHash)} } @@ -435,7 +400,6 @@ func (_c *MockSubstrateApiI_GetChildStorage_Call) RunAndReturn(run func(types.St return _c } - func (_m *MockSubstrateApiI) GetChildStorageHash(childStorageKey types.StorageKey, key types.StorageKey, blockHash types.Hash) (types.Hash, error) { ret := _m.Called(childStorageKey, key, blockHash) @@ -465,15 +429,10 @@ func (_m *MockSubstrateApiI) GetChildStorageHash(childStorageKey types.StorageKe return r0, r1 } - type MockSubstrateApiI_GetChildStorageHash_Call struct { *mock.Call } - - - - func (_e *MockSubstrateApiI_Expecter) GetChildStorageHash(childStorageKey interface{}, key interface{}, blockHash interface{}) *MockSubstrateApiI_GetChildStorageHash_Call { return &MockSubstrateApiI_GetChildStorageHash_Call{Call: _e.mock.On("GetChildStorageHash", childStorageKey, key, blockHash)} } @@ -495,7 +454,6 @@ func (_c *MockSubstrateApiI_GetChildStorageHash_Call) RunAndReturn(run func(type return _c } - func (_m *MockSubstrateApiI) GetChildStorageHashLatest(childStorageKey types.StorageKey, key types.StorageKey) (types.Hash, error) { ret := _m.Called(childStorageKey, key) @@ -525,14 +483,10 @@ func (_m *MockSubstrateApiI) GetChildStorageHashLatest(childStorageKey types.Sto return r0, r1 } - type MockSubstrateApiI_GetChildStorageHashLatest_Call struct { *mock.Call } - - - func (_e *MockSubstrateApiI_Expecter) GetChildStorageHashLatest(childStorageKey interface{}, key interface{}) *MockSubstrateApiI_GetChildStorageHashLatest_Call { return &MockSubstrateApiI_GetChildStorageHashLatest_Call{Call: _e.mock.On("GetChildStorageHashLatest", childStorageKey, key)} } @@ -554,7 +508,6 @@ func (_c *MockSubstrateApiI_GetChildStorageHashLatest_Call) RunAndReturn(run fun return _c } - func (_m *MockSubstrateApiI) GetChildStorageLatest(childStorageKey types.StorageKey, key types.StorageKey, target interface{}) (bool, error) { ret := _m.Called(childStorageKey, key, target) @@ -582,15 +535,10 @@ func (_m *MockSubstrateApiI) GetChildStorageLatest(childStorageKey types.Storage return r0, r1 } - type MockSubstrateApiI_GetChildStorageLatest_Call struct { *mock.Call } - - - - func (_e *MockSubstrateApiI_Expecter) GetChildStorageLatest(childStorageKey interface{}, key interface{}, target interface{}) *MockSubstrateApiI_GetChildStorageLatest_Call { return &MockSubstrateApiI_GetChildStorageLatest_Call{Call: _e.mock.On("GetChildStorageLatest", childStorageKey, key, target)} } @@ -612,7 +560,6 @@ func (_c *MockSubstrateApiI_GetChildStorageLatest_Call) RunAndReturn(run func(ty return _c } - func (_m *MockSubstrateApiI) GetChildStorageRaw(childStorageKey types.StorageKey, key types.StorageKey, blockHash types.Hash) (*types.StorageDataRaw, error) { ret := _m.Called(childStorageKey, key, blockHash) @@ -642,15 +589,10 @@ func (_m *MockSubstrateApiI) GetChildStorageRaw(childStorageKey types.StorageKey return r0, r1 } - type MockSubstrateApiI_GetChildStorageRaw_Call struct { *mock.Call } - - - - func (_e *MockSubstrateApiI_Expecter) GetChildStorageRaw(childStorageKey interface{}, key interface{}, blockHash interface{}) *MockSubstrateApiI_GetChildStorageRaw_Call { return &MockSubstrateApiI_GetChildStorageRaw_Call{Call: _e.mock.On("GetChildStorageRaw", childStorageKey, key, blockHash)} } @@ -672,7 +614,6 @@ func (_c *MockSubstrateApiI_GetChildStorageRaw_Call) RunAndReturn(run func(types return _c 
} - func (_m *MockSubstrateApiI) GetChildStorageRawLatest(childStorageKey types.StorageKey, key types.StorageKey) (*types.StorageDataRaw, error) { ret := _m.Called(childStorageKey, key) @@ -702,14 +643,10 @@ func (_m *MockSubstrateApiI) GetChildStorageRawLatest(childStorageKey types.Stor return r0, r1 } - type MockSubstrateApiI_GetChildStorageRawLatest_Call struct { *mock.Call } - - - func (_e *MockSubstrateApiI_Expecter) GetChildStorageRawLatest(childStorageKey interface{}, key interface{}) *MockSubstrateApiI_GetChildStorageRawLatest_Call { return &MockSubstrateApiI_GetChildStorageRawLatest_Call{Call: _e.mock.On("GetChildStorageRawLatest", childStorageKey, key)} } @@ -731,7 +668,6 @@ func (_c *MockSubstrateApiI_GetChildStorageRawLatest_Call) RunAndReturn(run func return _c } - func (_m *MockSubstrateApiI) GetChildStorageSize(childStorageKey types.StorageKey, key types.StorageKey, blockHash types.Hash) (types.U64, error) { ret := _m.Called(childStorageKey, key, blockHash) @@ -759,15 +695,10 @@ func (_m *MockSubstrateApiI) GetChildStorageSize(childStorageKey types.StorageKe return r0, r1 } - type MockSubstrateApiI_GetChildStorageSize_Call struct { *mock.Call } - - - - func (_e *MockSubstrateApiI_Expecter) GetChildStorageSize(childStorageKey interface{}, key interface{}, blockHash interface{}) *MockSubstrateApiI_GetChildStorageSize_Call { return &MockSubstrateApiI_GetChildStorageSize_Call{Call: _e.mock.On("GetChildStorageSize", childStorageKey, key, blockHash)} } @@ -789,7 +720,6 @@ func (_c *MockSubstrateApiI_GetChildStorageSize_Call) RunAndReturn(run func(type return _c } - func (_m *MockSubstrateApiI) GetChildStorageSizeLatest(childStorageKey types.StorageKey, key types.StorageKey) (types.U64, error) { ret := _m.Called(childStorageKey, key) @@ -817,14 +747,10 @@ func (_m *MockSubstrateApiI) GetChildStorageSizeLatest(childStorageKey types.Sto return r0, r1 } - type MockSubstrateApiI_GetChildStorageSizeLatest_Call struct { *mock.Call } - - - func (_e *MockSubstrateApiI_Expecter) GetChildStorageSizeLatest(childStorageKey interface{}, key interface{}) *MockSubstrateApiI_GetChildStorageSizeLatest_Call { return &MockSubstrateApiI_GetChildStorageSizeLatest_Call{Call: _e.mock.On("GetChildStorageSizeLatest", childStorageKey, key)} } @@ -846,7 +772,6 @@ func (_c *MockSubstrateApiI_GetChildStorageSizeLatest_Call) RunAndReturn(run fun return _c } - func (_m *MockSubstrateApiI) GetFinalizedHead() (types.Hash, error) { ret := _m.Called() @@ -876,12 +801,10 @@ func (_m *MockSubstrateApiI) GetFinalizedHead() (types.Hash, error) { return r0, r1 } - type MockSubstrateApiI_GetFinalizedHead_Call struct { *mock.Call } - func (_e *MockSubstrateApiI_Expecter) GetFinalizedHead() *MockSubstrateApiI_GetFinalizedHead_Call { return &MockSubstrateApiI_GetFinalizedHead_Call{Call: _e.mock.On("GetFinalizedHead")} } @@ -903,7 +826,6 @@ func (_c *MockSubstrateApiI_GetFinalizedHead_Call) RunAndReturn(run func() (type return _c } - func (_m *MockSubstrateApiI) GetHeader(blockHash types.Hash) (*types.Header, error) { ret := _m.Called(blockHash) @@ -933,13 +855,10 @@ func (_m *MockSubstrateApiI) GetHeader(blockHash types.Hash) (*types.Header, err return r0, r1 } - type MockSubstrateApiI_GetHeader_Call struct { *mock.Call } - - func (_e *MockSubstrateApiI_Expecter) GetHeader(blockHash interface{}) *MockSubstrateApiI_GetHeader_Call { return &MockSubstrateApiI_GetHeader_Call{Call: _e.mock.On("GetHeader", blockHash)} } @@ -961,7 +880,6 @@ func (_c *MockSubstrateApiI_GetHeader_Call) RunAndReturn(run func(types.Hash) (* return _c } 
- func (_m *MockSubstrateApiI) GetHeaderLatest() (*types.Header, error) { ret := _m.Called() @@ -991,12 +909,10 @@ func (_m *MockSubstrateApiI) GetHeaderLatest() (*types.Header, error) { return r0, r1 } - type MockSubstrateApiI_GetHeaderLatest_Call struct { *mock.Call } - func (_e *MockSubstrateApiI_Expecter) GetHeaderLatest() *MockSubstrateApiI_GetHeaderLatest_Call { return &MockSubstrateApiI_GetHeaderLatest_Call{Call: _e.mock.On("GetHeaderLatest")} } @@ -1018,7 +934,6 @@ func (_c *MockSubstrateApiI_GetHeaderLatest_Call) RunAndReturn(run func() (*type return _c } - func (_m *MockSubstrateApiI) GetKeys(prefix types.StorageKey, blockHash types.Hash) ([]types.StorageKey, error) { ret := _m.Called(prefix, blockHash) @@ -1048,14 +963,10 @@ func (_m *MockSubstrateApiI) GetKeys(prefix types.StorageKey, blockHash types.Ha return r0, r1 } - type MockSubstrateApiI_GetKeys_Call struct { *mock.Call } - - - func (_e *MockSubstrateApiI_Expecter) GetKeys(prefix interface{}, blockHash interface{}) *MockSubstrateApiI_GetKeys_Call { return &MockSubstrateApiI_GetKeys_Call{Call: _e.mock.On("GetKeys", prefix, blockHash)} } @@ -1077,7 +988,6 @@ func (_c *MockSubstrateApiI_GetKeys_Call) RunAndReturn(run func(types.StorageKey return _c } - func (_m *MockSubstrateApiI) GetKeysLatest(prefix types.StorageKey) ([]types.StorageKey, error) { ret := _m.Called(prefix) @@ -1107,13 +1017,10 @@ func (_m *MockSubstrateApiI) GetKeysLatest(prefix types.StorageKey) ([]types.Sto return r0, r1 } - type MockSubstrateApiI_GetKeysLatest_Call struct { *mock.Call } - - func (_e *MockSubstrateApiI_Expecter) GetKeysLatest(prefix interface{}) *MockSubstrateApiI_GetKeysLatest_Call { return &MockSubstrateApiI_GetKeysLatest_Call{Call: _e.mock.On("GetKeysLatest", prefix)} } @@ -1135,7 +1042,6 @@ func (_c *MockSubstrateApiI_GetKeysLatest_Call) RunAndReturn(run func(types.Stor return _c } - func (_m *MockSubstrateApiI) GetMetadata(blockHash types.Hash) (*types.Metadata, error) { ret := _m.Called(blockHash) @@ -1165,13 +1071,10 @@ func (_m *MockSubstrateApiI) GetMetadata(blockHash types.Hash) (*types.Metadata, return r0, r1 } - type MockSubstrateApiI_GetMetadata_Call struct { *mock.Call } - - func (_e *MockSubstrateApiI_Expecter) GetMetadata(blockHash interface{}) *MockSubstrateApiI_GetMetadata_Call { return &MockSubstrateApiI_GetMetadata_Call{Call: _e.mock.On("GetMetadata", blockHash)} } @@ -1193,7 +1096,6 @@ func (_c *MockSubstrateApiI_GetMetadata_Call) RunAndReturn(run func(types.Hash) return _c } - func (_m *MockSubstrateApiI) GetMetadataLatest() (*types.Metadata, error) { ret := _m.Called() @@ -1223,12 +1125,10 @@ func (_m *MockSubstrateApiI) GetMetadataLatest() (*types.Metadata, error) { return r0, r1 } - type MockSubstrateApiI_GetMetadataLatest_Call struct { *mock.Call } - func (_e *MockSubstrateApiI_Expecter) GetMetadataLatest() *MockSubstrateApiI_GetMetadataLatest_Call { return &MockSubstrateApiI_GetMetadataLatest_Call{Call: _e.mock.On("GetMetadataLatest")} } @@ -1250,7 +1150,6 @@ func (_c *MockSubstrateApiI_GetMetadataLatest_Call) RunAndReturn(run func() (*ty return _c } - func (_m *MockSubstrateApiI) GetRuntimeVersion(blockHash types.Hash) (*types.RuntimeVersion, error) { ret := _m.Called(blockHash) @@ -1280,13 +1179,10 @@ func (_m *MockSubstrateApiI) GetRuntimeVersion(blockHash types.Hash) (*types.Run return r0, r1 } - type MockSubstrateApiI_GetRuntimeVersion_Call struct { *mock.Call } - - func (_e *MockSubstrateApiI_Expecter) GetRuntimeVersion(blockHash interface{}) *MockSubstrateApiI_GetRuntimeVersion_Call { return 
&MockSubstrateApiI_GetRuntimeVersion_Call{Call: _e.mock.On("GetRuntimeVersion", blockHash)} } @@ -1308,7 +1204,6 @@ func (_c *MockSubstrateApiI_GetRuntimeVersion_Call) RunAndReturn(run func(types. return _c } - func (_m *MockSubstrateApiI) GetRuntimeVersionLatest() (*types.RuntimeVersion, error) { ret := _m.Called() @@ -1338,12 +1233,10 @@ func (_m *MockSubstrateApiI) GetRuntimeVersionLatest() (*types.RuntimeVersion, e return r0, r1 } - type MockSubstrateApiI_GetRuntimeVersionLatest_Call struct { *mock.Call } - func (_e *MockSubstrateApiI_Expecter) GetRuntimeVersionLatest() *MockSubstrateApiI_GetRuntimeVersionLatest_Call { return &MockSubstrateApiI_GetRuntimeVersionLatest_Call{Call: _e.mock.On("GetRuntimeVersionLatest")} } @@ -1365,7 +1258,6 @@ func (_c *MockSubstrateApiI_GetRuntimeVersionLatest_Call) RunAndReturn(run func( return _c } - func (_m *MockSubstrateApiI) GetStorage(key types.StorageKey, target interface{}, blockHash types.Hash) (bool, error) { ret := _m.Called(key, target, blockHash) @@ -1393,15 +1285,10 @@ func (_m *MockSubstrateApiI) GetStorage(key types.StorageKey, target interface{} return r0, r1 } - type MockSubstrateApiI_GetStorage_Call struct { *mock.Call } - - - - func (_e *MockSubstrateApiI_Expecter) GetStorage(key interface{}, target interface{}, blockHash interface{}) *MockSubstrateApiI_GetStorage_Call { return &MockSubstrateApiI_GetStorage_Call{Call: _e.mock.On("GetStorage", key, target, blockHash)} } @@ -1423,7 +1310,6 @@ func (_c *MockSubstrateApiI_GetStorage_Call) RunAndReturn(run func(types.Storage return _c } - func (_m *MockSubstrateApiI) GetStorageHash(key types.StorageKey, blockHash types.Hash) (types.Hash, error) { ret := _m.Called(key, blockHash) @@ -1453,14 +1339,10 @@ func (_m *MockSubstrateApiI) GetStorageHash(key types.StorageKey, blockHash type return r0, r1 } - type MockSubstrateApiI_GetStorageHash_Call struct { *mock.Call } - - - func (_e *MockSubstrateApiI_Expecter) GetStorageHash(key interface{}, blockHash interface{}) *MockSubstrateApiI_GetStorageHash_Call { return &MockSubstrateApiI_GetStorageHash_Call{Call: _e.mock.On("GetStorageHash", key, blockHash)} } @@ -1482,7 +1364,6 @@ func (_c *MockSubstrateApiI_GetStorageHash_Call) RunAndReturn(run func(types.Sto return _c } - func (_m *MockSubstrateApiI) GetStorageHashLatest(key types.StorageKey) (types.Hash, error) { ret := _m.Called(key) @@ -1512,13 +1393,10 @@ func (_m *MockSubstrateApiI) GetStorageHashLatest(key types.StorageKey) (types.H return r0, r1 } - type MockSubstrateApiI_GetStorageHashLatest_Call struct { *mock.Call } - - func (_e *MockSubstrateApiI_Expecter) GetStorageHashLatest(key interface{}) *MockSubstrateApiI_GetStorageHashLatest_Call { return &MockSubstrateApiI_GetStorageHashLatest_Call{Call: _e.mock.On("GetStorageHashLatest", key)} } @@ -1540,7 +1418,6 @@ func (_c *MockSubstrateApiI_GetStorageHashLatest_Call) RunAndReturn(run func(typ return _c } - func (_m *MockSubstrateApiI) GetStorageLatest(key types.StorageKey, target interface{}) (bool, error) { ret := _m.Called(key, target) @@ -1568,14 +1445,10 @@ func (_m *MockSubstrateApiI) GetStorageLatest(key types.StorageKey, target inter return r0, r1 } - type MockSubstrateApiI_GetStorageLatest_Call struct { *mock.Call } - - - func (_e *MockSubstrateApiI_Expecter) GetStorageLatest(key interface{}, target interface{}) *MockSubstrateApiI_GetStorageLatest_Call { return &MockSubstrateApiI_GetStorageLatest_Call{Call: _e.mock.On("GetStorageLatest", key, target)} } @@ -1597,7 +1470,6 @@ func (_c *MockSubstrateApiI_GetStorageLatest_Call) 
RunAndReturn(run func(types.S return _c } - func (_m *MockSubstrateApiI) GetStorageRaw(key types.StorageKey, blockHash types.Hash) (*types.StorageDataRaw, error) { ret := _m.Called(key, blockHash) @@ -1627,14 +1499,10 @@ func (_m *MockSubstrateApiI) GetStorageRaw(key types.StorageKey, blockHash types return r0, r1 } - type MockSubstrateApiI_GetStorageRaw_Call struct { *mock.Call } - - - func (_e *MockSubstrateApiI_Expecter) GetStorageRaw(key interface{}, blockHash interface{}) *MockSubstrateApiI_GetStorageRaw_Call { return &MockSubstrateApiI_GetStorageRaw_Call{Call: _e.mock.On("GetStorageRaw", key, blockHash)} } @@ -1656,7 +1524,6 @@ func (_c *MockSubstrateApiI_GetStorageRaw_Call) RunAndReturn(run func(types.Stor return _c } - func (_m *MockSubstrateApiI) GetStorageRawLatest(key types.StorageKey) (*types.StorageDataRaw, error) { ret := _m.Called(key) @@ -1686,13 +1553,10 @@ func (_m *MockSubstrateApiI) GetStorageRawLatest(key types.StorageKey) (*types.S return r0, r1 } - type MockSubstrateApiI_GetStorageRawLatest_Call struct { *mock.Call } - - func (_e *MockSubstrateApiI_Expecter) GetStorageRawLatest(key interface{}) *MockSubstrateApiI_GetStorageRawLatest_Call { return &MockSubstrateApiI_GetStorageRawLatest_Call{Call: _e.mock.On("GetStorageRawLatest", key)} } @@ -1714,7 +1578,6 @@ func (_c *MockSubstrateApiI_GetStorageRawLatest_Call) RunAndReturn(run func(type return _c } - func (_m *MockSubstrateApiI) GetStorageSize(key types.StorageKey, blockHash types.Hash) (types.U64, error) { ret := _m.Called(key, blockHash) @@ -1742,14 +1605,10 @@ func (_m *MockSubstrateApiI) GetStorageSize(key types.StorageKey, blockHash type return r0, r1 } - type MockSubstrateApiI_GetStorageSize_Call struct { *mock.Call } - - - func (_e *MockSubstrateApiI_Expecter) GetStorageSize(key interface{}, blockHash interface{}) *MockSubstrateApiI_GetStorageSize_Call { return &MockSubstrateApiI_GetStorageSize_Call{Call: _e.mock.On("GetStorageSize", key, blockHash)} } @@ -1771,7 +1630,6 @@ func (_c *MockSubstrateApiI_GetStorageSize_Call) RunAndReturn(run func(types.Sto return _c } - func (_m *MockSubstrateApiI) GetStorageSizeLatest(key types.StorageKey) (types.U64, error) { ret := _m.Called(key) @@ -1799,13 +1657,10 @@ func (_m *MockSubstrateApiI) GetStorageSizeLatest(key types.StorageKey) (types.U return r0, r1 } - type MockSubstrateApiI_GetStorageSizeLatest_Call struct { *mock.Call } - - func (_e *MockSubstrateApiI_Expecter) GetStorageSizeLatest(key interface{}) *MockSubstrateApiI_GetStorageSizeLatest_Call { return &MockSubstrateApiI_GetStorageSizeLatest_Call{Call: _e.mock.On("GetStorageSizeLatest", key)} } @@ -1827,7 +1682,6 @@ func (_c *MockSubstrateApiI_GetStorageSizeLatest_Call) RunAndReturn(run func(typ return _c } - func (_m *MockSubstrateApiI) PendingExtrinsics() ([]types.Extrinsic, error) { ret := _m.Called() @@ -1857,12 +1711,10 @@ func (_m *MockSubstrateApiI) PendingExtrinsics() ([]types.Extrinsic, error) { return r0, r1 } - type MockSubstrateApiI_PendingExtrinsics_Call struct { *mock.Call } - func (_e *MockSubstrateApiI_Expecter) PendingExtrinsics() *MockSubstrateApiI_PendingExtrinsics_Call { return &MockSubstrateApiI_PendingExtrinsics_Call{Call: _e.mock.On("PendingExtrinsics")} } @@ -1884,7 +1736,6 @@ func (_c *MockSubstrateApiI_PendingExtrinsics_Call) RunAndReturn(run func() ([]t return _c } - func (_m *MockSubstrateApiI) QueryStorage(keys []types.StorageKey, startBlock types.Hash, block types.Hash) ([]types.StorageChangeSet, error) { ret := _m.Called(keys, startBlock, block) @@ -1914,15 +1765,10 @@ func (_m 
*MockSubstrateApiI) QueryStorage(keys []types.StorageKey, startBlock ty return r0, r1 } - type MockSubstrateApiI_QueryStorage_Call struct { *mock.Call } - - - - func (_e *MockSubstrateApiI_Expecter) QueryStorage(keys interface{}, startBlock interface{}, block interface{}) *MockSubstrateApiI_QueryStorage_Call { return &MockSubstrateApiI_QueryStorage_Call{Call: _e.mock.On("QueryStorage", keys, startBlock, block)} } @@ -1944,7 +1790,6 @@ func (_c *MockSubstrateApiI_QueryStorage_Call) RunAndReturn(run func([]types.Sto return _c } - func (_m *MockSubstrateApiI) QueryStorageAt(keys []types.StorageKey, block types.Hash) ([]types.StorageChangeSet, error) { ret := _m.Called(keys, block) @@ -1974,14 +1819,10 @@ func (_m *MockSubstrateApiI) QueryStorageAt(keys []types.StorageKey, block types return r0, r1 } - type MockSubstrateApiI_QueryStorageAt_Call struct { *mock.Call } - - - func (_e *MockSubstrateApiI_Expecter) QueryStorageAt(keys interface{}, block interface{}) *MockSubstrateApiI_QueryStorageAt_Call { return &MockSubstrateApiI_QueryStorageAt_Call{Call: _e.mock.On("QueryStorageAt", keys, block)} } @@ -2003,7 +1844,6 @@ func (_c *MockSubstrateApiI_QueryStorageAt_Call) RunAndReturn(run func([]types.S return _c } - func (_m *MockSubstrateApiI) QueryStorageAtLatest(keys []types.StorageKey) ([]types.StorageChangeSet, error) { ret := _m.Called(keys) @@ -2033,13 +1873,10 @@ func (_m *MockSubstrateApiI) QueryStorageAtLatest(keys []types.StorageKey) ([]ty return r0, r1 } - type MockSubstrateApiI_QueryStorageAtLatest_Call struct { *mock.Call } - - func (_e *MockSubstrateApiI_Expecter) QueryStorageAtLatest(keys interface{}) *MockSubstrateApiI_QueryStorageAtLatest_Call { return &MockSubstrateApiI_QueryStorageAtLatest_Call{Call: _e.mock.On("QueryStorageAtLatest", keys)} } @@ -2061,7 +1898,6 @@ func (_c *MockSubstrateApiI_QueryStorageAtLatest_Call) RunAndReturn(run func([]t return _c } - func (_m *MockSubstrateApiI) QueryStorageLatest(keys []types.StorageKey, startBlock types.Hash) ([]types.StorageChangeSet, error) { ret := _m.Called(keys, startBlock) @@ -2091,14 +1927,10 @@ func (_m *MockSubstrateApiI) QueryStorageLatest(keys []types.StorageKey, startBl return r0, r1 } - type MockSubstrateApiI_QueryStorageLatest_Call struct { *mock.Call } - - - func (_e *MockSubstrateApiI_Expecter) QueryStorageLatest(keys interface{}, startBlock interface{}) *MockSubstrateApiI_QueryStorageLatest_Call { return &MockSubstrateApiI_QueryStorageLatest_Call{Call: _e.mock.On("QueryStorageLatest", keys, startBlock)} } @@ -2120,7 +1952,6 @@ func (_c *MockSubstrateApiI_QueryStorageLatest_Call) RunAndReturn(run func([]typ return _c } - func (_m *MockSubstrateApiI) SubmitAndWatchExtrinsic(xt types.Extrinsic) (*author.ExtrinsicStatusSubscription, error) { ret := _m.Called(xt) @@ -2150,13 +1981,10 @@ func (_m *MockSubstrateApiI) SubmitAndWatchExtrinsic(xt types.Extrinsic) (*autho return r0, r1 } - type MockSubstrateApiI_SubmitAndWatchExtrinsic_Call struct { *mock.Call } - - func (_e *MockSubstrateApiI_Expecter) SubmitAndWatchExtrinsic(xt interface{}) *MockSubstrateApiI_SubmitAndWatchExtrinsic_Call { return &MockSubstrateApiI_SubmitAndWatchExtrinsic_Call{Call: _e.mock.On("SubmitAndWatchExtrinsic", xt)} } @@ -2178,7 +2006,6 @@ func (_c *MockSubstrateApiI_SubmitAndWatchExtrinsic_Call) RunAndReturn(run func( return _c } - func (_m *MockSubstrateApiI) SubmitExtrinsic(xt types.Extrinsic) (types.Hash, error) { ret := _m.Called(xt) @@ -2208,13 +2035,10 @@ func (_m *MockSubstrateApiI) SubmitExtrinsic(xt types.Extrinsic) (types.Hash, er return r0, 
r1 } - type MockSubstrateApiI_SubmitExtrinsic_Call struct { *mock.Call } - - func (_e *MockSubstrateApiI_Expecter) SubmitExtrinsic(xt interface{}) *MockSubstrateApiI_SubmitExtrinsic_Call { return &MockSubstrateApiI_SubmitExtrinsic_Call{Call: _e.mock.On("SubmitExtrinsic", xt)} } @@ -2236,7 +2060,6 @@ func (_c *MockSubstrateApiI_SubmitExtrinsic_Call) RunAndReturn(run func(types.Ex return _c } - func (_m *MockSubstrateApiI) SubscribeFinalizedHeads() (*chain.FinalizedHeadsSubscription, error) { ret := _m.Called() @@ -2266,12 +2089,10 @@ func (_m *MockSubstrateApiI) SubscribeFinalizedHeads() (*chain.FinalizedHeadsSub return r0, r1 } - type MockSubstrateApiI_SubscribeFinalizedHeads_Call struct { *mock.Call } - func (_e *MockSubstrateApiI_Expecter) SubscribeFinalizedHeads() *MockSubstrateApiI_SubscribeFinalizedHeads_Call { return &MockSubstrateApiI_SubscribeFinalizedHeads_Call{Call: _e.mock.On("SubscribeFinalizedHeads")} } @@ -2293,7 +2114,6 @@ func (_c *MockSubstrateApiI_SubscribeFinalizedHeads_Call) RunAndReturn(run func( return _c } - func (_m *MockSubstrateApiI) SubscribeNewHeads() (*chain.NewHeadsSubscription, error) { ret := _m.Called() @@ -2323,12 +2143,10 @@ func (_m *MockSubstrateApiI) SubscribeNewHeads() (*chain.NewHeadsSubscription, e return r0, r1 } - type MockSubstrateApiI_SubscribeNewHeads_Call struct { *mock.Call } - func (_e *MockSubstrateApiI_Expecter) SubscribeNewHeads() *MockSubstrateApiI_SubscribeNewHeads_Call { return &MockSubstrateApiI_SubscribeNewHeads_Call{Call: _e.mock.On("SubscribeNewHeads")} } @@ -2350,7 +2168,6 @@ func (_c *MockSubstrateApiI_SubscribeNewHeads_Call) RunAndReturn(run func() (*ch return _c } - func (_m *MockSubstrateApiI) SubscribeRuntimeVersion() (*state.RuntimeVersionSubscription, error) { ret := _m.Called() @@ -2380,12 +2197,10 @@ func (_m *MockSubstrateApiI) SubscribeRuntimeVersion() (*state.RuntimeVersionSub return r0, r1 } - type MockSubstrateApiI_SubscribeRuntimeVersion_Call struct { *mock.Call } - func (_e *MockSubstrateApiI_Expecter) SubscribeRuntimeVersion() *MockSubstrateApiI_SubscribeRuntimeVersion_Call { return &MockSubstrateApiI_SubscribeRuntimeVersion_Call{Call: _e.mock.On("SubscribeRuntimeVersion")} } @@ -2407,7 +2222,6 @@ func (_c *MockSubstrateApiI_SubscribeRuntimeVersion_Call) RunAndReturn(run func( return _c } - func (_m *MockSubstrateApiI) SubscribeStorageRaw(keys []types.StorageKey) (*state.StorageSubscription, error) { ret := _m.Called(keys) @@ -2437,13 +2251,10 @@ func (_m *MockSubstrateApiI) SubscribeStorageRaw(keys []types.StorageKey) (*stat return r0, r1 } - type MockSubstrateApiI_SubscribeStorageRaw_Call struct { *mock.Call } - - func (_e *MockSubstrateApiI_Expecter) SubscribeStorageRaw(keys interface{}) *MockSubstrateApiI_SubscribeStorageRaw_Call { return &MockSubstrateApiI_SubscribeStorageRaw_Call{Call: _e.mock.On("SubscribeStorageRaw", keys)} } @@ -2465,12 +2276,11 @@ func (_c *MockSubstrateApiI_SubscribeStorageRaw_Call) RunAndReturn(run func([]ty return _c } - - func NewMockSubstrateApiI(t interface { mock.TestingT Cleanup(func()) -}) *MockSubstrateApiI { +}, +) *MockSubstrateApiI { mock := &MockSubstrateApiI{} mock.Mock.Test(t) diff --git a/mocks/github.com/dymensionxyz/dymint/da/celestia/types/mock_CelestiaRPCClient.go b/mocks/github.com/dymensionxyz/dymint/da/celestia/types/mock_CelestiaRPCClient.go index cb248d62a..0e08b8913 100644 --- a/mocks/github.com/dymensionxyz/dymint/da/celestia/types/mock_CelestiaRPCClient.go +++ b/mocks/github.com/dymensionxyz/dymint/da/celestia/types/mock_CelestiaRPCClient.go @@ -1,5 +1,3 @@ - 
- package types import ( @@ -16,7 +14,6 @@ import ( share "github.com/celestiaorg/celestia-openrpc/types/share" ) - type MockCelestiaRPCClient struct { mock.Mock } @@ -29,7 +26,6 @@ func (_m *MockCelestiaRPCClient) EXPECT() *MockCelestiaRPCClient_Expecter { return &MockCelestiaRPCClient_Expecter{mock: &_m.Mock} } - func (_m *MockCelestiaRPCClient) Get(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Blob, error) { ret := _m.Called(ctx, height, namespace, commitment) @@ -59,16 +55,10 @@ func (_m *MockCelestiaRPCClient) Get(ctx context.Context, height uint64, namespa return r0, r1 } - type MockCelestiaRPCClient_Get_Call struct { *mock.Call } - - - - - func (_e *MockCelestiaRPCClient_Expecter) Get(ctx interface{}, height interface{}, namespace interface{}, commitment interface{}) *MockCelestiaRPCClient_Get_Call { return &MockCelestiaRPCClient_Get_Call{Call: _e.mock.On("Get", ctx, height, namespace, commitment)} } @@ -90,7 +80,6 @@ func (_c *MockCelestiaRPCClient_Get_Call) RunAndReturn(run func(context.Context, return _c } - func (_m *MockCelestiaRPCClient) GetAll(_a0 context.Context, _a1 uint64, _a2 []share.Namespace) ([]*blob.Blob, error) { ret := _m.Called(_a0, _a1, _a2) @@ -120,15 +109,10 @@ func (_m *MockCelestiaRPCClient) GetAll(_a0 context.Context, _a1 uint64, _a2 []s return r0, r1 } - type MockCelestiaRPCClient_GetAll_Call struct { *mock.Call } - - - - func (_e *MockCelestiaRPCClient_Expecter) GetAll(_a0 interface{}, _a1 interface{}, _a2 interface{}) *MockCelestiaRPCClient_GetAll_Call { return &MockCelestiaRPCClient_GetAll_Call{Call: _e.mock.On("GetAll", _a0, _a1, _a2)} } @@ -150,7 +134,6 @@ func (_c *MockCelestiaRPCClient_GetAll_Call) RunAndReturn(run func(context.Conte return _c } - func (_m *MockCelestiaRPCClient) GetByHeight(ctx context.Context, height uint64) (*header.ExtendedHeader, error) { ret := _m.Called(ctx, height) @@ -180,14 +163,10 @@ func (_m *MockCelestiaRPCClient) GetByHeight(ctx context.Context, height uint64) return r0, r1 } - type MockCelestiaRPCClient_GetByHeight_Call struct { *mock.Call } - - - func (_e *MockCelestiaRPCClient_Expecter) GetByHeight(ctx interface{}, height interface{}) *MockCelestiaRPCClient_GetByHeight_Call { return &MockCelestiaRPCClient_GetByHeight_Call{Call: _e.mock.On("GetByHeight", ctx, height)} } @@ -209,7 +188,6 @@ func (_c *MockCelestiaRPCClient_GetByHeight_Call) RunAndReturn(run func(context. 
return _c } - func (_m *MockCelestiaRPCClient) GetProof(ctx context.Context, height uint64, namespace share.Namespace, commitment blob.Commitment) (*blob.Proof, error) { ret := _m.Called(ctx, height, namespace, commitment) @@ -239,16 +217,10 @@ func (_m *MockCelestiaRPCClient) GetProof(ctx context.Context, height uint64, na return r0, r1 } - type MockCelestiaRPCClient_GetProof_Call struct { *mock.Call } - - - - - func (_e *MockCelestiaRPCClient_Expecter) GetProof(ctx interface{}, height interface{}, namespace interface{}, commitment interface{}) *MockCelestiaRPCClient_GetProof_Call { return &MockCelestiaRPCClient_GetProof_Call{Call: _e.mock.On("GetProof", ctx, height, namespace, commitment)} } @@ -270,7 +242,6 @@ func (_c *MockCelestiaRPCClient_GetProof_Call) RunAndReturn(run func(context.Con return _c } - func (_m *MockCelestiaRPCClient) GetSignerBalance(ctx context.Context) (*sdk.Coin, error) { ret := _m.Called(ctx) @@ -300,13 +271,10 @@ func (_m *MockCelestiaRPCClient) GetSignerBalance(ctx context.Context) (*sdk.Coi return r0, r1 } - type MockCelestiaRPCClient_GetSignerBalance_Call struct { *mock.Call } - - func (_e *MockCelestiaRPCClient_Expecter) GetSignerBalance(ctx interface{}) *MockCelestiaRPCClient_GetSignerBalance_Call { return &MockCelestiaRPCClient_GetSignerBalance_Call{Call: _e.mock.On("GetSignerBalance", ctx)} } @@ -328,7 +296,6 @@ func (_c *MockCelestiaRPCClient_GetSignerBalance_Call) RunAndReturn(run func(con return _c } - func (_m *MockCelestiaRPCClient) Included(ctx context.Context, height uint64, namespace share.Namespace, proof *blob.Proof, commitment blob.Commitment) (bool, error) { ret := _m.Called(ctx, height, namespace, proof, commitment) @@ -356,17 +323,10 @@ func (_m *MockCelestiaRPCClient) Included(ctx context.Context, height uint64, na return r0, r1 } - type MockCelestiaRPCClient_Included_Call struct { *mock.Call } - - - - - - func (_e *MockCelestiaRPCClient_Expecter) Included(ctx interface{}, height interface{}, namespace interface{}, proof interface{}, commitment interface{}) *MockCelestiaRPCClient_Included_Call { return &MockCelestiaRPCClient_Included_Call{Call: _e.mock.On("Included", ctx, height, namespace, proof, commitment)} } @@ -388,7 +348,6 @@ func (_c *MockCelestiaRPCClient_Included_Call) RunAndReturn(run func(context.Con return _c } - func (_m *MockCelestiaRPCClient) Submit(ctx context.Context, blobs []*blob.Blob, options *blob.SubmitOptions) (uint64, error) { ret := _m.Called(ctx, blobs, options) @@ -416,15 +375,10 @@ func (_m *MockCelestiaRPCClient) Submit(ctx context.Context, blobs []*blob.Blob, return r0, r1 } - type MockCelestiaRPCClient_Submit_Call struct { *mock.Call } - - - - func (_e *MockCelestiaRPCClient_Expecter) Submit(ctx interface{}, blobs interface{}, options interface{}) *MockCelestiaRPCClient_Submit_Call { return &MockCelestiaRPCClient_Submit_Call{Call: _e.mock.On("Submit", ctx, blobs, options)} } @@ -446,12 +400,11 @@ func (_c *MockCelestiaRPCClient_Submit_Call) RunAndReturn(run func(context.Conte return _c } - - func NewMockCelestiaRPCClient(t interface { mock.TestingT Cleanup(func()) -}) *MockCelestiaRPCClient { +}, +) *MockCelestiaRPCClient { mock := &MockCelestiaRPCClient{} mock.Mock.Test(t) diff --git a/mocks/github.com/dymensionxyz/dymint/da/mock_DataAvailabilityLayerClient.go b/mocks/github.com/dymensionxyz/dymint/da/mock_DataAvailabilityLayerClient.go index c116222ed..6770b587b 100644 --- a/mocks/github.com/dymensionxyz/dymint/da/mock_DataAvailabilityLayerClient.go +++ 
b/mocks/github.com/dymensionxyz/dymint/da/mock_DataAvailabilityLayerClient.go @@ -1,5 +1,3 @@ - - package da import ( @@ -13,7 +11,6 @@ import ( types "github.com/dymensionxyz/dymint/types" ) - type MockDataAvailabilityLayerClient struct { mock.Mock } @@ -26,7 +23,6 @@ func (_m *MockDataAvailabilityLayerClient) EXPECT() *MockDataAvailabilityLayerCl return &MockDataAvailabilityLayerClient_Expecter{mock: &_m.Mock} } - func (_m *MockDataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASubmitMetaData) da.ResultCheckBatch { ret := _m.Called(daMetaData) @@ -44,13 +40,10 @@ func (_m *MockDataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da return r0 } - type MockDataAvailabilityLayerClient_CheckBatchAvailability_Call struct { *mock.Call } - - func (_e *MockDataAvailabilityLayerClient_Expecter) CheckBatchAvailability(daMetaData interface{}) *MockDataAvailabilityLayerClient_CheckBatchAvailability_Call { return &MockDataAvailabilityLayerClient_CheckBatchAvailability_Call{Call: _e.mock.On("CheckBatchAvailability", daMetaData)} } @@ -72,7 +65,6 @@ func (_c *MockDataAvailabilityLayerClient_CheckBatchAvailability_Call) RunAndRet return _c } - func (_m *MockDataAvailabilityLayerClient) GetClientType() da.Client { ret := _m.Called() @@ -90,12 +82,10 @@ func (_m *MockDataAvailabilityLayerClient) GetClientType() da.Client { return r0 } - type MockDataAvailabilityLayerClient_GetClientType_Call struct { *mock.Call } - func (_e *MockDataAvailabilityLayerClient_Expecter) GetClientType() *MockDataAvailabilityLayerClient_GetClientType_Call { return &MockDataAvailabilityLayerClient_GetClientType_Call{Call: _e.mock.On("GetClientType")} } @@ -117,7 +107,6 @@ func (_c *MockDataAvailabilityLayerClient_GetClientType_Call) RunAndReturn(run f return _c } - func (_m *MockDataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { ret := _m.Called() @@ -135,12 +124,10 @@ func (_m *MockDataAvailabilityLayerClient) GetMaxBlobSizeBytes() uint32 { return r0 } - type MockDataAvailabilityLayerClient_GetMaxBlobSizeBytes_Call struct { *mock.Call } - func (_e *MockDataAvailabilityLayerClient_Expecter) GetMaxBlobSizeBytes() *MockDataAvailabilityLayerClient_GetMaxBlobSizeBytes_Call { return &MockDataAvailabilityLayerClient_GetMaxBlobSizeBytes_Call{Call: _e.mock.On("GetMaxBlobSizeBytes")} } @@ -162,7 +149,6 @@ func (_c *MockDataAvailabilityLayerClient_GetMaxBlobSizeBytes_Call) RunAndReturn return _c } - func (_m *MockDataAvailabilityLayerClient) GetSignerBalance() (da.Balance, error) { ret := _m.Called() @@ -190,12 +176,10 @@ func (_m *MockDataAvailabilityLayerClient) GetSignerBalance() (da.Balance, error return r0, r1 } - type MockDataAvailabilityLayerClient_GetSignerBalance_Call struct { *mock.Call } - func (_e *MockDataAvailabilityLayerClient_Expecter) GetSignerBalance() *MockDataAvailabilityLayerClient_GetSignerBalance_Call { return &MockDataAvailabilityLayerClient_GetSignerBalance_Call{Call: _e.mock.On("GetSignerBalance")} } @@ -217,7 +201,6 @@ func (_c *MockDataAvailabilityLayerClient_GetSignerBalance_Call) RunAndReturn(ru return _c } - func (_m *MockDataAvailabilityLayerClient) Init(config []byte, pubsubServer *pubsub.Server, kvStore store.KV, logger types.Logger, options ...da.Option) error { _va := make([]interface{}, len(options)) for _i := range options { @@ -242,17 +225,10 @@ func (_m *MockDataAvailabilityLayerClient) Init(config []byte, pubsubServer *pub return r0 } - type MockDataAvailabilityLayerClient_Init_Call struct { *mock.Call } - - - - - - func (_e 
*MockDataAvailabilityLayerClient_Expecter) Init(config interface{}, pubsubServer interface{}, kvStore interface{}, logger interface{}, options ...interface{}) *MockDataAvailabilityLayerClient_Init_Call { return &MockDataAvailabilityLayerClient_Init_Call{Call: _e.mock.On("Init", append([]interface{}{config, pubsubServer, kvStore, logger}, options...)...)} @@ -281,7 +257,6 @@ func (_c *MockDataAvailabilityLayerClient_Init_Call) RunAndReturn(run func([]byt return _c } - func (_m *MockDataAvailabilityLayerClient) Start() error { ret := _m.Called() @@ -299,12 +274,10 @@ func (_m *MockDataAvailabilityLayerClient) Start() error { return r0 } - type MockDataAvailabilityLayerClient_Start_Call struct { *mock.Call } - func (_e *MockDataAvailabilityLayerClient_Expecter) Start() *MockDataAvailabilityLayerClient_Start_Call { return &MockDataAvailabilityLayerClient_Start_Call{Call: _e.mock.On("Start")} } @@ -326,7 +299,6 @@ func (_c *MockDataAvailabilityLayerClient_Start_Call) RunAndReturn(run func() er return _c } - func (_m *MockDataAvailabilityLayerClient) Stop() error { ret := _m.Called() @@ -344,12 +316,10 @@ func (_m *MockDataAvailabilityLayerClient) Stop() error { return r0 } - type MockDataAvailabilityLayerClient_Stop_Call struct { *mock.Call } - func (_e *MockDataAvailabilityLayerClient_Expecter) Stop() *MockDataAvailabilityLayerClient_Stop_Call { return &MockDataAvailabilityLayerClient_Stop_Call{Call: _e.mock.On("Stop")} } @@ -371,7 +341,6 @@ func (_c *MockDataAvailabilityLayerClient_Stop_Call) RunAndReturn(run func() err return _c } - func (_m *MockDataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultSubmitBatch { ret := _m.Called(batch) @@ -389,13 +358,10 @@ func (_m *MockDataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.Re return r0 } - type MockDataAvailabilityLayerClient_SubmitBatch_Call struct { *mock.Call } - - func (_e *MockDataAvailabilityLayerClient_Expecter) SubmitBatch(batch interface{}) *MockDataAvailabilityLayerClient_SubmitBatch_Call { return &MockDataAvailabilityLayerClient_SubmitBatch_Call{Call: _e.mock.On("SubmitBatch", batch)} } @@ -417,17 +383,14 @@ func (_c *MockDataAvailabilityLayerClient_SubmitBatch_Call) RunAndReturn(run fun return _c } - func (_m *MockDataAvailabilityLayerClient) WaitForSyncing() { _m.Called() } - type MockDataAvailabilityLayerClient_WaitForSyncing_Call struct { *mock.Call } - func (_e *MockDataAvailabilityLayerClient_Expecter) WaitForSyncing() *MockDataAvailabilityLayerClient_WaitForSyncing_Call { return &MockDataAvailabilityLayerClient_WaitForSyncing_Call{Call: _e.mock.On("WaitForSyncing")} } @@ -449,12 +412,11 @@ func (_c *MockDataAvailabilityLayerClient_WaitForSyncing_Call) RunAndReturn(run return _c } - - func NewMockDataAvailabilityLayerClient(t interface { mock.TestingT Cleanup(func()) -}) *MockDataAvailabilityLayerClient { +}, +) *MockDataAvailabilityLayerClient { mock := &MockDataAvailabilityLayerClient{} mock.Mock.Test(t) diff --git a/mocks/github.com/dymensionxyz/dymint/p2p/mock_ProposerGetter.go b/mocks/github.com/dymensionxyz/dymint/p2p/mock_ProposerGetter.go index de07e1a71..c4771e121 100644 --- a/mocks/github.com/dymensionxyz/dymint/p2p/mock_ProposerGetter.go +++ b/mocks/github.com/dymensionxyz/dymint/p2p/mock_ProposerGetter.go @@ -1,5 +1,3 @@ - - package p2p import ( @@ -7,7 +5,6 @@ import ( crypto "github.com/tendermint/tendermint/crypto" ) - type MockProposerGetter struct { mock.Mock } @@ -20,7 +17,6 @@ func (_m *MockProposerGetter) EXPECT() *MockProposerGetter_Expecter { return 
&MockProposerGetter_Expecter{mock: &_m.Mock} } - func (_m *MockProposerGetter) GetProposerPubKey() crypto.PubKey { ret := _m.Called() @@ -40,12 +36,10 @@ func (_m *MockProposerGetter) GetProposerPubKey() crypto.PubKey { return r0 } - type MockProposerGetter_GetProposerPubKey_Call struct { *mock.Call } - func (_e *MockProposerGetter_Expecter) GetProposerPubKey() *MockProposerGetter_GetProposerPubKey_Call { return &MockProposerGetter_GetProposerPubKey_Call{Call: _e.mock.On("GetProposerPubKey")} } @@ -67,7 +61,6 @@ func (_c *MockProposerGetter_GetProposerPubKey_Call) RunAndReturn(run func() cry return _c } - func (_m *MockProposerGetter) GetRevision() uint64 { ret := _m.Called() @@ -85,12 +78,10 @@ func (_m *MockProposerGetter) GetRevision() uint64 { return r0 } - type MockProposerGetter_GetRevision_Call struct { *mock.Call } - func (_e *MockProposerGetter_Expecter) GetRevision() *MockProposerGetter_GetRevision_Call { return &MockProposerGetter_GetRevision_Call{Call: _e.mock.On("GetRevision")} } @@ -112,12 +103,11 @@ func (_c *MockProposerGetter_GetRevision_Call) RunAndReturn(run func() uint64) * return _c } - - func NewMockProposerGetter(t interface { mock.TestingT Cleanup(func()) -}) *MockProposerGetter { +}, +) *MockProposerGetter { mock := &MockProposerGetter{} mock.Mock.Test(t) diff --git a/mocks/github.com/dymensionxyz/dymint/p2p/mock_StateGetter.go b/mocks/github.com/dymensionxyz/dymint/p2p/mock_StateGetter.go index 477be16f8..e82e7496e 100644 --- a/mocks/github.com/dymensionxyz/dymint/p2p/mock_StateGetter.go +++ b/mocks/github.com/dymensionxyz/dymint/p2p/mock_StateGetter.go @@ -1,5 +1,3 @@ - - package p2p import ( @@ -7,7 +5,6 @@ import ( crypto "github.com/tendermint/tendermint/crypto" ) - type MockStateGetter struct { mock.Mock } @@ -20,7 +17,6 @@ func (_m *MockStateGetter) EXPECT() *MockStateGetter_Expecter { return &MockStateGetter_Expecter{mock: &_m.Mock} } - func (_m *MockStateGetter) GetProposerPubKey() crypto.PubKey { ret := _m.Called() @@ -40,12 +36,10 @@ func (_m *MockStateGetter) GetProposerPubKey() crypto.PubKey { return r0 } - type MockStateGetter_GetProposerPubKey_Call struct { *mock.Call } - func (_e *MockStateGetter_Expecter) GetProposerPubKey() *MockStateGetter_GetProposerPubKey_Call { return &MockStateGetter_GetProposerPubKey_Call{Call: _e.mock.On("GetProposerPubKey")} } @@ -67,7 +61,6 @@ func (_c *MockStateGetter_GetProposerPubKey_Call) RunAndReturn(run func() crypto return _c } - func (_m *MockStateGetter) GetRevision() uint64 { ret := _m.Called() @@ -85,12 +78,10 @@ func (_m *MockStateGetter) GetRevision() uint64 { return r0 } - type MockStateGetter_GetRevision_Call struct { *mock.Call } - func (_e *MockStateGetter_Expecter) GetRevision() *MockStateGetter_GetRevision_Call { return &MockStateGetter_GetRevision_Call{Call: _e.mock.On("GetRevision")} } @@ -112,12 +103,11 @@ func (_c *MockStateGetter_GetRevision_Call) RunAndReturn(run func() uint64) *Moc return _c } - - func NewMockStateGetter(t interface { mock.TestingT Cleanup(func()) -}) *MockStateGetter { +}, +) *MockStateGetter { mock := &MockStateGetter{} mock.Mock.Test(t) diff --git a/mocks/github.com/dymensionxyz/dymint/settlement/dymension/mock_CosmosClient.go b/mocks/github.com/dymensionxyz/dymint/settlement/dymension/mock_CosmosClient.go index f79c856c1..dacc232d3 100644 --- a/mocks/github.com/dymensionxyz/dymint/settlement/dymension/mock_CosmosClient.go +++ b/mocks/github.com/dymensionxyz/dymint/settlement/dymension/mock_CosmosClient.go @@ -1,5 +1,3 @@ - - package dymension import ( @@ -22,7 +20,6 @@ import 
( types "github.com/cosmos/cosmos-sdk/types" ) - type MockCosmosClient struct { mock.Mock } @@ -35,7 +32,6 @@ func (_m *MockCosmosClient) EXPECT() *MockCosmosClient_Expecter { return &MockCosmosClient_Expecter{mock: &_m.Mock} } - func (_m *MockCosmosClient) BroadcastTx(accountName string, msgs ...types.Msg) (cosmosclient.Response, error) { _va := make([]interface{}, len(msgs)) for _i := range msgs { @@ -70,14 +66,10 @@ func (_m *MockCosmosClient) BroadcastTx(accountName string, msgs ...types.Msg) ( return r0, r1 } - type MockCosmosClient_BroadcastTx_Call struct { *mock.Call } - - - func (_e *MockCosmosClient_Expecter) BroadcastTx(accountName interface{}, msgs ...interface{}) *MockCosmosClient_BroadcastTx_Call { return &MockCosmosClient_BroadcastTx_Call{Call: _e.mock.On("BroadcastTx", append([]interface{}{accountName}, msgs...)...)} @@ -106,7 +98,6 @@ func (_c *MockCosmosClient_BroadcastTx_Call) RunAndReturn(run func(string, ...ty return _c } - func (_m *MockCosmosClient) Context() client.Context { ret := _m.Called() @@ -124,12 +115,10 @@ func (_m *MockCosmosClient) Context() client.Context { return r0 } - type MockCosmosClient_Context_Call struct { *mock.Call } - func (_e *MockCosmosClient_Expecter) Context() *MockCosmosClient_Context_Call { return &MockCosmosClient_Context_Call{Call: _e.mock.On("Context")} } @@ -151,7 +140,6 @@ func (_c *MockCosmosClient_Context_Call) RunAndReturn(run func() client.Context) return _c } - func (_m *MockCosmosClient) EventListenerQuit() <-chan struct{} { ret := _m.Called() @@ -171,12 +159,10 @@ func (_m *MockCosmosClient) EventListenerQuit() <-chan struct{} { return r0 } - type MockCosmosClient_EventListenerQuit_Call struct { *mock.Call } - func (_e *MockCosmosClient_Expecter) EventListenerQuit() *MockCosmosClient_EventListenerQuit_Call { return &MockCosmosClient_EventListenerQuit_Call{Call: _e.mock.On("EventListenerQuit")} } @@ -198,7 +184,6 @@ func (_c *MockCosmosClient_EventListenerQuit_Call) RunAndReturn(run func() <-cha return _c } - func (_m *MockCosmosClient) GetAccount(accountName string) (cosmosaccount.Account, error) { ret := _m.Called(accountName) @@ -226,13 +211,10 @@ func (_m *MockCosmosClient) GetAccount(accountName string) (cosmosaccount.Accoun return r0, r1 } - type MockCosmosClient_GetAccount_Call struct { *mock.Call } - - func (_e *MockCosmosClient_Expecter) GetAccount(accountName interface{}) *MockCosmosClient_GetAccount_Call { return &MockCosmosClient_GetAccount_Call{Call: _e.mock.On("GetAccount", accountName)} } @@ -254,7 +236,6 @@ func (_c *MockCosmosClient_GetAccount_Call) RunAndReturn(run func(string) (cosmo return _c } - func (_m *MockCosmosClient) GetBalance(ctx context.Context, accountName string, denom string) (*types.Coin, error) { ret := _m.Called(ctx, accountName, denom) @@ -284,15 +265,10 @@ func (_m *MockCosmosClient) GetBalance(ctx context.Context, accountName string, return r0, r1 } - type MockCosmosClient_GetBalance_Call struct { *mock.Call } - - - - func (_e *MockCosmosClient_Expecter) GetBalance(ctx interface{}, accountName interface{}, denom interface{}) *MockCosmosClient_GetBalance_Call { return &MockCosmosClient_GetBalance_Call{Call: _e.mock.On("GetBalance", ctx, accountName, denom)} } @@ -314,7 +290,6 @@ func (_c *MockCosmosClient_GetBalance_Call) RunAndReturn(run func(context.Contex return _c } - func (_m *MockCosmosClient) GetRollappClient() rollapp.QueryClient { ret := _m.Called() @@ -334,12 +309,10 @@ func (_m *MockCosmosClient) GetRollappClient() rollapp.QueryClient { return r0 } - type 
MockCosmosClient_GetRollappClient_Call struct { *mock.Call } - func (_e *MockCosmosClient_Expecter) GetRollappClient() *MockCosmosClient_GetRollappClient_Call { return &MockCosmosClient_GetRollappClient_Call{Call: _e.mock.On("GetRollappClient")} } @@ -361,7 +334,6 @@ func (_c *MockCosmosClient_GetRollappClient_Call) RunAndReturn(run func() rollap return _c } - func (_m *MockCosmosClient) GetSequencerClient() sequencer.QueryClient { ret := _m.Called() @@ -381,12 +353,10 @@ func (_m *MockCosmosClient) GetSequencerClient() sequencer.QueryClient { return r0 } - type MockCosmosClient_GetSequencerClient_Call struct { *mock.Call } - func (_e *MockCosmosClient_Expecter) GetSequencerClient() *MockCosmosClient_GetSequencerClient_Call { return &MockCosmosClient_GetSequencerClient_Call{Call: _e.mock.On("GetSequencerClient")} } @@ -408,7 +378,6 @@ func (_c *MockCosmosClient_GetSequencerClient_Call) RunAndReturn(run func() sequ return _c } - func (_m *MockCosmosClient) StartEventListener() error { ret := _m.Called() @@ -426,12 +395,10 @@ func (_m *MockCosmosClient) StartEventListener() error { return r0 } - type MockCosmosClient_StartEventListener_Call struct { *mock.Call } - func (_e *MockCosmosClient_Expecter) StartEventListener() *MockCosmosClient_StartEventListener_Call { return &MockCosmosClient_StartEventListener_Call{Call: _e.mock.On("StartEventListener")} } @@ -453,7 +420,6 @@ func (_c *MockCosmosClient_StartEventListener_Call) RunAndReturn(run func() erro return _c } - func (_m *MockCosmosClient) StopEventListener() error { ret := _m.Called() @@ -471,12 +437,10 @@ func (_m *MockCosmosClient) StopEventListener() error { return r0 } - type MockCosmosClient_StopEventListener_Call struct { *mock.Call } - func (_e *MockCosmosClient_Expecter) StopEventListener() *MockCosmosClient_StopEventListener_Call { return &MockCosmosClient_StopEventListener_Call{Call: _e.mock.On("StopEventListener")} } @@ -498,7 +462,6 @@ func (_c *MockCosmosClient_StopEventListener_Call) RunAndReturn(run func() error return _c } - func (_m *MockCosmosClient) SubscribeToEvents(ctx context.Context, subscriber string, query string, outCapacity ...int) (<-chan coretypes.ResultEvent, error) { _va := make([]interface{}, len(outCapacity)) for _i := range outCapacity { @@ -535,16 +498,10 @@ func (_m *MockCosmosClient) SubscribeToEvents(ctx context.Context, subscriber st return r0, r1 } - type MockCosmosClient_SubscribeToEvents_Call struct { *mock.Call } - - - - - func (_e *MockCosmosClient_Expecter) SubscribeToEvents(ctx interface{}, subscriber interface{}, query interface{}, outCapacity ...interface{}) *MockCosmosClient_SubscribeToEvents_Call { return &MockCosmosClient_SubscribeToEvents_Call{Call: _e.mock.On("SubscribeToEvents", append([]interface{}{ctx, subscriber, query}, outCapacity...)...)} @@ -573,7 +530,6 @@ func (_c *MockCosmosClient_SubscribeToEvents_Call) RunAndReturn(run func(context return _c } - func (_m *MockCosmosClient) UnsubscribeAll(ctx context.Context, subscriber string) error { ret := _m.Called(ctx, subscriber) @@ -591,14 +547,10 @@ func (_m *MockCosmosClient) UnsubscribeAll(ctx context.Context, subscriber strin return r0 } - type MockCosmosClient_UnsubscribeAll_Call struct { *mock.Call } - - - func (_e *MockCosmosClient_Expecter) UnsubscribeAll(ctx interface{}, subscriber interface{}) *MockCosmosClient_UnsubscribeAll_Call { return &MockCosmosClient_UnsubscribeAll_Call{Call: _e.mock.On("UnsubscribeAll", ctx, subscriber)} } @@ -620,12 +572,11 @@ func (_c *MockCosmosClient_UnsubscribeAll_Call) RunAndReturn(run 
func(context.Co return _c } - - func NewMockCosmosClient(t interface { mock.TestingT Cleanup(func()) -}) *MockCosmosClient { +}, +) *MockCosmosClient { mock := &MockCosmosClient{} mock.Mock.Test(t) diff --git a/mocks/github.com/dymensionxyz/dymint/settlement/mock_ClientI.go b/mocks/github.com/dymensionxyz/dymint/settlement/mock_ClientI.go index c41be7a74..bfac2e44d 100644 --- a/mocks/github.com/dymensionxyz/dymint/settlement/mock_ClientI.go +++ b/mocks/github.com/dymensionxyz/dymint/settlement/mock_ClientI.go @@ -1,5 +1,3 @@ - - package settlement import ( @@ -15,7 +13,6 @@ import ( types "github.com/dymensionxyz/dymint/types" ) - type MockClientI struct { mock.Mock } @@ -28,7 +25,6 @@ func (_m *MockClientI) EXPECT() *MockClientI_Expecter { return &MockClientI_Expecter{mock: &_m.Mock} } - func (_m *MockClientI) GetAllSequencers() ([]types.Sequencer, error) { ret := _m.Called() @@ -58,12 +54,10 @@ func (_m *MockClientI) GetAllSequencers() ([]types.Sequencer, error) { return r0, r1 } - type MockClientI_GetAllSequencers_Call struct { *mock.Call } - func (_e *MockClientI_Expecter) GetAllSequencers() *MockClientI_GetAllSequencers_Call { return &MockClientI_GetAllSequencers_Call{Call: _e.mock.On("GetAllSequencers")} } @@ -85,7 +79,6 @@ func (_c *MockClientI_GetAllSequencers_Call) RunAndReturn(run func() ([]types.Se return _c } - func (_m *MockClientI) GetBatchAtHeight(index uint64) (*settlement.ResultRetrieveBatch, error) { ret := _m.Called(index) @@ -115,13 +108,10 @@ func (_m *MockClientI) GetBatchAtHeight(index uint64) (*settlement.ResultRetriev return r0, r1 } - type MockClientI_GetBatchAtHeight_Call struct { *mock.Call } - - func (_e *MockClientI_Expecter) GetBatchAtHeight(index interface{}) *MockClientI_GetBatchAtHeight_Call { return &MockClientI_GetBatchAtHeight_Call{Call: _e.mock.On("GetBatchAtHeight", index)} } @@ -143,7 +133,6 @@ func (_c *MockClientI_GetBatchAtHeight_Call) RunAndReturn(run func(uint64) (*set return _c } - func (_m *MockClientI) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, error) { ret := _m.Called(index) @@ -173,13 +162,10 @@ func (_m *MockClientI) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieve return r0, r1 } - type MockClientI_GetBatchAtIndex_Call struct { *mock.Call } - - func (_e *MockClientI_Expecter) GetBatchAtIndex(index interface{}) *MockClientI_GetBatchAtIndex_Call { return &MockClientI_GetBatchAtIndex_Call{Call: _e.mock.On("GetBatchAtIndex", index)} } @@ -201,7 +187,6 @@ func (_c *MockClientI_GetBatchAtIndex_Call) RunAndReturn(run func(uint64) (*sett return _c } - func (_m *MockClientI) GetBondedSequencers() ([]types.Sequencer, error) { ret := _m.Called() @@ -231,12 +216,10 @@ func (_m *MockClientI) GetBondedSequencers() ([]types.Sequencer, error) { return r0, r1 } - type MockClientI_GetBondedSequencers_Call struct { *mock.Call } - func (_e *MockClientI_Expecter) GetBondedSequencers() *MockClientI_GetBondedSequencers_Call { return &MockClientI_GetBondedSequencers_Call{Call: _e.mock.On("GetBondedSequencers")} } @@ -258,7 +241,6 @@ func (_c *MockClientI_GetBondedSequencers_Call) RunAndReturn(run func() ([]types return _c } - func (_m *MockClientI) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { ret := _m.Called() @@ -288,12 +270,10 @@ func (_m *MockClientI) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) return r0, r1 } - type MockClientI_GetLatestBatch_Call struct { *mock.Call } - func (_e *MockClientI_Expecter) GetLatestBatch() *MockClientI_GetLatestBatch_Call { return &MockClientI_GetLatestBatch_Call{Call: 
_e.mock.On("GetLatestBatch")} } @@ -315,7 +295,6 @@ func (_c *MockClientI_GetLatestBatch_Call) RunAndReturn(run func() (*settlement. return _c } - func (_m *MockClientI) GetLatestFinalizedHeight() (uint64, error) { ret := _m.Called() @@ -343,12 +322,10 @@ func (_m *MockClientI) GetLatestFinalizedHeight() (uint64, error) { return r0, r1 } - type MockClientI_GetLatestFinalizedHeight_Call struct { *mock.Call } - func (_e *MockClientI_Expecter) GetLatestFinalizedHeight() *MockClientI_GetLatestFinalizedHeight_Call { return &MockClientI_GetLatestFinalizedHeight_Call{Call: _e.mock.On("GetLatestFinalizedHeight")} } @@ -370,7 +347,6 @@ func (_c *MockClientI_GetLatestFinalizedHeight_Call) RunAndReturn(run func() (ui return _c } - func (_m *MockClientI) GetLatestHeight() (uint64, error) { ret := _m.Called() @@ -398,12 +374,10 @@ func (_m *MockClientI) GetLatestHeight() (uint64, error) { return r0, r1 } - type MockClientI_GetLatestHeight_Call struct { *mock.Call } - func (_e *MockClientI_Expecter) GetLatestHeight() *MockClientI_GetLatestHeight_Call { return &MockClientI_GetLatestHeight_Call{Call: _e.mock.On("GetLatestHeight")} } @@ -425,7 +399,6 @@ func (_c *MockClientI_GetLatestHeight_Call) RunAndReturn(run func() (uint64, err return _c } - func (_m *MockClientI) GetNextProposer() (*types.Sequencer, error) { ret := _m.Called() @@ -455,12 +428,10 @@ func (_m *MockClientI) GetNextProposer() (*types.Sequencer, error) { return r0, r1 } - type MockClientI_GetNextProposer_Call struct { *mock.Call } - func (_e *MockClientI_Expecter) GetNextProposer() *MockClientI_GetNextProposer_Call { return &MockClientI_GetNextProposer_Call{Call: _e.mock.On("GetNextProposer")} } @@ -482,7 +453,6 @@ func (_c *MockClientI_GetNextProposer_Call) RunAndReturn(run func() (*types.Sequ return _c } - func (_m *MockClientI) GetObsoleteDrs() ([]uint32, error) { ret := _m.Called() @@ -512,12 +482,10 @@ func (_m *MockClientI) GetObsoleteDrs() ([]uint32, error) { return r0, r1 } - type MockClientI_GetObsoleteDrs_Call struct { *mock.Call } - func (_e *MockClientI_Expecter) GetObsoleteDrs() *MockClientI_GetObsoleteDrs_Call { return &MockClientI_GetObsoleteDrs_Call{Call: _e.mock.On("GetObsoleteDrs")} } @@ -539,7 +507,6 @@ func (_c *MockClientI_GetObsoleteDrs_Call) RunAndReturn(run func() ([]uint32, er return _c } - func (_m *MockClientI) GetProposerAtHeight(height int64) (*types.Sequencer, error) { ret := _m.Called(height) @@ -569,13 +536,10 @@ func (_m *MockClientI) GetProposerAtHeight(height int64) (*types.Sequencer, erro return r0, r1 } - type MockClientI_GetProposerAtHeight_Call struct { *mock.Call } - - func (_e *MockClientI_Expecter) GetProposerAtHeight(height interface{}) *MockClientI_GetProposerAtHeight_Call { return &MockClientI_GetProposerAtHeight_Call{Call: _e.mock.On("GetProposerAtHeight", height)} } @@ -597,7 +561,6 @@ func (_c *MockClientI_GetProposerAtHeight_Call) RunAndReturn(run func(int64) (*t return _c } - func (_m *MockClientI) GetRollapp() (*types.Rollapp, error) { ret := _m.Called() @@ -627,12 +590,10 @@ func (_m *MockClientI) GetRollapp() (*types.Rollapp, error) { return r0, r1 } - type MockClientI_GetRollapp_Call struct { *mock.Call } - func (_e *MockClientI_Expecter) GetRollapp() *MockClientI_GetRollapp_Call { return &MockClientI_GetRollapp_Call{Call: _e.mock.On("GetRollapp")} } @@ -654,7 +615,6 @@ func (_c *MockClientI_GetRollapp_Call) RunAndReturn(run func() (*types.Rollapp, return _c } - func (_m *MockClientI) GetSequencerByAddress(address string) (types.Sequencer, error) { ret := _m.Called(address) @@ -682,13 
+642,10 @@ func (_m *MockClientI) GetSequencerByAddress(address string) (types.Sequencer, e return r0, r1 } - type MockClientI_GetSequencerByAddress_Call struct { *mock.Call } - - func (_e *MockClientI_Expecter) GetSequencerByAddress(address interface{}) *MockClientI_GetSequencerByAddress_Call { return &MockClientI_GetSequencerByAddress_Call{Call: _e.mock.On("GetSequencerByAddress", address)} } @@ -710,7 +667,6 @@ func (_c *MockClientI_GetSequencerByAddress_Call) RunAndReturn(run func(string) return _c } - func (_m *MockClientI) GetSignerBalance() (types.Balance, error) { ret := _m.Called() @@ -738,12 +694,10 @@ func (_m *MockClientI) GetSignerBalance() (types.Balance, error) { return r0, r1 } - type MockClientI_GetSignerBalance_Call struct { *mock.Call } - func (_e *MockClientI_Expecter) GetSignerBalance() *MockClientI_GetSignerBalance_Call { return &MockClientI_GetSignerBalance_Call{Call: _e.mock.On("GetSignerBalance")} } @@ -765,7 +719,6 @@ func (_c *MockClientI_GetSignerBalance_Call) RunAndReturn(run func() (types.Bala return _c } - func (_m *MockClientI) Init(config settlement.Config, rollappId string, _a2 *pubsub.Server, logger types.Logger, options ...settlement.Option) error { _va := make([]interface{}, len(options)) for _i := range options { @@ -790,17 +743,10 @@ func (_m *MockClientI) Init(config settlement.Config, rollappId string, _a2 *pub return r0 } - type MockClientI_Init_Call struct { *mock.Call } - - - - - - func (_e *MockClientI_Expecter) Init(config interface{}, rollappId interface{}, _a2 interface{}, logger interface{}, options ...interface{}) *MockClientI_Init_Call { return &MockClientI_Init_Call{Call: _e.mock.On("Init", append([]interface{}{config, rollappId, _a2, logger}, options...)...)} @@ -829,7 +775,6 @@ func (_c *MockClientI_Init_Call) RunAndReturn(run func(settlement.Config, string return _c } - func (_m *MockClientI) Start() error { ret := _m.Called() @@ -847,12 +792,10 @@ func (_m *MockClientI) Start() error { return r0 } - type MockClientI_Start_Call struct { *mock.Call } - func (_e *MockClientI_Expecter) Start() *MockClientI_Start_Call { return &MockClientI_Start_Call{Call: _e.mock.On("Start")} } @@ -874,7 +817,6 @@ func (_c *MockClientI_Start_Call) RunAndReturn(run func() error) *MockClientI_St return _c } - func (_m *MockClientI) Stop() error { ret := _m.Called() @@ -892,12 +834,10 @@ func (_m *MockClientI) Stop() error { return r0 } - type MockClientI_Stop_Call struct { *mock.Call } - func (_e *MockClientI_Expecter) Stop() *MockClientI_Stop_Call { return &MockClientI_Stop_Call{Call: _e.mock.On("Stop")} } @@ -919,7 +859,6 @@ func (_c *MockClientI_Stop_Call) RunAndReturn(run func() error) *MockClientI_Sto return _c } - func (_m *MockClientI) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *da.ResultSubmitBatch) error { ret := _m.Called(batch, daClient, daResult) @@ -937,15 +876,10 @@ func (_m *MockClientI) SubmitBatch(batch *types.Batch, daClient da.Client, daRes return r0 } - type MockClientI_SubmitBatch_Call struct { *mock.Call } - - - - func (_e *MockClientI_Expecter) SubmitBatch(batch interface{}, daClient interface{}, daResult interface{}) *MockClientI_SubmitBatch_Call { return &MockClientI_SubmitBatch_Call{Call: _e.mock.On("SubmitBatch", batch, daClient, daResult)} } @@ -967,7 +901,6 @@ func (_c *MockClientI_SubmitBatch_Call) RunAndReturn(run func(*types.Batch, da.C return _c } - func (_m *MockClientI) ValidateGenesisBridgeData(data rollapp.GenesisBridgeData) error { ret := _m.Called(data) @@ -985,13 +918,10 @@ func (_m *MockClientI) 
ValidateGenesisBridgeData(data rollapp.GenesisBridgeData) return r0 } - type MockClientI_ValidateGenesisBridgeData_Call struct { *mock.Call } - - func (_e *MockClientI_Expecter) ValidateGenesisBridgeData(data interface{}) *MockClientI_ValidateGenesisBridgeData_Call { return &MockClientI_ValidateGenesisBridgeData_Call{Call: _e.mock.On("ValidateGenesisBridgeData", data)} } @@ -1013,12 +943,11 @@ func (_c *MockClientI_ValidateGenesisBridgeData_Call) RunAndReturn(run func(roll return _c } - - func NewMockClientI(t interface { mock.TestingT Cleanup(func()) -}) *MockClientI { +}, +) *MockClientI { mock := &MockClientI{} mock.Mock.Test(t) diff --git a/mocks/github.com/dymensionxyz/dymint/store/mock_Store.go b/mocks/github.com/dymensionxyz/dymint/store/mock_Store.go index 8ee0e6d75..baf9e436d 100644 --- a/mocks/github.com/dymensionxyz/dymint/store/mock_Store.go +++ b/mocks/github.com/dymensionxyz/dymint/store/mock_Store.go @@ -1,5 +1,3 @@ - - package store import ( @@ -13,7 +11,6 @@ import ( types "github.com/dymensionxyz/dymint/types" ) - type MockStore struct { mock.Mock } @@ -26,7 +23,6 @@ func (_m *MockStore) EXPECT() *MockStore_Expecter { return &MockStore_Expecter{mock: &_m.Mock} } - func (_m *MockStore) Close() error { ret := _m.Called() @@ -44,12 +40,10 @@ func (_m *MockStore) Close() error { return r0 } - type MockStore_Close_Call struct { *mock.Call } - func (_e *MockStore_Expecter) Close() *MockStore_Close_Call { return &MockStore_Close_Call{Call: _e.mock.On("Close")} } @@ -71,7 +65,6 @@ func (_c *MockStore_Close_Call) RunAndReturn(run func() error) *MockStore_Close_ return _c } - func (_m *MockStore) LoadBaseHeight() (uint64, error) { ret := _m.Called() @@ -99,12 +92,10 @@ func (_m *MockStore) LoadBaseHeight() (uint64, error) { return r0, r1 } - type MockStore_LoadBaseHeight_Call struct { *mock.Call } - func (_e *MockStore_Expecter) LoadBaseHeight() *MockStore_LoadBaseHeight_Call { return &MockStore_LoadBaseHeight_Call{Call: _e.mock.On("LoadBaseHeight")} } @@ -126,7 +117,6 @@ func (_c *MockStore_LoadBaseHeight_Call) RunAndReturn(run func() (uint64, error) return _c } - func (_m *MockStore) LoadBlock(height uint64) (*types.Block, error) { ret := _m.Called(height) @@ -156,13 +146,10 @@ func (_m *MockStore) LoadBlock(height uint64) (*types.Block, error) { return r0, r1 } - type MockStore_LoadBlock_Call struct { *mock.Call } - - func (_e *MockStore_Expecter) LoadBlock(height interface{}) *MockStore_LoadBlock_Call { return &MockStore_LoadBlock_Call{Call: _e.mock.On("LoadBlock", height)} } @@ -184,7 +171,6 @@ func (_c *MockStore_LoadBlock_Call) RunAndReturn(run func(uint64) (*types.Block, return _c } - func (_m *MockStore) LoadBlockByHash(hash [32]byte) (*types.Block, error) { ret := _m.Called(hash) @@ -214,13 +200,10 @@ func (_m *MockStore) LoadBlockByHash(hash [32]byte) (*types.Block, error) { return r0, r1 } - type MockStore_LoadBlockByHash_Call struct { *mock.Call } - - func (_e *MockStore_Expecter) LoadBlockByHash(hash interface{}) *MockStore_LoadBlockByHash_Call { return &MockStore_LoadBlockByHash_Call{Call: _e.mock.On("LoadBlockByHash", hash)} } @@ -242,7 +225,6 @@ func (_c *MockStore_LoadBlockByHash_Call) RunAndReturn(run func([32]byte) (*type return _c } - func (_m *MockStore) LoadBlockCid(height uint64) (cid.Cid, error) { ret := _m.Called(height) @@ -270,13 +252,10 @@ func (_m *MockStore) LoadBlockCid(height uint64) (cid.Cid, error) { return r0, r1 } - type MockStore_LoadBlockCid_Call struct { *mock.Call } - - func (_e *MockStore_Expecter) LoadBlockCid(height interface{}) 
*MockStore_LoadBlockCid_Call { return &MockStore_LoadBlockCid_Call{Call: _e.mock.On("LoadBlockCid", height)} } @@ -298,7 +277,6 @@ func (_c *MockStore_LoadBlockCid_Call) RunAndReturn(run func(uint64) (cid.Cid, e return _c } - func (_m *MockStore) LoadBlockResponses(height uint64) (*state.ABCIResponses, error) { ret := _m.Called(height) @@ -328,13 +306,10 @@ func (_m *MockStore) LoadBlockResponses(height uint64) (*state.ABCIResponses, er return r0, r1 } - type MockStore_LoadBlockResponses_Call struct { *mock.Call } - - func (_e *MockStore_Expecter) LoadBlockResponses(height interface{}) *MockStore_LoadBlockResponses_Call { return &MockStore_LoadBlockResponses_Call{Call: _e.mock.On("LoadBlockResponses", height)} } @@ -356,7 +331,6 @@ func (_c *MockStore_LoadBlockResponses_Call) RunAndReturn(run func(uint64) (*sta return _c } - func (_m *MockStore) LoadBlockSource(height uint64) (types.BlockSource, error) { ret := _m.Called(height) @@ -384,13 +358,10 @@ func (_m *MockStore) LoadBlockSource(height uint64) (types.BlockSource, error) { return r0, r1 } - type MockStore_LoadBlockSource_Call struct { *mock.Call } - - func (_e *MockStore_Expecter) LoadBlockSource(height interface{}) *MockStore_LoadBlockSource_Call { return &MockStore_LoadBlockSource_Call{Call: _e.mock.On("LoadBlockSource", height)} } @@ -412,7 +383,6 @@ func (_c *MockStore_LoadBlockSource_Call) RunAndReturn(run func(uint64) (types.B return _c } - func (_m *MockStore) LoadBlockSyncBaseHeight() (uint64, error) { ret := _m.Called() @@ -440,12 +410,10 @@ func (_m *MockStore) LoadBlockSyncBaseHeight() (uint64, error) { return r0, r1 } - type MockStore_LoadBlockSyncBaseHeight_Call struct { *mock.Call } - func (_e *MockStore_Expecter) LoadBlockSyncBaseHeight() *MockStore_LoadBlockSyncBaseHeight_Call { return &MockStore_LoadBlockSyncBaseHeight_Call{Call: _e.mock.On("LoadBlockSyncBaseHeight")} } @@ -467,7 +435,6 @@ func (_c *MockStore_LoadBlockSyncBaseHeight_Call) RunAndReturn(run func() (uint6 return _c } - func (_m *MockStore) LoadCommit(height uint64) (*types.Commit, error) { ret := _m.Called(height) @@ -497,13 +464,10 @@ func (_m *MockStore) LoadCommit(height uint64) (*types.Commit, error) { return r0, r1 } - type MockStore_LoadCommit_Call struct { *mock.Call } - - func (_e *MockStore_Expecter) LoadCommit(height interface{}) *MockStore_LoadCommit_Call { return &MockStore_LoadCommit_Call{Call: _e.mock.On("LoadCommit", height)} } @@ -525,7 +489,6 @@ func (_c *MockStore_LoadCommit_Call) RunAndReturn(run func(uint64) (*types.Commi return _c } - func (_m *MockStore) LoadCommitByHash(hash [32]byte) (*types.Commit, error) { ret := _m.Called(hash) @@ -555,13 +518,10 @@ func (_m *MockStore) LoadCommitByHash(hash [32]byte) (*types.Commit, error) { return r0, r1 } - type MockStore_LoadCommitByHash_Call struct { *mock.Call } - - func (_e *MockStore_Expecter) LoadCommitByHash(hash interface{}) *MockStore_LoadCommitByHash_Call { return &MockStore_LoadCommitByHash_Call{Call: _e.mock.On("LoadCommitByHash", hash)} } @@ -583,7 +543,6 @@ func (_c *MockStore_LoadCommitByHash_Call) RunAndReturn(run func([32]byte) (*typ return _c } - func (_m *MockStore) LoadDRSVersion(height uint64) (uint32, error) { ret := _m.Called(height) @@ -611,13 +570,10 @@ func (_m *MockStore) LoadDRSVersion(height uint64) (uint32, error) { return r0, r1 } - type MockStore_LoadDRSVersion_Call struct { *mock.Call } - - func (_e *MockStore_Expecter) LoadDRSVersion(height interface{}) *MockStore_LoadDRSVersion_Call { return &MockStore_LoadDRSVersion_Call{Call: 
_e.mock.On("LoadDRSVersion", height)} } @@ -639,7 +595,6 @@ func (_c *MockStore_LoadDRSVersion_Call) RunAndReturn(run func(uint64) (uint32, return _c } - func (_m *MockStore) LoadIndexerBaseHeight() (uint64, error) { ret := _m.Called() @@ -667,12 +622,10 @@ func (_m *MockStore) LoadIndexerBaseHeight() (uint64, error) { return r0, r1 } - type MockStore_LoadIndexerBaseHeight_Call struct { *mock.Call } - func (_e *MockStore_Expecter) LoadIndexerBaseHeight() *MockStore_LoadIndexerBaseHeight_Call { return &MockStore_LoadIndexerBaseHeight_Call{Call: _e.mock.On("LoadIndexerBaseHeight")} } @@ -694,7 +647,6 @@ func (_c *MockStore_LoadIndexerBaseHeight_Call) RunAndReturn(run func() (uint64, return _c } - func (_m *MockStore) LoadLastBlockSequencerSet() (types.Sequencers, error) { ret := _m.Called() @@ -724,12 +676,10 @@ func (_m *MockStore) LoadLastBlockSequencerSet() (types.Sequencers, error) { return r0, r1 } - type MockStore_LoadLastBlockSequencerSet_Call struct { *mock.Call } - func (_e *MockStore_Expecter) LoadLastBlockSequencerSet() *MockStore_LoadLastBlockSequencerSet_Call { return &MockStore_LoadLastBlockSequencerSet_Call{Call: _e.mock.On("LoadLastBlockSequencerSet")} } @@ -751,7 +701,6 @@ func (_c *MockStore_LoadLastBlockSequencerSet_Call) RunAndReturn(run func() (typ return _c } - func (_m *MockStore) LoadProposer(height uint64) (types.Sequencer, error) { ret := _m.Called(height) @@ -779,13 +728,10 @@ func (_m *MockStore) LoadProposer(height uint64) (types.Sequencer, error) { return r0, r1 } - type MockStore_LoadProposer_Call struct { *mock.Call } - - func (_e *MockStore_Expecter) LoadProposer(height interface{}) *MockStore_LoadProposer_Call { return &MockStore_LoadProposer_Call{Call: _e.mock.On("LoadProposer", height)} } @@ -807,7 +753,6 @@ func (_c *MockStore_LoadProposer_Call) RunAndReturn(run func(uint64) (types.Sequ return _c } - func (_m *MockStore) LoadState() (*types.State, error) { ret := _m.Called() @@ -837,12 +782,10 @@ func (_m *MockStore) LoadState() (*types.State, error) { return r0, r1 } - type MockStore_LoadState_Call struct { *mock.Call } - func (_e *MockStore_Expecter) LoadState() *MockStore_LoadState_Call { return &MockStore_LoadState_Call{Call: _e.mock.On("LoadState")} } @@ -864,7 +807,6 @@ func (_c *MockStore_LoadState_Call) RunAndReturn(run func() (*types.State, error return _c } - func (_m *MockStore) LoadValidationHeight() (uint64, error) { ret := _m.Called() @@ -892,12 +834,10 @@ func (_m *MockStore) LoadValidationHeight() (uint64, error) { return r0, r1 } - type MockStore_LoadValidationHeight_Call struct { *mock.Call } - func (_e *MockStore_Expecter) LoadValidationHeight() *MockStore_LoadValidationHeight_Call { return &MockStore_LoadValidationHeight_Call{Call: _e.mock.On("LoadValidationHeight")} } @@ -919,7 +859,6 @@ func (_c *MockStore_LoadValidationHeight_Call) RunAndReturn(run func() (uint64, return _c } - func (_m *MockStore) NewBatch() store.KVBatch { ret := _m.Called() @@ -939,12 +878,10 @@ func (_m *MockStore) NewBatch() store.KVBatch { return r0 } - type MockStore_NewBatch_Call struct { *mock.Call } - func (_e *MockStore_Expecter) NewBatch() *MockStore_NewBatch_Call { return &MockStore_NewBatch_Call{Call: _e.mock.On("NewBatch")} } @@ -966,7 +903,6 @@ func (_c *MockStore_NewBatch_Call) RunAndReturn(run func() store.KVBatch) *MockS return _c } - func (_m *MockStore) PruneStore(to uint64, logger types.Logger) (uint64, error) { ret := _m.Called(to, logger) @@ -994,14 +930,10 @@ func (_m *MockStore) PruneStore(to uint64, logger types.Logger) (uint64, error) 
return r0, r1 } - type MockStore_PruneStore_Call struct { *mock.Call } - - - func (_e *MockStore_Expecter) PruneStore(to interface{}, logger interface{}) *MockStore_PruneStore_Call { return &MockStore_PruneStore_Call{Call: _e.mock.On("PruneStore", to, logger)} } @@ -1023,7 +955,6 @@ func (_c *MockStore_PruneStore_Call) RunAndReturn(run func(uint64, types.Logger) return _c } - func (_m *MockStore) RemoveBlockCid(height uint64) error { ret := _m.Called(height) @@ -1041,13 +972,10 @@ func (_m *MockStore) RemoveBlockCid(height uint64) error { return r0 } - type MockStore_RemoveBlockCid_Call struct { *mock.Call } - - func (_e *MockStore_Expecter) RemoveBlockCid(height interface{}) *MockStore_RemoveBlockCid_Call { return &MockStore_RemoveBlockCid_Call{Call: _e.mock.On("RemoveBlockCid", height)} } @@ -1069,7 +997,6 @@ func (_c *MockStore_RemoveBlockCid_Call) RunAndReturn(run func(uint64) error) *M return _c } - func (_m *MockStore) SaveBaseHeight(height uint64) error { ret := _m.Called(height) @@ -1087,13 +1014,10 @@ func (_m *MockStore) SaveBaseHeight(height uint64) error { return r0 } - type MockStore_SaveBaseHeight_Call struct { *mock.Call } - - func (_e *MockStore_Expecter) SaveBaseHeight(height interface{}) *MockStore_SaveBaseHeight_Call { return &MockStore_SaveBaseHeight_Call{Call: _e.mock.On("SaveBaseHeight", height)} } @@ -1115,7 +1039,6 @@ func (_c *MockStore_SaveBaseHeight_Call) RunAndReturn(run func(uint64) error) *M return _c } - func (_m *MockStore) SaveBlock(block *types.Block, commit *types.Commit, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(block, commit, batch) @@ -1145,15 +1068,10 @@ func (_m *MockStore) SaveBlock(block *types.Block, commit *types.Commit, batch s return r0, r1 } - type MockStore_SaveBlock_Call struct { *mock.Call } - - - - func (_e *MockStore_Expecter) SaveBlock(block interface{}, commit interface{}, batch interface{}) *MockStore_SaveBlock_Call { return &MockStore_SaveBlock_Call{Call: _e.mock.On("SaveBlock", block, commit, batch)} } @@ -1175,7 +1093,6 @@ func (_c *MockStore_SaveBlock_Call) RunAndReturn(run func(*types.Block, *types.C return _c } - func (_m *MockStore) SaveBlockCid(height uint64, _a1 cid.Cid, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, _a1, batch) @@ -1205,15 +1122,10 @@ func (_m *MockStore) SaveBlockCid(height uint64, _a1 cid.Cid, batch store.KVBatc return r0, r1 } - type MockStore_SaveBlockCid_Call struct { *mock.Call } - - - - func (_e *MockStore_Expecter) SaveBlockCid(height interface{}, _a1 interface{}, batch interface{}) *MockStore_SaveBlockCid_Call { return &MockStore_SaveBlockCid_Call{Call: _e.mock.On("SaveBlockCid", height, _a1, batch)} } @@ -1235,7 +1147,6 @@ func (_c *MockStore_SaveBlockCid_Call) RunAndReturn(run func(uint64, cid.Cid, st return _c } - func (_m *MockStore) SaveBlockResponses(height uint64, responses *state.ABCIResponses, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, responses, batch) @@ -1265,15 +1176,10 @@ func (_m *MockStore) SaveBlockResponses(height uint64, responses *state.ABCIResp return r0, r1 } - type MockStore_SaveBlockResponses_Call struct { *mock.Call } - - - - func (_e *MockStore_Expecter) SaveBlockResponses(height interface{}, responses interface{}, batch interface{}) *MockStore_SaveBlockResponses_Call { return &MockStore_SaveBlockResponses_Call{Call: _e.mock.On("SaveBlockResponses", height, responses, batch)} } @@ -1295,7 +1201,6 @@ func (_c *MockStore_SaveBlockResponses_Call) RunAndReturn(run func(uint64, *stat return _c } - func (_m 
*MockStore) SaveBlockSource(height uint64, source types.BlockSource, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, source, batch) @@ -1325,15 +1230,10 @@ func (_m *MockStore) SaveBlockSource(height uint64, source types.BlockSource, ba return r0, r1 } - type MockStore_SaveBlockSource_Call struct { *mock.Call } - - - - func (_e *MockStore_Expecter) SaveBlockSource(height interface{}, source interface{}, batch interface{}) *MockStore_SaveBlockSource_Call { return &MockStore_SaveBlockSource_Call{Call: _e.mock.On("SaveBlockSource", height, source, batch)} } @@ -1355,7 +1255,6 @@ func (_c *MockStore_SaveBlockSource_Call) RunAndReturn(run func(uint64, types.Bl return _c } - func (_m *MockStore) SaveBlockSyncBaseHeight(height uint64) error { ret := _m.Called(height) @@ -1373,13 +1272,10 @@ func (_m *MockStore) SaveBlockSyncBaseHeight(height uint64) error { return r0 } - type MockStore_SaveBlockSyncBaseHeight_Call struct { *mock.Call } - - func (_e *MockStore_Expecter) SaveBlockSyncBaseHeight(height interface{}) *MockStore_SaveBlockSyncBaseHeight_Call { return &MockStore_SaveBlockSyncBaseHeight_Call{Call: _e.mock.On("SaveBlockSyncBaseHeight", height)} } @@ -1401,7 +1297,6 @@ func (_c *MockStore_SaveBlockSyncBaseHeight_Call) RunAndReturn(run func(uint64) return _c } - func (_m *MockStore) SaveDRSVersion(height uint64, version uint32, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, version, batch) @@ -1431,15 +1326,10 @@ func (_m *MockStore) SaveDRSVersion(height uint64, version uint32, batch store.K return r0, r1 } - type MockStore_SaveDRSVersion_Call struct { *mock.Call } - - - - func (_e *MockStore_Expecter) SaveDRSVersion(height interface{}, version interface{}, batch interface{}) *MockStore_SaveDRSVersion_Call { return &MockStore_SaveDRSVersion_Call{Call: _e.mock.On("SaveDRSVersion", height, version, batch)} } @@ -1461,7 +1351,6 @@ func (_c *MockStore_SaveDRSVersion_Call) RunAndReturn(run func(uint64, uint32, s return _c } - func (_m *MockStore) SaveIndexerBaseHeight(height uint64) error { ret := _m.Called(height) @@ -1479,13 +1368,10 @@ func (_m *MockStore) SaveIndexerBaseHeight(height uint64) error { return r0 } - type MockStore_SaveIndexerBaseHeight_Call struct { *mock.Call } - - func (_e *MockStore_Expecter) SaveIndexerBaseHeight(height interface{}) *MockStore_SaveIndexerBaseHeight_Call { return &MockStore_SaveIndexerBaseHeight_Call{Call: _e.mock.On("SaveIndexerBaseHeight", height)} } @@ -1507,7 +1393,6 @@ func (_c *MockStore_SaveIndexerBaseHeight_Call) RunAndReturn(run func(uint64) er return _c } - func (_m *MockStore) SaveLastBlockSequencerSet(sequencers types.Sequencers, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(sequencers, batch) @@ -1537,14 +1422,10 @@ func (_m *MockStore) SaveLastBlockSequencerSet(sequencers types.Sequencers, batc return r0, r1 } - type MockStore_SaveLastBlockSequencerSet_Call struct { *mock.Call } - - - func (_e *MockStore_Expecter) SaveLastBlockSequencerSet(sequencers interface{}, batch interface{}) *MockStore_SaveLastBlockSequencerSet_Call { return &MockStore_SaveLastBlockSequencerSet_Call{Call: _e.mock.On("SaveLastBlockSequencerSet", sequencers, batch)} } @@ -1566,7 +1447,6 @@ func (_c *MockStore_SaveLastBlockSequencerSet_Call) RunAndReturn(run func(types. 
return _c } - func (_m *MockStore) SaveProposer(height uint64, proposer types.Sequencer, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, proposer, batch) @@ -1596,15 +1476,10 @@ func (_m *MockStore) SaveProposer(height uint64, proposer types.Sequencer, batch return r0, r1 } - type MockStore_SaveProposer_Call struct { *mock.Call } - - - - func (_e *MockStore_Expecter) SaveProposer(height interface{}, proposer interface{}, batch interface{}) *MockStore_SaveProposer_Call { return &MockStore_SaveProposer_Call{Call: _e.mock.On("SaveProposer", height, proposer, batch)} } @@ -1626,7 +1501,6 @@ func (_c *MockStore_SaveProposer_Call) RunAndReturn(run func(uint64, types.Seque return _c } - func (_m *MockStore) SaveState(_a0 *types.State, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(_a0, batch) @@ -1656,14 +1530,10 @@ func (_m *MockStore) SaveState(_a0 *types.State, batch store.KVBatch) (store.KVB return r0, r1 } - type MockStore_SaveState_Call struct { *mock.Call } - - - func (_e *MockStore_Expecter) SaveState(_a0 interface{}, batch interface{}) *MockStore_SaveState_Call { return &MockStore_SaveState_Call{Call: _e.mock.On("SaveState", _a0, batch)} } @@ -1685,7 +1555,6 @@ func (_c *MockStore_SaveState_Call) RunAndReturn(run func(*types.State, store.KV return _c } - func (_m *MockStore) SaveValidationHeight(height uint64, batch store.KVBatch) (store.KVBatch, error) { ret := _m.Called(height, batch) @@ -1715,14 +1584,10 @@ func (_m *MockStore) SaveValidationHeight(height uint64, batch store.KVBatch) (s return r0, r1 } - type MockStore_SaveValidationHeight_Call struct { *mock.Call } - - - func (_e *MockStore_Expecter) SaveValidationHeight(height interface{}, batch interface{}) *MockStore_SaveValidationHeight_Call { return &MockStore_SaveValidationHeight_Call{Call: _e.mock.On("SaveValidationHeight", height, batch)} } @@ -1744,12 +1609,11 @@ func (_c *MockStore_SaveValidationHeight_Call) RunAndReturn(run func(uint64, sto return _c } - - func NewMockStore(t interface { mock.TestingT Cleanup(func()) -}) *MockStore { +}, +) *MockStore { mock := &MockStore{} mock.Mock.Test(t) diff --git a/mocks/github.com/dymensionxyz/dymint/third_party/dymension/sequencer/types/mock_QueryClient.go b/mocks/github.com/dymensionxyz/dymint/third_party/dymension/sequencer/types/mock_QueryClient.go index 775ec233d..67ce91892 100644 --- a/mocks/github.com/dymensionxyz/dymint/third_party/dymension/sequencer/types/mock_QueryClient.go +++ b/mocks/github.com/dymensionxyz/dymint/third_party/dymension/sequencer/types/mock_QueryClient.go @@ -1,5 +1,3 @@ - - package types import ( @@ -12,7 +10,6 @@ import ( types "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer" ) - type MockQueryClient struct { mock.Mock } @@ -25,7 +22,6 @@ func (_m *MockQueryClient) EXPECT() *MockQueryClient_Expecter { return &MockQueryClient_Expecter{mock: &_m.Mock} } - func (_m *MockQueryClient) GetNextProposerByRollapp(ctx context.Context, in *types.QueryGetNextProposerByRollappRequest, opts ...grpc.CallOption) (*types.QueryGetNextProposerByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -62,15 +58,10 @@ func (_m *MockQueryClient) GetNextProposerByRollapp(ctx context.Context, in *typ return r0, r1 } - type MockQueryClient_GetNextProposerByRollapp_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) GetNextProposerByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_GetNextProposerByRollapp_Call { return 
&MockQueryClient_GetNextProposerByRollapp_Call{Call: _e.mock.On("GetNextProposerByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -99,7 +90,6 @@ func (_c *MockQueryClient_GetNextProposerByRollapp_Call) RunAndReturn(run func(c return _c } - func (_m *MockQueryClient) GetProposerByRollapp(ctx context.Context, in *types.QueryGetProposerByRollappRequest, opts ...grpc.CallOption) (*types.QueryGetProposerByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -136,15 +126,10 @@ func (_m *MockQueryClient) GetProposerByRollapp(ctx context.Context, in *types.Q return r0, r1 } - type MockQueryClient_GetProposerByRollapp_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) GetProposerByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_GetProposerByRollapp_Call { return &MockQueryClient_GetProposerByRollapp_Call{Call: _e.mock.On("GetProposerByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -173,7 +158,6 @@ func (_c *MockQueryClient_GetProposerByRollapp_Call) RunAndReturn(run func(conte return _c } - func (_m *MockQueryClient) Params(ctx context.Context, in *types.QueryParamsRequest, opts ...grpc.CallOption) (*types.QueryParamsResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -210,15 +194,10 @@ func (_m *MockQueryClient) Params(ctx context.Context, in *types.QueryParamsRequ return r0, r1 } - type MockQueryClient_Params_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) Params(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Params_Call { return &MockQueryClient_Params_Call{Call: _e.mock.On("Params", append([]interface{}{ctx, in}, opts...)...)} @@ -247,7 +226,6 @@ func (_c *MockQueryClient_Params_Call) RunAndReturn(run func(context.Context, *t return _c } - func (_m *MockQueryClient) Sequencer(ctx context.Context, in *types.QueryGetSequencerRequest, opts ...grpc.CallOption) (*types.QueryGetSequencerResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -284,15 +262,10 @@ func (_m *MockQueryClient) Sequencer(ctx context.Context, in *types.QueryGetSequ return r0, r1 } - type MockQueryClient_Sequencer_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) Sequencer(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Sequencer_Call { return &MockQueryClient_Sequencer_Call{Call: _e.mock.On("Sequencer", append([]interface{}{ctx, in}, opts...)...)} @@ -321,7 +294,6 @@ func (_c *MockQueryClient_Sequencer_Call) RunAndReturn(run func(context.Context, return _c } - func (_m *MockQueryClient) Sequencers(ctx context.Context, in *types.QuerySequencersRequest, opts ...grpc.CallOption) (*types.QuerySequencersResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -358,15 +330,10 @@ func (_m *MockQueryClient) Sequencers(ctx context.Context, in *types.QuerySequen return r0, r1 } - type MockQueryClient_Sequencers_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) Sequencers(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Sequencers_Call { return &MockQueryClient_Sequencers_Call{Call: _e.mock.On("sequencers", append([]interface{}{ctx, in}, opts...)...)} @@ -395,7 +362,6 @@ func (_c *MockQueryClient_Sequencers_Call) RunAndReturn(run func(context.Context return _c } - func (_m *MockQueryClient) SequencersByRollapp(ctx context.Context, in *types.QueryGetSequencersByRollappRequest, opts ...grpc.CallOption) 
(*types.QueryGetSequencersByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -432,15 +398,10 @@ func (_m *MockQueryClient) SequencersByRollapp(ctx context.Context, in *types.Qu return r0, r1 } - type MockQueryClient_SequencersByRollapp_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) SequencersByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_SequencersByRollapp_Call { return &MockQueryClient_SequencersByRollapp_Call{Call: _e.mock.On("SequencersByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -469,7 +430,6 @@ func (_c *MockQueryClient_SequencersByRollapp_Call) RunAndReturn(run func(contex return _c } - func (_m *MockQueryClient) SequencersByRollappByStatus(ctx context.Context, in *types.QueryGetSequencersByRollappByStatusRequest, opts ...grpc.CallOption) (*types.QueryGetSequencersByRollappByStatusResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -506,15 +466,10 @@ func (_m *MockQueryClient) SequencersByRollappByStatus(ctx context.Context, in * return r0, r1 } - type MockQueryClient_SequencersByRollappByStatus_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) SequencersByRollappByStatus(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_SequencersByRollappByStatus_Call { return &MockQueryClient_SequencersByRollappByStatus_Call{Call: _e.mock.On("SequencersByRollappByStatus", append([]interface{}{ctx, in}, opts...)...)} @@ -543,12 +498,11 @@ func (_c *MockQueryClient_SequencersByRollappByStatus_Call) RunAndReturn(run fun return _c } - - func NewMockQueryClient(t interface { mock.TestingT Cleanup(func()) -}) *MockQueryClient { +}, +) *MockQueryClient { mock := &MockQueryClient{} mock.Mock.Test(t) diff --git a/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp/mock_QueryClient.go b/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp/mock_QueryClient.go index c73eb7ea5..e52412a6a 100644 --- a/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp/mock_QueryClient.go +++ b/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp/mock_QueryClient.go @@ -1,5 +1,3 @@ - - package rollapp import ( @@ -12,7 +10,6 @@ import ( rollapp "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp" ) - type MockQueryClient struct { mock.Mock } @@ -25,7 +22,6 @@ func (_m *MockQueryClient) EXPECT() *MockQueryClient_Expecter { return &MockQueryClient_Expecter{mock: &_m.Mock} } - func (_m *MockQueryClient) LatestHeight(ctx context.Context, in *rollapp.QueryGetLatestHeightRequest, opts ...grpc.CallOption) (*rollapp.QueryGetLatestHeightResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -62,15 +58,10 @@ func (_m *MockQueryClient) LatestHeight(ctx context.Context, in *rollapp.QueryGe return r0, r1 } - type MockQueryClient_LatestHeight_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) LatestHeight(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_LatestHeight_Call { return &MockQueryClient_LatestHeight_Call{Call: _e.mock.On("LatestHeight", append([]interface{}{ctx, in}, opts...)...)} @@ -99,7 +90,6 @@ func (_c *MockQueryClient_LatestHeight_Call) RunAndReturn(run func(context.Conte return _c } - func (_m *MockQueryClient) LatestStateIndex(ctx context.Context, in *rollapp.QueryGetLatestStateIndexRequest, opts ...grpc.CallOption) 
(*rollapp.QueryGetLatestStateIndexResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -136,15 +126,10 @@ func (_m *MockQueryClient) LatestStateIndex(ctx context.Context, in *rollapp.Que return r0, r1 } - type MockQueryClient_LatestStateIndex_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) LatestStateIndex(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_LatestStateIndex_Call { return &MockQueryClient_LatestStateIndex_Call{Call: _e.mock.On("LatestStateIndex", append([]interface{}{ctx, in}, opts...)...)} @@ -173,7 +158,6 @@ func (_c *MockQueryClient_LatestStateIndex_Call) RunAndReturn(run func(context.C return _c } - func (_m *MockQueryClient) ObsoleteDRSVersions(ctx context.Context, in *rollapp.QueryObsoleteDRSVersionsRequest, opts ...grpc.CallOption) (*rollapp.QueryObsoleteDRSVersionsResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -210,15 +194,10 @@ func (_m *MockQueryClient) ObsoleteDRSVersions(ctx context.Context, in *rollapp. return r0, r1 } - type MockQueryClient_ObsoleteDRSVersions_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) ObsoleteDRSVersions(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_ObsoleteDRSVersions_Call { return &MockQueryClient_ObsoleteDRSVersions_Call{Call: _e.mock.On("ObsoleteDRSVersions", append([]interface{}{ctx, in}, opts...)...)} @@ -247,7 +226,6 @@ func (_c *MockQueryClient_ObsoleteDRSVersions_Call) RunAndReturn(run func(contex return _c } - func (_m *MockQueryClient) Params(ctx context.Context, in *rollapp.QueryParamsRequest, opts ...grpc.CallOption) (*rollapp.QueryParamsResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -284,15 +262,10 @@ func (_m *MockQueryClient) Params(ctx context.Context, in *rollapp.QueryParamsRe return r0, r1 } - type MockQueryClient_Params_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) Params(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Params_Call { return &MockQueryClient_Params_Call{Call: _e.mock.On("Params", append([]interface{}{ctx, in}, opts...)...)} @@ -321,7 +294,6 @@ func (_c *MockQueryClient_Params_Call) RunAndReturn(run func(context.Context, *r return _c } - func (_m *MockQueryClient) RegisteredDenoms(ctx context.Context, in *rollapp.QueryRegisteredDenomsRequest, opts ...grpc.CallOption) (*rollapp.QueryRegisteredDenomsResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -358,15 +330,10 @@ func (_m *MockQueryClient) RegisteredDenoms(ctx context.Context, in *rollapp.Que return r0, r1 } - type MockQueryClient_RegisteredDenoms_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) RegisteredDenoms(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_RegisteredDenoms_Call { return &MockQueryClient_RegisteredDenoms_Call{Call: _e.mock.On("RegisteredDenoms", append([]interface{}{ctx, in}, opts...)...)} @@ -395,7 +362,6 @@ func (_c *MockQueryClient_RegisteredDenoms_Call) RunAndReturn(run func(context.C return _c } - func (_m *MockQueryClient) Rollapp(ctx context.Context, in *rollapp.QueryGetRollappRequest, opts ...grpc.CallOption) (*rollapp.QueryGetRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -432,15 +398,10 @@ func (_m *MockQueryClient) Rollapp(ctx context.Context, in *rollapp.QueryGetRoll return r0, r1 } - type MockQueryClient_Rollapp_Call struct { *mock.Call } - - - - func (_e 
*MockQueryClient_Expecter) Rollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Rollapp_Call { return &MockQueryClient_Rollapp_Call{Call: _e.mock.On("Rollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -469,7 +430,6 @@ func (_c *MockQueryClient_Rollapp_Call) RunAndReturn(run func(context.Context, * return _c } - func (_m *MockQueryClient) RollappAll(ctx context.Context, in *rollapp.QueryAllRollappRequest, opts ...grpc.CallOption) (*rollapp.QueryAllRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -506,15 +466,10 @@ func (_m *MockQueryClient) RollappAll(ctx context.Context, in *rollapp.QueryAllR return r0, r1 } - type MockQueryClient_RollappAll_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) RollappAll(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_RollappAll_Call { return &MockQueryClient_RollappAll_Call{Call: _e.mock.On("RollappAll", append([]interface{}{ctx, in}, opts...)...)} @@ -543,7 +498,6 @@ func (_c *MockQueryClient_RollappAll_Call) RunAndReturn(run func(context.Context return _c } - func (_m *MockQueryClient) RollappByEIP155(ctx context.Context, in *rollapp.QueryGetRollappByEIP155Request, opts ...grpc.CallOption) (*rollapp.QueryGetRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -580,15 +534,10 @@ func (_m *MockQueryClient) RollappByEIP155(ctx context.Context, in *rollapp.Quer return r0, r1 } - type MockQueryClient_RollappByEIP155_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) RollappByEIP155(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_RollappByEIP155_Call { return &MockQueryClient_RollappByEIP155_Call{Call: _e.mock.On("RollappByEIP155", append([]interface{}{ctx, in}, opts...)...)} @@ -617,7 +566,6 @@ func (_c *MockQueryClient_RollappByEIP155_Call) RunAndReturn(run func(context.Co return _c } - func (_m *MockQueryClient) StateInfo(ctx context.Context, in *rollapp.QueryGetStateInfoRequest, opts ...grpc.CallOption) (*rollapp.QueryGetStateInfoResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -654,15 +602,10 @@ func (_m *MockQueryClient) StateInfo(ctx context.Context, in *rollapp.QueryGetSt return r0, r1 } - type MockQueryClient_StateInfo_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) StateInfo(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_StateInfo_Call { return &MockQueryClient_StateInfo_Call{Call: _e.mock.On("StateInfo", append([]interface{}{ctx, in}, opts...)...)} @@ -691,7 +634,6 @@ func (_c *MockQueryClient_StateInfo_Call) RunAndReturn(run func(context.Context, return _c } - func (_m *MockQueryClient) ValidateGenesisBridge(ctx context.Context, in *rollapp.QueryValidateGenesisBridgeRequest, opts ...grpc.CallOption) (*rollapp.QueryValidateGenesisBridgeResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -728,15 +670,10 @@ func (_m *MockQueryClient) ValidateGenesisBridge(ctx context.Context, in *rollap return r0, r1 } - type MockQueryClient_ValidateGenesisBridge_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) ValidateGenesisBridge(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_ValidateGenesisBridge_Call { return &MockQueryClient_ValidateGenesisBridge_Call{Call: _e.mock.On("ValidateGenesisBridge", append([]interface{}{ctx, in}, opts...)...)} @@ -765,12 +702,11 @@ func (_c *MockQueryClient_ValidateGenesisBridge_Call) 
RunAndReturn(run func(cont return _c } - - func NewMockQueryClient(t interface { mock.TestingT Cleanup(func()) -}) *MockQueryClient { +}, +) *MockQueryClient { mock := &MockQueryClient{} mock.Mock.Test(t) diff --git a/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer/mock_QueryClient.go b/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer/mock_QueryClient.go index 0b76b1a9b..3fe80e59e 100644 --- a/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer/mock_QueryClient.go +++ b/mocks/github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer/mock_QueryClient.go @@ -1,5 +1,3 @@ - - package sequencer import ( @@ -12,7 +10,6 @@ import ( sequencer "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer" ) - type MockQueryClient struct { mock.Mock } @@ -25,7 +22,6 @@ func (_m *MockQueryClient) EXPECT() *MockQueryClient_Expecter { return &MockQueryClient_Expecter{mock: &_m.Mock} } - func (_m *MockQueryClient) GetNextProposerByRollapp(ctx context.Context, in *sequencer.QueryGetNextProposerByRollappRequest, opts ...grpc.CallOption) (*sequencer.QueryGetNextProposerByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -62,15 +58,10 @@ func (_m *MockQueryClient) GetNextProposerByRollapp(ctx context.Context, in *seq return r0, r1 } - type MockQueryClient_GetNextProposerByRollapp_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) GetNextProposerByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_GetNextProposerByRollapp_Call { return &MockQueryClient_GetNextProposerByRollapp_Call{Call: _e.mock.On("GetNextProposerByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -99,7 +90,6 @@ func (_c *MockQueryClient_GetNextProposerByRollapp_Call) RunAndReturn(run func(c return _c } - func (_m *MockQueryClient) GetProposerByRollapp(ctx context.Context, in *sequencer.QueryGetProposerByRollappRequest, opts ...grpc.CallOption) (*sequencer.QueryGetProposerByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -136,15 +126,10 @@ func (_m *MockQueryClient) GetProposerByRollapp(ctx context.Context, in *sequenc return r0, r1 } - type MockQueryClient_GetProposerByRollapp_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) GetProposerByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_GetProposerByRollapp_Call { return &MockQueryClient_GetProposerByRollapp_Call{Call: _e.mock.On("GetProposerByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -173,7 +158,6 @@ func (_c *MockQueryClient_GetProposerByRollapp_Call) RunAndReturn(run func(conte return _c } - func (_m *MockQueryClient) Params(ctx context.Context, in *sequencer.QueryParamsRequest, opts ...grpc.CallOption) (*sequencer.QueryParamsResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -210,15 +194,10 @@ func (_m *MockQueryClient) Params(ctx context.Context, in *sequencer.QueryParams return r0, r1 } - type MockQueryClient_Params_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) Params(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Params_Call { return &MockQueryClient_Params_Call{Call: _e.mock.On("Params", append([]interface{}{ctx, in}, opts...)...)} @@ -247,7 +226,6 @@ func (_c *MockQueryClient_Params_Call) RunAndReturn(run func(context.Context, *s return _c } - func (_m *MockQueryClient) Proposers(ctx 
context.Context, in *sequencer.QueryProposersRequest, opts ...grpc.CallOption) (*sequencer.QueryProposersResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -284,15 +262,10 @@ func (_m *MockQueryClient) Proposers(ctx context.Context, in *sequencer.QueryPro return r0, r1 } - type MockQueryClient_Proposers_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) Proposers(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Proposers_Call { return &MockQueryClient_Proposers_Call{Call: _e.mock.On("Proposers", append([]interface{}{ctx, in}, opts...)...)} @@ -321,7 +294,6 @@ func (_c *MockQueryClient_Proposers_Call) RunAndReturn(run func(context.Context, return _c } - func (_m *MockQueryClient) Sequencer(ctx context.Context, in *sequencer.QueryGetSequencerRequest, opts ...grpc.CallOption) (*sequencer.QueryGetSequencerResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -358,15 +330,10 @@ func (_m *MockQueryClient) Sequencer(ctx context.Context, in *sequencer.QueryGet return r0, r1 } - type MockQueryClient_Sequencer_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) Sequencer(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Sequencer_Call { return &MockQueryClient_Sequencer_Call{Call: _e.mock.On("Sequencer", append([]interface{}{ctx, in}, opts...)...)} @@ -395,7 +362,6 @@ func (_c *MockQueryClient_Sequencer_Call) RunAndReturn(run func(context.Context, return _c } - func (_m *MockQueryClient) Sequencers(ctx context.Context, in *sequencer.QuerySequencersRequest, opts ...grpc.CallOption) (*sequencer.QuerySequencersResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -432,15 +398,10 @@ func (_m *MockQueryClient) Sequencers(ctx context.Context, in *sequencer.QuerySe return r0, r1 } - type MockQueryClient_Sequencers_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) Sequencers(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_Sequencers_Call { return &MockQueryClient_Sequencers_Call{Call: _e.mock.On("Sequencers", append([]interface{}{ctx, in}, opts...)...)} @@ -469,7 +430,6 @@ func (_c *MockQueryClient_Sequencers_Call) RunAndReturn(run func(context.Context return _c } - func (_m *MockQueryClient) SequencersByRollapp(ctx context.Context, in *sequencer.QueryGetSequencersByRollappRequest, opts ...grpc.CallOption) (*sequencer.QueryGetSequencersByRollappResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -506,15 +466,10 @@ func (_m *MockQueryClient) SequencersByRollapp(ctx context.Context, in *sequence return r0, r1 } - type MockQueryClient_SequencersByRollapp_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) SequencersByRollapp(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_SequencersByRollapp_Call { return &MockQueryClient_SequencersByRollapp_Call{Call: _e.mock.On("SequencersByRollapp", append([]interface{}{ctx, in}, opts...)...)} @@ -543,7 +498,6 @@ func (_c *MockQueryClient_SequencersByRollapp_Call) RunAndReturn(run func(contex return _c } - func (_m *MockQueryClient) SequencersByRollappByStatus(ctx context.Context, in *sequencer.QueryGetSequencersByRollappByStatusRequest, opts ...grpc.CallOption) (*sequencer.QueryGetSequencersByRollappByStatusResponse, error) { _va := make([]interface{}, len(opts)) for _i := range opts { @@ -580,15 +534,10 @@ func (_m *MockQueryClient) SequencersByRollappByStatus(ctx context.Context, in * 
return r0, r1 } - type MockQueryClient_SequencersByRollappByStatus_Call struct { *mock.Call } - - - - func (_e *MockQueryClient_Expecter) SequencersByRollappByStatus(ctx interface{}, in interface{}, opts ...interface{}) *MockQueryClient_SequencersByRollappByStatus_Call { return &MockQueryClient_SequencersByRollappByStatus_Call{Call: _e.mock.On("SequencersByRollappByStatus", append([]interface{}{ctx, in}, opts...)...)} @@ -617,12 +566,11 @@ func (_c *MockQueryClient_SequencersByRollappByStatus_Call) RunAndReturn(run fun return _c } - - func NewMockQueryClient(t interface { mock.TestingT Cleanup(func()) -}) *MockQueryClient { +}, +) *MockQueryClient { mock := &MockQueryClient{} mock.Mock.Test(t) diff --git a/mocks/github.com/tendermint/tendermint/abci/types/mock_Application.go b/mocks/github.com/tendermint/tendermint/abci/types/mock_Application.go index db13fb1e2..68ea70e70 100644 --- a/mocks/github.com/tendermint/tendermint/abci/types/mock_Application.go +++ b/mocks/github.com/tendermint/tendermint/abci/types/mock_Application.go @@ -1,5 +1,3 @@ - - package types import ( @@ -7,7 +5,6 @@ import ( types "github.com/tendermint/tendermint/abci/types" ) - type MockApplication struct { mock.Mock } @@ -20,7 +17,6 @@ func (_m *MockApplication) EXPECT() *MockApplication_Expecter { return &MockApplication_Expecter{mock: &_m.Mock} } - func (_m *MockApplication) ApplySnapshotChunk(_a0 types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk { ret := _m.Called(_a0) @@ -38,13 +34,10 @@ func (_m *MockApplication) ApplySnapshotChunk(_a0 types.RequestApplySnapshotChun return r0 } - type MockApplication_ApplySnapshotChunk_Call struct { *mock.Call } - - func (_e *MockApplication_Expecter) ApplySnapshotChunk(_a0 interface{}) *MockApplication_ApplySnapshotChunk_Call { return &MockApplication_ApplySnapshotChunk_Call{Call: _e.mock.On("ApplySnapshotChunk", _a0)} } @@ -66,7 +59,6 @@ func (_c *MockApplication_ApplySnapshotChunk_Call) RunAndReturn(run func(types.R return _c } - func (_m *MockApplication) BeginBlock(_a0 types.RequestBeginBlock) types.ResponseBeginBlock { ret := _m.Called(_a0) @@ -84,13 +76,10 @@ func (_m *MockApplication) BeginBlock(_a0 types.RequestBeginBlock) types.Respons return r0 } - type MockApplication_BeginBlock_Call struct { *mock.Call } - - func (_e *MockApplication_Expecter) BeginBlock(_a0 interface{}) *MockApplication_BeginBlock_Call { return &MockApplication_BeginBlock_Call{Call: _e.mock.On("BeginBlock", _a0)} } @@ -112,7 +101,6 @@ func (_c *MockApplication_BeginBlock_Call) RunAndReturn(run func(types.RequestBe return _c } - func (_m *MockApplication) CheckTx(_a0 types.RequestCheckTx) types.ResponseCheckTx { ret := _m.Called(_a0) @@ -130,13 +118,10 @@ func (_m *MockApplication) CheckTx(_a0 types.RequestCheckTx) types.ResponseCheck return r0 } - type MockApplication_CheckTx_Call struct { *mock.Call } - - func (_e *MockApplication_Expecter) CheckTx(_a0 interface{}) *MockApplication_CheckTx_Call { return &MockApplication_CheckTx_Call{Call: _e.mock.On("CheckTx", _a0)} } @@ -158,7 +143,6 @@ func (_c *MockApplication_CheckTx_Call) RunAndReturn(run func(types.RequestCheck return _c } - func (_m *MockApplication) Commit() types.ResponseCommit { ret := _m.Called() @@ -176,12 +160,10 @@ func (_m *MockApplication) Commit() types.ResponseCommit { return r0 } - type MockApplication_Commit_Call struct { *mock.Call } - func (_e *MockApplication_Expecter) Commit() *MockApplication_Commit_Call { return &MockApplication_Commit_Call{Call: _e.mock.On("Commit")} } @@ -203,7 +185,6 @@ func (_c 
*MockApplication_Commit_Call) RunAndReturn(run func() types.ResponseCom return _c } - func (_m *MockApplication) DeliverTx(_a0 types.RequestDeliverTx) types.ResponseDeliverTx { ret := _m.Called(_a0) @@ -221,13 +202,10 @@ func (_m *MockApplication) DeliverTx(_a0 types.RequestDeliverTx) types.ResponseD return r0 } - type MockApplication_DeliverTx_Call struct { *mock.Call } - - func (_e *MockApplication_Expecter) DeliverTx(_a0 interface{}) *MockApplication_DeliverTx_Call { return &MockApplication_DeliverTx_Call{Call: _e.mock.On("DeliverTx", _a0)} } @@ -249,7 +227,6 @@ func (_c *MockApplication_DeliverTx_Call) RunAndReturn(run func(types.RequestDel return _c } - func (_m *MockApplication) EndBlock(_a0 types.RequestEndBlock) types.ResponseEndBlock { ret := _m.Called(_a0) @@ -267,13 +244,10 @@ func (_m *MockApplication) EndBlock(_a0 types.RequestEndBlock) types.ResponseEnd return r0 } - type MockApplication_EndBlock_Call struct { *mock.Call } - - func (_e *MockApplication_Expecter) EndBlock(_a0 interface{}) *MockApplication_EndBlock_Call { return &MockApplication_EndBlock_Call{Call: _e.mock.On("EndBlock", _a0)} } @@ -295,7 +269,6 @@ func (_c *MockApplication_EndBlock_Call) RunAndReturn(run func(types.RequestEndB return _c } - func (_m *MockApplication) Info(_a0 types.RequestInfo) types.ResponseInfo { ret := _m.Called(_a0) @@ -313,13 +286,10 @@ func (_m *MockApplication) Info(_a0 types.RequestInfo) types.ResponseInfo { return r0 } - type MockApplication_Info_Call struct { *mock.Call } - - func (_e *MockApplication_Expecter) Info(_a0 interface{}) *MockApplication_Info_Call { return &MockApplication_Info_Call{Call: _e.mock.On("Info", _a0)} } @@ -341,7 +311,6 @@ func (_c *MockApplication_Info_Call) RunAndReturn(run func(types.RequestInfo) ty return _c } - func (_m *MockApplication) InitChain(_a0 types.RequestInitChain) types.ResponseInitChain { ret := _m.Called(_a0) @@ -359,13 +328,10 @@ func (_m *MockApplication) InitChain(_a0 types.RequestInitChain) types.ResponseI return r0 } - type MockApplication_InitChain_Call struct { *mock.Call } - - func (_e *MockApplication_Expecter) InitChain(_a0 interface{}) *MockApplication_InitChain_Call { return &MockApplication_InitChain_Call{Call: _e.mock.On("InitChain", _a0)} } @@ -387,7 +353,6 @@ func (_c *MockApplication_InitChain_Call) RunAndReturn(run func(types.RequestIni return _c } - func (_m *MockApplication) ListSnapshots(_a0 types.RequestListSnapshots) types.ResponseListSnapshots { ret := _m.Called(_a0) @@ -405,13 +370,10 @@ func (_m *MockApplication) ListSnapshots(_a0 types.RequestListSnapshots) types.R return r0 } - type MockApplication_ListSnapshots_Call struct { *mock.Call } - - func (_e *MockApplication_Expecter) ListSnapshots(_a0 interface{}) *MockApplication_ListSnapshots_Call { return &MockApplication_ListSnapshots_Call{Call: _e.mock.On("ListSnapshots", _a0)} } @@ -433,7 +395,6 @@ func (_c *MockApplication_ListSnapshots_Call) RunAndReturn(run func(types.Reques return _c } - func (_m *MockApplication) LoadSnapshotChunk(_a0 types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk { ret := _m.Called(_a0) @@ -451,13 +412,10 @@ func (_m *MockApplication) LoadSnapshotChunk(_a0 types.RequestLoadSnapshotChunk) return r0 } - type MockApplication_LoadSnapshotChunk_Call struct { *mock.Call } - - func (_e *MockApplication_Expecter) LoadSnapshotChunk(_a0 interface{}) *MockApplication_LoadSnapshotChunk_Call { return &MockApplication_LoadSnapshotChunk_Call{Call: _e.mock.On("LoadSnapshotChunk", _a0)} } @@ -479,7 +437,6 @@ func (_c 
*MockApplication_LoadSnapshotChunk_Call) RunAndReturn(run func(types.Re return _c } - func (_m *MockApplication) OfferSnapshot(_a0 types.RequestOfferSnapshot) types.ResponseOfferSnapshot { ret := _m.Called(_a0) @@ -497,13 +454,10 @@ func (_m *MockApplication) OfferSnapshot(_a0 types.RequestOfferSnapshot) types.R return r0 } - type MockApplication_OfferSnapshot_Call struct { *mock.Call } - - func (_e *MockApplication_Expecter) OfferSnapshot(_a0 interface{}) *MockApplication_OfferSnapshot_Call { return &MockApplication_OfferSnapshot_Call{Call: _e.mock.On("OfferSnapshot", _a0)} } @@ -525,7 +479,6 @@ func (_c *MockApplication_OfferSnapshot_Call) RunAndReturn(run func(types.Reques return _c } - func (_m *MockApplication) Query(_a0 types.RequestQuery) types.ResponseQuery { ret := _m.Called(_a0) @@ -543,13 +496,10 @@ func (_m *MockApplication) Query(_a0 types.RequestQuery) types.ResponseQuery { return r0 } - type MockApplication_Query_Call struct { *mock.Call } - - func (_e *MockApplication_Expecter) Query(_a0 interface{}) *MockApplication_Query_Call { return &MockApplication_Query_Call{Call: _e.mock.On("Query", _a0)} } @@ -571,7 +521,6 @@ func (_c *MockApplication_Query_Call) RunAndReturn(run func(types.RequestQuery) return _c } - func (_m *MockApplication) SetOption(_a0 types.RequestSetOption) types.ResponseSetOption { ret := _m.Called(_a0) @@ -589,13 +538,10 @@ func (_m *MockApplication) SetOption(_a0 types.RequestSetOption) types.ResponseS return r0 } - type MockApplication_SetOption_Call struct { *mock.Call } - - func (_e *MockApplication_Expecter) SetOption(_a0 interface{}) *MockApplication_SetOption_Call { return &MockApplication_SetOption_Call{Call: _e.mock.On("SetOption", _a0)} } @@ -617,12 +563,11 @@ func (_c *MockApplication_SetOption_Call) RunAndReturn(run func(types.RequestSet return _c } - - func NewMockApplication(t interface { mock.TestingT Cleanup(func()) -}) *MockApplication { +}, +) *MockApplication { mock := &MockApplication{} mock.Mock.Test(t) diff --git a/mocks/github.com/tendermint/tendermint/proxy/mock_AppConnConsensus.go b/mocks/github.com/tendermint/tendermint/proxy/mock_AppConnConsensus.go index fc03566e5..a63f14ea9 100644 --- a/mocks/github.com/tendermint/tendermint/proxy/mock_AppConnConsensus.go +++ b/mocks/github.com/tendermint/tendermint/proxy/mock_AppConnConsensus.go @@ -1,5 +1,3 @@ - - package proxy import ( @@ -9,7 +7,6 @@ import ( types "github.com/tendermint/tendermint/abci/types" ) - type MockAppConnConsensus struct { mock.Mock } @@ -22,7 +19,6 @@ func (_m *MockAppConnConsensus) EXPECT() *MockAppConnConsensus_Expecter { return &MockAppConnConsensus_Expecter{mock: &_m.Mock} } - func (_m *MockAppConnConsensus) BeginBlockSync(_a0 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { ret := _m.Called(_a0) @@ -52,13 +48,10 @@ func (_m *MockAppConnConsensus) BeginBlockSync(_a0 types.RequestBeginBlock) (*ty return r0, r1 } - type MockAppConnConsensus_BeginBlockSync_Call struct { *mock.Call } - - func (_e *MockAppConnConsensus_Expecter) BeginBlockSync(_a0 interface{}) *MockAppConnConsensus_BeginBlockSync_Call { return &MockAppConnConsensus_BeginBlockSync_Call{Call: _e.mock.On("BeginBlockSync", _a0)} } @@ -80,7 +73,6 @@ func (_c *MockAppConnConsensus_BeginBlockSync_Call) RunAndReturn(run func(types. 
return _c } - func (_m *MockAppConnConsensus) CommitSync() (*types.ResponseCommit, error) { ret := _m.Called() @@ -110,12 +102,10 @@ func (_m *MockAppConnConsensus) CommitSync() (*types.ResponseCommit, error) { return r0, r1 } - type MockAppConnConsensus_CommitSync_Call struct { *mock.Call } - func (_e *MockAppConnConsensus_Expecter) CommitSync() *MockAppConnConsensus_CommitSync_Call { return &MockAppConnConsensus_CommitSync_Call{Call: _e.mock.On("CommitSync")} } @@ -137,7 +127,6 @@ func (_c *MockAppConnConsensus_CommitSync_Call) RunAndReturn(run func() (*types. return _c } - func (_m *MockAppConnConsensus) DeliverTxAsync(_a0 types.RequestDeliverTx) *abcicli.ReqRes { ret := _m.Called(_a0) @@ -157,13 +146,10 @@ func (_m *MockAppConnConsensus) DeliverTxAsync(_a0 types.RequestDeliverTx) *abci return r0 } - type MockAppConnConsensus_DeliverTxAsync_Call struct { *mock.Call } - - func (_e *MockAppConnConsensus_Expecter) DeliverTxAsync(_a0 interface{}) *MockAppConnConsensus_DeliverTxAsync_Call { return &MockAppConnConsensus_DeliverTxAsync_Call{Call: _e.mock.On("DeliverTxAsync", _a0)} } @@ -185,7 +171,6 @@ func (_c *MockAppConnConsensus_DeliverTxAsync_Call) RunAndReturn(run func(types. return _c } - func (_m *MockAppConnConsensus) EndBlockSync(_a0 types.RequestEndBlock) (*types.ResponseEndBlock, error) { ret := _m.Called(_a0) @@ -215,13 +200,10 @@ func (_m *MockAppConnConsensus) EndBlockSync(_a0 types.RequestEndBlock) (*types. return r0, r1 } - type MockAppConnConsensus_EndBlockSync_Call struct { *mock.Call } - - func (_e *MockAppConnConsensus_Expecter) EndBlockSync(_a0 interface{}) *MockAppConnConsensus_EndBlockSync_Call { return &MockAppConnConsensus_EndBlockSync_Call{Call: _e.mock.On("EndBlockSync", _a0)} } @@ -243,7 +225,6 @@ func (_c *MockAppConnConsensus_EndBlockSync_Call) RunAndReturn(run func(types.Re return _c } - func (_m *MockAppConnConsensus) Error() error { ret := _m.Called() @@ -261,12 +242,10 @@ func (_m *MockAppConnConsensus) Error() error { return r0 } - type MockAppConnConsensus_Error_Call struct { *mock.Call } - func (_e *MockAppConnConsensus_Expecter) Error() *MockAppConnConsensus_Error_Call { return &MockAppConnConsensus_Error_Call{Call: _e.mock.On("Error")} } @@ -288,7 +267,6 @@ func (_c *MockAppConnConsensus_Error_Call) RunAndReturn(run func() error) *MockA return _c } - func (_m *MockAppConnConsensus) InitChainSync(_a0 types.RequestInitChain) (*types.ResponseInitChain, error) { ret := _m.Called(_a0) @@ -318,13 +296,10 @@ func (_m *MockAppConnConsensus) InitChainSync(_a0 types.RequestInitChain) (*type return r0, r1 } - type MockAppConnConsensus_InitChainSync_Call struct { *mock.Call } - - func (_e *MockAppConnConsensus_Expecter) InitChainSync(_a0 interface{}) *MockAppConnConsensus_InitChainSync_Call { return &MockAppConnConsensus_InitChainSync_Call{Call: _e.mock.On("InitChainSync", _a0)} } @@ -346,18 +321,14 @@ func (_c *MockAppConnConsensus_InitChainSync_Call) RunAndReturn(run func(types.R return _c } - func (_m *MockAppConnConsensus) SetResponseCallback(_a0 abcicli.Callback) { _m.Called(_a0) } - type MockAppConnConsensus_SetResponseCallback_Call struct { *mock.Call } - - func (_e *MockAppConnConsensus_Expecter) SetResponseCallback(_a0 interface{}) *MockAppConnConsensus_SetResponseCallback_Call { return &MockAppConnConsensus_SetResponseCallback_Call{Call: _e.mock.On("SetResponseCallback", _a0)} } @@ -379,12 +350,11 @@ func (_c *MockAppConnConsensus_SetResponseCallback_Call) RunAndReturn(run func(a return _c } - - func NewMockAppConnConsensus(t interface { mock.TestingT 
Cleanup(func()) -}) *MockAppConnConsensus { +}, +) *MockAppConnConsensus { mock := &MockAppConnConsensus{} mock.Mock.Test(t) diff --git a/mocks/github.com/tendermint/tendermint/proxy/mock_AppConns.go b/mocks/github.com/tendermint/tendermint/proxy/mock_AppConns.go index ea1b7934a..dcada3d46 100644 --- a/mocks/github.com/tendermint/tendermint/proxy/mock_AppConns.go +++ b/mocks/github.com/tendermint/tendermint/proxy/mock_AppConns.go @@ -1,5 +1,3 @@ - - package proxy import ( @@ -9,7 +7,6 @@ import ( proxy "github.com/tendermint/tendermint/proxy" ) - type MockAppConns struct { mock.Mock } @@ -22,7 +19,6 @@ func (_m *MockAppConns) EXPECT() *MockAppConns_Expecter { return &MockAppConns_Expecter{mock: &_m.Mock} } - func (_m *MockAppConns) Consensus() proxy.AppConnConsensus { ret := _m.Called() @@ -42,12 +38,10 @@ func (_m *MockAppConns) Consensus() proxy.AppConnConsensus { return r0 } - type MockAppConns_Consensus_Call struct { *mock.Call } - func (_e *MockAppConns_Expecter) Consensus() *MockAppConns_Consensus_Call { return &MockAppConns_Consensus_Call{Call: _e.mock.On("Consensus")} } @@ -69,7 +63,6 @@ func (_c *MockAppConns_Consensus_Call) RunAndReturn(run func() proxy.AppConnCons return _c } - func (_m *MockAppConns) IsRunning() bool { ret := _m.Called() @@ -87,12 +80,10 @@ func (_m *MockAppConns) IsRunning() bool { return r0 } - type MockAppConns_IsRunning_Call struct { *mock.Call } - func (_e *MockAppConns_Expecter) IsRunning() *MockAppConns_IsRunning_Call { return &MockAppConns_IsRunning_Call{Call: _e.mock.On("IsRunning")} } @@ -114,7 +105,6 @@ func (_c *MockAppConns_IsRunning_Call) RunAndReturn(run func() bool) *MockAppCon return _c } - func (_m *MockAppConns) Mempool() proxy.AppConnMempool { ret := _m.Called() @@ -134,12 +124,10 @@ func (_m *MockAppConns) Mempool() proxy.AppConnMempool { return r0 } - type MockAppConns_Mempool_Call struct { *mock.Call } - func (_e *MockAppConns_Expecter) Mempool() *MockAppConns_Mempool_Call { return &MockAppConns_Mempool_Call{Call: _e.mock.On("Mempool")} } @@ -161,7 +149,6 @@ func (_c *MockAppConns_Mempool_Call) RunAndReturn(run func() proxy.AppConnMempoo return _c } - func (_m *MockAppConns) OnReset() error { ret := _m.Called() @@ -179,12 +166,10 @@ func (_m *MockAppConns) OnReset() error { return r0 } - type MockAppConns_OnReset_Call struct { *mock.Call } - func (_e *MockAppConns_Expecter) OnReset() *MockAppConns_OnReset_Call { return &MockAppConns_OnReset_Call{Call: _e.mock.On("OnReset")} } @@ -206,7 +191,6 @@ func (_c *MockAppConns_OnReset_Call) RunAndReturn(run func() error) *MockAppConn return _c } - func (_m *MockAppConns) OnStart() error { ret := _m.Called() @@ -224,12 +208,10 @@ func (_m *MockAppConns) OnStart() error { return r0 } - type MockAppConns_OnStart_Call struct { *mock.Call } - func (_e *MockAppConns_Expecter) OnStart() *MockAppConns_OnStart_Call { return &MockAppConns_OnStart_Call{Call: _e.mock.On("OnStart")} } @@ -251,17 +233,14 @@ func (_c *MockAppConns_OnStart_Call) RunAndReturn(run func() error) *MockAppConn return _c } - func (_m *MockAppConns) OnStop() { _m.Called() } - type MockAppConns_OnStop_Call struct { *mock.Call } - func (_e *MockAppConns_Expecter) OnStop() *MockAppConns_OnStop_Call { return &MockAppConns_OnStop_Call{Call: _e.mock.On("OnStop")} } @@ -283,7 +262,6 @@ func (_c *MockAppConns_OnStop_Call) RunAndReturn(run func()) *MockAppConns_OnSto return _c } - func (_m *MockAppConns) Query() proxy.AppConnQuery { ret := _m.Called() @@ -303,12 +281,10 @@ func (_m *MockAppConns) Query() proxy.AppConnQuery { return r0 } - type 
MockAppConns_Query_Call struct { *mock.Call } - func (_e *MockAppConns_Expecter) Query() *MockAppConns_Query_Call { return &MockAppConns_Query_Call{Call: _e.mock.On("Query")} } @@ -330,7 +306,6 @@ func (_c *MockAppConns_Query_Call) RunAndReturn(run func() proxy.AppConnQuery) * return _c } - func (_m *MockAppConns) Quit() <-chan struct{} { ret := _m.Called() @@ -350,12 +325,10 @@ func (_m *MockAppConns) Quit() <-chan struct{} { return r0 } - type MockAppConns_Quit_Call struct { *mock.Call } - func (_e *MockAppConns_Expecter) Quit() *MockAppConns_Quit_Call { return &MockAppConns_Quit_Call{Call: _e.mock.On("Quit")} } @@ -377,7 +350,6 @@ func (_c *MockAppConns_Quit_Call) RunAndReturn(run func() <-chan struct{}) *Mock return _c } - func (_m *MockAppConns) Reset() error { ret := _m.Called() @@ -395,12 +367,10 @@ func (_m *MockAppConns) Reset() error { return r0 } - type MockAppConns_Reset_Call struct { *mock.Call } - func (_e *MockAppConns_Expecter) Reset() *MockAppConns_Reset_Call { return &MockAppConns_Reset_Call{Call: _e.mock.On("Reset")} } @@ -422,18 +392,14 @@ func (_c *MockAppConns_Reset_Call) RunAndReturn(run func() error) *MockAppConns_ return _c } - func (_m *MockAppConns) SetLogger(_a0 log.Logger) { _m.Called(_a0) } - type MockAppConns_SetLogger_Call struct { *mock.Call } - - func (_e *MockAppConns_Expecter) SetLogger(_a0 interface{}) *MockAppConns_SetLogger_Call { return &MockAppConns_SetLogger_Call{Call: _e.mock.On("SetLogger", _a0)} } @@ -455,7 +421,6 @@ func (_c *MockAppConns_SetLogger_Call) RunAndReturn(run func(log.Logger)) *MockA return _c } - func (_m *MockAppConns) Snapshot() proxy.AppConnSnapshot { ret := _m.Called() @@ -475,12 +440,10 @@ func (_m *MockAppConns) Snapshot() proxy.AppConnSnapshot { return r0 } - type MockAppConns_Snapshot_Call struct { *mock.Call } - func (_e *MockAppConns_Expecter) Snapshot() *MockAppConns_Snapshot_Call { return &MockAppConns_Snapshot_Call{Call: _e.mock.On("Snapshot")} } @@ -502,7 +465,6 @@ func (_c *MockAppConns_Snapshot_Call) RunAndReturn(run func() proxy.AppConnSnaps return _c } - func (_m *MockAppConns) Start() error { ret := _m.Called() @@ -520,12 +482,10 @@ func (_m *MockAppConns) Start() error { return r0 } - type MockAppConns_Start_Call struct { *mock.Call } - func (_e *MockAppConns_Expecter) Start() *MockAppConns_Start_Call { return &MockAppConns_Start_Call{Call: _e.mock.On("Start")} } @@ -547,7 +507,6 @@ func (_c *MockAppConns_Start_Call) RunAndReturn(run func() error) *MockAppConns_ return _c } - func (_m *MockAppConns) Stop() error { ret := _m.Called() @@ -565,12 +524,10 @@ func (_m *MockAppConns) Stop() error { return r0 } - type MockAppConns_Stop_Call struct { *mock.Call } - func (_e *MockAppConns_Expecter) Stop() *MockAppConns_Stop_Call { return &MockAppConns_Stop_Call{Call: _e.mock.On("Stop")} } @@ -592,7 +549,6 @@ func (_c *MockAppConns_Stop_Call) RunAndReturn(run func() error) *MockAppConns_S return _c } - func (_m *MockAppConns) String() string { ret := _m.Called() @@ -610,12 +566,10 @@ func (_m *MockAppConns) String() string { return r0 } - type MockAppConns_String_Call struct { *mock.Call } - func (_e *MockAppConns_Expecter) String() *MockAppConns_String_Call { return &MockAppConns_String_Call{Call: _e.mock.On("String")} } @@ -637,12 +591,11 @@ func (_c *MockAppConns_String_Call) RunAndReturn(run func() string) *MockAppConn return _c } - - func NewMockAppConns(t interface { mock.TestingT Cleanup(func()) -}) *MockAppConns { +}, +) *MockAppConns { mock := &MockAppConns{} mock.Mock.Test(t) diff --git a/node/events/types.go 
b/node/events/types.go index 069ee116c..75aad4255 100644 --- a/node/events/types.go +++ b/node/events/types.go @@ -6,24 +6,17 @@ import ( uevent "github.com/dymensionxyz/dymint/utils/event" ) - const ( - NodeTypeKey = "node.event" ) - - const ( HealthStatus = "HealthStatus" ) - - var HealthStatusList = map[string][]string{NodeTypeKey: {HealthStatus}} type DataHealthStatus struct { - Error error } @@ -31,6 +24,4 @@ func (dhs DataHealthStatus) String() string { return fmt.Sprintf("DataHealthStatus{Error: %v}", dhs.Error) } - - var QueryHealthStatus = uevent.QueryFor(NodeTypeKey, HealthStatus) diff --git a/node/mempool/mempool.go b/node/mempool/mempool.go index 1d7c53310..4a588a8bb 100644 --- a/node/mempool/mempool.go +++ b/node/mempool/mempool.go @@ -15,12 +15,10 @@ const ( type MempoolIDs struct { mtx tmsync.RWMutex peerMap map[peer.ID]uint16 - nextID uint16 - activeIDs map[uint16]struct{} + nextID uint16 + activeIDs map[uint16]struct{} } - - func (ids *MempoolIDs) ReserveForPeer(peer peer.ID) { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -30,8 +28,6 @@ func (ids *MempoolIDs) ReserveForPeer(peer peer.ID) { ids.activeIDs[curID] = struct{}{} } - - func (ids *MempoolIDs) nextPeerID() uint16 { if len(ids.activeIDs) == maxActiveIDs { panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", maxActiveIDs)) @@ -47,7 +43,6 @@ func (ids *MempoolIDs) nextPeerID() uint16 { return curID } - func (ids *MempoolIDs) Reclaim(peer peer.ID) { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -59,7 +54,6 @@ func (ids *MempoolIDs) Reclaim(peer peer.ID) { } } - func (ids *MempoolIDs) GetForPeer(peer peer.ID) uint16 { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -78,6 +72,6 @@ func NewMempoolIDs() *MempoolIDs { return &MempoolIDs{ peerMap: make(map[peer.ID]uint16), activeIDs: map[uint16]struct{}{0: {}}, - nextID: 1, + nextID: 1, } } diff --git a/node/node.go b/node/node.go index 7cfe74792..abee7459c 100644 --- a/node/node.go +++ b/node/node.go @@ -34,15 +34,12 @@ import ( "github.com/dymensionxyz/dymint/store" ) - var ( mainPrefix = []byte{0} dalcPrefix = []byte{1} indexerPrefix = []byte{2} ) - - type Node struct { service.BaseService eventBus *tmtypes.EventBus @@ -54,7 +51,6 @@ type Node struct { conf config.NodeConfig P2P *p2p.Client - Mempool mempool.Mempool MempoolIDs *nodemempool.MempoolIDs incomingTxCh chan *p2p.GossipMessage @@ -68,12 +64,10 @@ type Node struct { BlockIndexer indexer.BlockIndexer IndexerService *txindex.IndexerService - ctx context.Context cancel context.CancelFunc } - func NewNode( ctx context.Context, conf config.NodeConfig, @@ -102,12 +96,12 @@ func NewNode( var baseKV store.KV var dstore datastore.Datastore - if conf.DBConfig.InMemory || (conf.RootDir == "" && conf.DBPath == "") { + if conf.DBConfig.InMemory || (conf.RootDir == "" && conf.DBPath == "") { logger.Info("WARNING: working in in-memory mode") baseKV = store.NewDefaultInMemoryKVStore() dstore = datastore.NewMapDatastore() } else { - + baseKV = store.NewKVStore(conf.RootDir, conf.DBPath, "dymint", conf.DBConfig.SyncWrites, logger) path := filepath.Join(store.Rootify(conf.RootDir, conf.DBPath), "blocksync") var err error @@ -120,9 +114,8 @@ func NewNode( s := store.New(store.NewPrefixKV(baseKV, mainPrefix)) indexerKV := store.NewPrefixKV(baseKV, indexerPrefix) - dalcKV := store.NewPrefixKV(baseKV, dalcPrefix) - + settlementlc := slregistry.GetClient(slregistry.Client(conf.SettlementLayer)) if settlementlc == nil { return nil, fmt.Errorf("get settlement client: named: %s", conf.SettlementLayer) @@ -161,7 +154,7 @@ func 
NewNode( settlementlc, eventBus, pubsubServer, - nil, + nil, dalcKV, indexerService, logger, @@ -170,7 +163,6 @@ func NewNode( return nil, fmt.Errorf("BlockManager initialization: %w", err) } - p2pValidator := p2p.NewValidator(logger.With("module", "p2p_validator"), blockManager) p2pClient, err := p2p.NewClient(conf.P2PConfig, p2pKey, genesis.ChainID, s, pubsubServer, dstore, logger.With("module", "p2p")) if err != nil { @@ -179,7 +171,6 @@ func NewNode( p2pClient.SetTxValidator(p2pValidator.TxValidator(mp, mpIDs)) p2pClient.SetBlockValidator(p2pValidator.BlockValidator()) - blockManager.P2PClient = p2pClient ctx, cancel := context.WithCancel(ctx) @@ -209,7 +200,6 @@ func NewNode( return node, nil } - func (n *Node) OnStart() error { n.Logger.Info("starting P2P client") err := n.P2P.Start(n.ctx) @@ -234,7 +224,6 @@ func (n *Node) OnStart() error { } }() - err = n.BlockManager.Start(n.ctx) if err != nil { return fmt.Errorf("while starting block manager: %w", err) @@ -243,12 +232,10 @@ func (n *Node) OnStart() error { return nil } - func (n *Node) GetGenesis() *tmtypes.GenesisDoc { return n.genesis } - func (n *Node) OnStop() { err := n.BlockManager.DAClient.Stop() if err != nil { @@ -273,32 +260,26 @@ func (n *Node) OnStop() { n.cancel() } - func (n *Node) OnReset() error { panic("OnReset - not implemented!") } - func (n *Node) SetLogger(logger log.Logger) { n.Logger = logger } - func (n *Node) GetLogger() log.Logger { return n.Logger } - func (n *Node) EventBus() *tmtypes.EventBus { return n.eventBus } - func (n *Node) PubSubServer() *pubsub.Server { return n.PubsubServer } - func (n *Node) ProxyApp() proxy.AppConns { return n.proxyApp } diff --git a/node/node_test.go b/node/node_test.go index 65f262fab..392926769 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -33,7 +33,6 @@ import ( // simply check that node is starting and stopping without panicking func TestStartup(t *testing.T) { - version.DRS = "0" assert := assert.New(t) require := require.New(t) diff --git a/p2p/block.go b/p2p/block.go index 754c17973..931304b8c 100644 --- a/p2p/block.go +++ b/p2p/block.go @@ -6,24 +6,16 @@ import ( tmcrypto "github.com/tendermint/tendermint/crypto" ) - - - - - type BlockData struct { - Block types.Block - + Commit types.Commit } - func (b *BlockData) MarshalBinary() ([]byte, error) { return b.ToProto().Marshal() } - func (b *BlockData) UnmarshalBinary(data []byte) error { var pbBlock pb.BlockData err := pbBlock.Unmarshal(data) @@ -34,7 +26,6 @@ func (b *BlockData) UnmarshalBinary(data []byte) error { return err } - func (b *BlockData) ToProto() *pb.BlockData { return &pb.BlockData{ Block: b.Block.ToProto(), @@ -42,7 +33,6 @@ func (b *BlockData) ToProto() *pb.BlockData { } } - func (b *BlockData) FromProto(other *pb.BlockData) error { if err := b.Block.FromProto(other.Block); err != nil { return err @@ -53,7 +43,6 @@ func (b *BlockData) FromProto(other *pb.BlockData) error { return nil } - func (b *BlockData) Validate(proposerPubKey tmcrypto.PubKey) error { if err := b.Block.ValidateBasic(); err != nil { return err diff --git a/p2p/block_sync.go b/p2p/block_sync.go index 49cfefb93..f09ae9a57 100644 --- a/p2p/block_sync.go +++ b/p2p/block_sync.go @@ -20,48 +20,36 @@ import ( "github.com/libp2p/go-libp2p/core/host" ) - - - - - type BlockSync struct { - bsrv blockservice.BlockService - + bstore blockstore.Blockstore - + net network.BitSwapNetwork - + dsrv BlockSyncDagService - + cidBuilder cid.Builder logger types.Logger } type BlockSyncMessageHandler func(block *BlockData) - func 
SetupBlockSync(ctx context.Context, h host.Host, store datastore.Datastore, logger types.Logger) *BlockSync { - ds := dsync.MutexWrap(store) - bs := blockstore.NewBlockstore(ds) - bsnet := network.NewFromIpfsHost(h, &routinghelpers.Null{}, network.Prefix("/dymension/block-sync/")) - bsserver := server.New( ctx, bsnet, bs, - server.ProvideEnabled(false), + server.ProvideEnabled(false), server.SetSendDontHaves(false), ) - bsclient := client.New( ctx, bsnet, @@ -71,7 +59,6 @@ func SetupBlockSync(ctx context.Context, h host.Host, store datastore.Datastore, client.WithoutDuplicatedBlockStats(), ) - bsnet.Start(bsserver, bsclient) bsrv := blockservice.New(bs, bsclient) @@ -93,12 +80,10 @@ func SetupBlockSync(ctx context.Context, h host.Host, store datastore.Datastore, return blockSync } - func (blocksync *BlockSync) SaveBlock(ctx context.Context, block []byte) (cid.Cid, error) { return blocksync.dsrv.SaveBlock(ctx, block) } - func (blocksync *BlockSync) LoadBlock(ctx context.Context, cid cid.Cid) (BlockData, error) { blockBytes, err := blocksync.dsrv.LoadBlock(ctx, cid) if err != nil { @@ -111,7 +96,6 @@ func (blocksync *BlockSync) LoadBlock(ctx context.Context, cid cid.Cid) (BlockDa return block, nil } - func (blocksync *BlockSync) DeleteBlock(ctx context.Context, cid cid.Cid) error { return blocksync.dsrv.DeleteBlock(ctx, cid) } diff --git a/p2p/block_sync_dag.go b/p2p/block_sync_dag.go index 2502d9cd5..bdd65c6c9 100644 --- a/p2p/block_sync_dag.go +++ b/p2p/block_sync_dag.go @@ -21,8 +21,6 @@ type BlockSyncDagService struct { cidBuilder cid.Builder } - - func NewDAGService(bsrv blockservice.BlockService) BlockSyncDagService { bsDagService := &BlockSyncDagService{ cidBuilder: &cid.Prefix{ @@ -37,15 +35,12 @@ func NewDAGService(bsrv blockservice.BlockService) BlockSyncDagService { return *bsDagService } - - func (bsDagService *BlockSyncDagService) SaveBlock(ctx context.Context, block []byte) (cid.Cid, error) { blockReader := bytes.NewReader(block) splitter := chunker.NewSizeSplitter(blockReader, chunker.DefaultBlockSize) nodes := []*dag.ProtoNode{} - for { nextData, err := splitter.NextBytes() if err == io.EOF { @@ -63,14 +58,12 @@ func (bsDagService *BlockSyncDagService) SaveBlock(ctx context.Context, block [] } - root := dag.NodeWithData(nil) err := root.SetCidBuilder(bsDagService.cidBuilder) if err != nil { return cid.Undef, err } - for _, n := range nodes { err := root.AddNodeLink(n.Cid().String(), n) @@ -90,21 +83,17 @@ func (bsDagService *BlockSyncDagService) SaveBlock(ctx context.Context, block [] return root.Cid(), nil } - func (bsDagService *BlockSyncDagService) LoadBlock(ctx context.Context, cid cid.Cid) ([]byte, error) { - nd, err := bsDagService.Get(ctx, cid) if err != nil { return nil, err } - read, err := dagReader(nd, bsDagService) if err != nil { return nil, err } - data, err := io.ReadAll(read) if err != nil { return nil, err @@ -113,13 +102,11 @@ func (bsDagService *BlockSyncDagService) LoadBlock(ctx context.Context, cid cid. 
} func (bsDagService *BlockSyncDagService) DeleteBlock(ctx context.Context, cid cid.Cid) error { - root, err := bsDagService.Get(ctx, cid) if err != nil { return err } - for _, l := range root.Links() { err := bsDagService.Remove(ctx, l.Cid) if err != nil { @@ -129,12 +116,10 @@ func (bsDagService *BlockSyncDagService) DeleteBlock(ctx context.Context, cid ci return nil } - func dagReader(root ipld.Node, ds ipld.DAGService) (io.Reader, error) { ctx := context.Background() buf := new(bytes.Buffer) - for _, l := range root.Links() { n, err := ds.Get(ctx, l.Cid) if err != nil { diff --git a/p2p/blocks_received.go b/p2p/blocks_received.go index 0541f9599..f8e07e076 100644 --- a/p2p/blocks_received.go +++ b/p2p/blocks_received.go @@ -2,15 +2,13 @@ package p2p import "sync" - type BlocksReceived struct { blocksReceived map[uint64]struct{} latestSeenHeight uint64 - + blockReceivedMu sync.Mutex } - func (br *BlocksReceived) AddBlockReceived(height uint64) { br.latestSeenHeight = max(height, br.latestSeenHeight) br.blockReceivedMu.Lock() @@ -18,7 +16,6 @@ func (br *BlocksReceived) AddBlockReceived(height uint64) { br.blocksReceived[height] = struct{}{} } - func (br *BlocksReceived) IsBlockReceived(height uint64) bool { br.blockReceivedMu.Lock() defer br.blockReceivedMu.Unlock() @@ -26,7 +23,6 @@ func (br *BlocksReceived) IsBlockReceived(height uint64) bool { return ok } - func (br *BlocksReceived) RemoveBlocksReceivedUpToHeight(appliedHeight uint64) { br.blockReceivedMu.Lock() defer br.blockReceivedMu.Unlock() @@ -37,7 +33,6 @@ func (br *BlocksReceived) RemoveBlocksReceivedUpToHeight(appliedHeight uint64) { } } - func (br *BlocksReceived) GetLatestSeenHeight() uint64 { return br.latestSeenHeight } diff --git a/p2p/client.go b/p2p/client.go index d21979efe..3672b2138 100644 --- a/p2p/client.go +++ b/p2p/client.go @@ -33,29 +33,18 @@ import ( "github.com/dymensionxyz/dymint/types" ) - const ( - reAdvertisePeriod = 1 * time.Hour - peerLimit = 60 - txTopicSuffix = "-tx" - blockTopicSuffix = "-block" - blockSyncProtocolPrefix = "block-sync" ) - - - - - type Client struct { conf config.P2PConfig chainID string @@ -71,18 +60,14 @@ type Client struct { blockGossiper *Gossiper blockValidator GossipValidator - - cancel context.CancelFunc localPubsubServer *tmpubsub.Server logger types.Logger - blocksync *BlockSync - blockSyncStore datastore.Datastore store store.Store @@ -90,10 +75,6 @@ type Client struct { blocksReceived *BlocksReceived } - - - - func NewClient(conf config.P2PConfig, privKey crypto.PrivKey, chainID string, store store.Store, localPubsubServer *tmpubsub.Server, blockSyncStore datastore.Datastore, logger types.Logger) (*Client, error) { if privKey == nil { return nil, fmt.Errorf("private key: %w", gerrc.ErrNotFound) @@ -116,15 +97,7 @@ func NewClient(conf config.P2PConfig, privKey crypto.PrivKey, chainID string, st }, nil } - - - - - - - func (c *Client) Start(ctx context.Context) error { - ctx, c.cancel = context.WithCancel(ctx) host, err := c.listen() if err != nil { @@ -171,7 +144,6 @@ func (c *Client) StartWithHost(ctx context.Context, h host.Host) error { return nil } - func (c *Client) Close() error { c.cancel() @@ -183,24 +155,20 @@ func (c *Client) Close() error { ) } - func (c *Client) GossipTx(ctx context.Context, tx []byte) error { c.logger.Debug("Gossiping transaction.", "len", len(tx)) return c.txGossiper.Publish(ctx, tx) } - func (c *Client) SetTxValidator(val GossipValidator) { c.txValidator = val } - func (c *Client) GossipBlock(ctx context.Context, blockBytes []byte) error { 
c.logger.Debug("Gossiping block.", "len", len(blockBytes)) return c.blockGossiper.Publish(ctx, blockBytes) } - func (c *Client) SaveBlock(ctx context.Context, height uint64, revision uint64, blockBytes []byte) error { if !c.conf.BlockSyncEnabled { return nil @@ -228,7 +196,6 @@ func (c *Client) SaveBlock(ctx context.Context, height uint64, revision uint64, return nil } - func (c *Client) RemoveBlocks(ctx context.Context, to uint64) (uint64, error) { prunedBlocks := uint64(0) @@ -269,13 +236,11 @@ func (c *Client) RemoveBlocks(ctx context.Context, to uint64) (uint64, error) { return prunedBlocks, nil } - func (c *Client) AdvertiseBlockIdToDHT(ctx context.Context, height uint64, revision uint64, cid cid.Cid) error { err := c.DHT.PutValue(ctx, getBlockSyncKeyByHeight(height, revision), []byte(cid.String())) return err } - func (c *Client) GetBlockIdFromDHT(ctx context.Context, height uint64, revision uint64) (cid.Cid, error) { cidBytes, err := c.DHT.GetValue(ctx, getBlockSyncKeyByHeight(height, revision)) if err != nil { @@ -288,23 +253,18 @@ func (c *Client) UpdateLatestSeenHeight(height uint64) { c.blocksReceived.latestSeenHeight = max(height, c.blocksReceived.latestSeenHeight) } - func (c *Client) SetBlockValidator(validator GossipValidator) { c.blockValidator = validator } - func (c *Client) Addrs() []multiaddr.Multiaddr { return c.Host.Addrs() } - func (c *Client) Info() (p2p.ID, string, string) { return p2p.ID(hex.EncodeToString([]byte(c.Host.ID()))), c.conf.ListenAddress, c.chainID } - - type PeerConnection struct { NodeInfo p2p.DefaultNodeInfo `json:"node_info"` IsOutbound bool `json:"is_outbound"` @@ -312,7 +272,6 @@ type PeerConnection struct { RemoteIP string `json:"remote_ip"` } - func (c *Client) Peers() []PeerConnection { conns := c.Host.Network().Conns() res := make([]PeerConnection, 0, len(conns)) @@ -322,12 +281,10 @@ func (c *Client) Peers() []PeerConnection { ListenAddr: c.conf.ListenAddress, Network: c.chainID, DefaultNodeID: p2p.ID(conn.RemotePeer().String()), - }, IsOutbound: conn.Stat().Direction == network.DirOutbound, ConnectionStatus: p2p.ConnectionStatus{ Duration: time.Since(conn.Stat().Opened), - }, RemoteIP: conn.RemoteMultiaddr().String(), } @@ -407,7 +364,6 @@ func (c *Client) peerDiscovery(ctx context.Context) error { } func (c *Client) setupPeerDiscovery(ctx context.Context) error { - select { case <-ctx.Done(): return nil @@ -443,7 +399,6 @@ func (c *Client) findPeers(ctx context.Context) error { return nil } - func (c *Client) tryConnect(ctx context.Context, peer peer.AddrInfo) { c.logger.Debug("Trying to connect to peer.", "peer", peer) err := c.Host.Connect(ctx, peer) @@ -463,7 +418,6 @@ func (c *Client) setupGossiping(ctx context.Context) error { return err } - c.txGossiper, err = NewGossiper(c.Host, ps, c.getTxTopic(), nil, c.logger, WithValidator(c.txValidator)) if err != nil { return err @@ -502,43 +456,33 @@ func (c *Client) GetSeedAddrInfo(seedStr string) []peer.AddrInfo { return addrs } - - - - func (c *Client) getNamespace() string { return c.chainID } - func (c *Client) getTxTopic() string { return c.getNamespace() + txTopicSuffix } - func (c *Client) getBlockTopic() string { return c.getNamespace() + blockTopicSuffix } - - func (c *Client) NewTxValidator() GossipValidator { return func(g *GossipMessage) bool { return true } } - func (c *Client) blockSyncReceived(block *BlockData) { err := c.localPubsubServer.PublishWithEvents(context.Background(), *block, map[string][]string{EventTypeKey: {EventNewBlockSyncBlock}}) if err != nil { 
c.logger.Error("Publishing event.", "err", err) } - + c.blocksReceived.AddBlockReceived(block.Block.Header.Height) } - func (c *Client) blockGossipReceived(ctx context.Context, block []byte) { var gossipedBlock BlockData if err := gossipedBlock.UnmarshalBinary(block); err != nil { @@ -550,7 +494,7 @@ func (c *Client) blockGossipReceived(ctx context.Context, block []byte) { } if c.conf.BlockSyncEnabled { _, err := c.store.LoadBlockCid(gossipedBlock.Block.Header.Height) - + if err == nil { return } @@ -558,13 +502,11 @@ func (c *Client) blockGossipReceived(ctx context.Context, block []byte) { if err != nil { c.logger.Error("Adding block to blocksync store.", "err", err, "height", gossipedBlock.Block.Header.Height) } - + c.blocksReceived.AddBlockReceived(gossipedBlock.Block.Header.Height) } } - - func (c *Client) bootstrapLoop(ctx context.Context) { ticker := time.NewTicker(c.conf.BootstrapRetryTime) defer ticker.Stop() @@ -590,7 +532,6 @@ func (c *Client) bootstrapLoop(ctx context.Context) { } } - func (c *Client) retrieveBlockSyncLoop(ctx context.Context, msgHandler BlockSyncMessageHandler) { ticker := time.NewTicker(c.conf.BlockSyncRequestIntervalTime) defer ticker.Stop() @@ -600,7 +541,7 @@ func (c *Client) retrieveBlockSyncLoop(ctx context.Context, msgHandler BlockSync case <-ctx.Done(): return case <-ticker.C: - + if len(c.Peers()) == 0 { continue } @@ -609,8 +550,6 @@ func (c *Client) retrieveBlockSyncLoop(ctx context.Context, msgHandler BlockSync continue } - - for h := state.NextHeight(); h <= c.blocksReceived.latestSeenHeight; h++ { if ctx.Err() != nil { return @@ -653,7 +592,6 @@ func (c *Client) retrieveBlockSyncLoop(ctx context.Context, msgHandler BlockSync } } - func (c *Client) advertiseBlockSyncCids(ctx context.Context) { ticker := time.NewTicker(c.conf.BlockSyncRequestIntervalTime) defer ticker.Stop() @@ -663,7 +601,7 @@ func (c *Client) advertiseBlockSyncCids(ctx context.Context) { case <-ctx.Done(): return case <-ticker.C: - + if len(c.Peers()) == 0 { continue } @@ -693,13 +631,12 @@ func (c *Client) advertiseBlockSyncCids(ctx context.Context) { } } - + return } } } - func (c *Client) findConnection(peer peer.AddrInfo) bool { for _, con := range c.Host.Network().Conns() { if peer.ID == con.RemotePeer() { @@ -713,7 +650,6 @@ func getBlockSyncKeyByHeight(height uint64, revision uint64) string { return "/" + blockSyncProtocolPrefix + "/" + strconv.FormatUint(revision, 10) + "/" + strconv.FormatUint(height, 10) } - type blockIdValidator struct{} func (blockIdValidator) Validate(_ string, id []byte) error { diff --git a/p2p/events.go b/p2p/events.go index f88ca45e6..a16784afb 100644 --- a/p2p/events.go +++ b/p2p/events.go @@ -4,12 +4,7 @@ import ( uevent "github.com/dymensionxyz/dymint/utils/event" ) - - - - const ( - EventTypeKey = "p2p.event" ) @@ -18,12 +13,6 @@ const ( EventNewBlockSyncBlock = "NewBlockSyncBlock" ) - - - - - var EventQueryNewGossipedBlock = uevent.QueryFor(EventTypeKey, EventNewGossipedBlock) - var EventQueryNewBlockSyncBlock = uevent.QueryFor(EventTypeKey, EventNewBlockSyncBlock) diff --git a/p2p/gossip.go b/p2p/gossip.go index 2cb7c3f65..537820cac 100644 --- a/p2p/gossip.go +++ b/p2p/gossip.go @@ -13,28 +13,23 @@ import ( "github.com/dymensionxyz/dymint/types" ) - const pubsubBufferSize = 3000 - type GossipMessage struct { Data []byte From peer.ID } - type GossiperOption func(*Gossiper) error type GossipMessageHandler func(ctx context.Context, gossipedBlock []byte) - func WithValidator(validator GossipValidator) GossiperOption { return func(g *Gossiper) 
error { return g.ps.RegisterTopicValidator(g.topic.String(), wrapValidator(g, validator)) } } - type Gossiper struct { ownID peer.ID @@ -45,9 +40,6 @@ type Gossiper struct { logger types.Logger } - - - func NewGossiper(host host.Host, ps *pubsub.PubSub, topicStr string, msgHandler GossipMessageHandler, logger types.Logger, options ...GossiperOption) (*Gossiper, error) { topic, err := ps.Join(topicStr) if err != nil { @@ -76,7 +68,6 @@ func NewGossiper(host host.Host, ps *pubsub.PubSub, topicStr string, msgHandler return g, nil } - func (g *Gossiper) Close() error { err := g.ps.UnregisterTopicValidator(g.topic.String()) g.sub.Cancel() @@ -86,12 +77,10 @@ func (g *Gossiper) Close() error { ) } - func (g *Gossiper) Publish(ctx context.Context, data []byte) error { return g.topic.Publish(ctx, data) } - func (g *Gossiper) ProcessMessages(ctx context.Context) { for { msg, err := g.sub.Next(ctx) @@ -110,8 +99,6 @@ func (g *Gossiper) ProcessMessages(ctx context.Context) { func wrapValidator(gossiper *Gossiper, validator GossipValidator) pubsub.Validator { return func(_ context.Context, _ peer.ID, msg *pubsub.Message) bool { - - if msg.GetFrom() == gossiper.ownID { return true } diff --git a/p2p/validator.go b/p2p/validator.go index 4c3b26c27..96fec5983 100644 --- a/p2p/validator.go +++ b/p2p/validator.go @@ -16,17 +16,12 @@ type StateGetter interface { GetRevision() uint64 } - type GossipValidator func(*GossipMessage) bool - type IValidator interface { - - TxValidator(mp mempool.Mempool, mpoolIDS *nodemempool.MempoolIDs) GossipValidator } - type Validator struct { logger types.Logger stateGetter StateGetter @@ -34,7 +29,6 @@ type Validator struct { var _ IValidator = (*Validator)(nil) - func NewValidator(logger types.Logger, blockmanager StateGetter) *Validator { return &Validator{ logger: logger, @@ -42,9 +36,6 @@ func NewValidator(logger types.Logger, blockmanager StateGetter) *Validator { } } - - - func (v *Validator) TxValidator(mp mempool.Mempool, mpoolIDS *nodemempool.MempoolIDs) GossipValidator { return func(txMessage *GossipMessage) bool { v.logger.Debug("Transaction received.", "bytes", len(txMessage.Data)) @@ -59,7 +50,7 @@ func (v *Validator) TxValidator(mp mempool.Mempool, mpoolIDS *nodemempool.Mempoo case errors.Is(err, mempool.ErrTxInCache): return true case errors.Is(err, mempool.ErrMempoolIsFull{}): - return true + return true case errors.Is(err, mempool.ErrTxTooLarge{}): return false case errors.Is(err, mempool.ErrPreCheck{}): @@ -73,7 +64,6 @@ func (v *Validator) TxValidator(mp mempool.Mempool, mpoolIDS *nodemempool.Mempoo } } - func (v *Validator) BlockValidator() GossipValidator { return func(blockMsg *GossipMessage) bool { var gossipedBlock BlockData diff --git a/rpc/client/client.go b/rpc/client/client.go index e0b6b4a29..e82f43439 100644 --- a/rpc/client/client.go +++ b/rpc/client/client.go @@ -34,7 +34,6 @@ const ( defaultPerPage = 30 maxPerPage = 100 - subscribeTimeout = 5 * time.Second ) @@ -46,20 +45,15 @@ const ( SLValidated ) - var ErrConsensusStateNotAvailable = errors.New("consensus state not available in Dymint") var _ rpcclient.Client = &Client{} - - - type Client struct { *tmtypes.EventBus config *config.RPCConfig node *node.Node - genChunks []string } @@ -68,7 +62,6 @@ type ResultBlockValidated struct { Result BlockValidationStatus } - func NewClient(node *node.Node) *Client { return &Client{ EventBus: node.EventBus(), @@ -77,7 +70,6 @@ func NewClient(node *node.Node) *Client { } } - func (c *Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { 
resInfo, err := c.Query().InfoSync(proxy.RequestInfo) if err != nil { @@ -86,12 +78,10 @@ func (c *Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { return &ctypes.ResultABCIInfo{Response: *resInfo}, nil } - func (c *Client) ABCIQuery(ctx context.Context, path string, data tmbytes.HexBytes) (*ctypes.ResultABCIQuery, error) { return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions) } - func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmbytes.HexBytes, opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { resQuery, err := c.Query().QuerySync(abci.RequestQuery{ Path: path, @@ -106,19 +96,13 @@ func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmb return &ctypes.ResultABCIQuery{Response: *resQuery}, nil } - - func (c *Client) BroadcastTxCommit(ctx context.Context, tx tmtypes.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - - - - subscriber := "" + subscriber := "" if err := c.IsSubscriptionAllowed(subscriber); err != nil { return nil, sdkerrors.Wrap(err, "subscription not allowed") } - subCtx, cancel := context.WithTimeout(ctx, subscribeTimeout) defer cancel() q := tmtypes.EventQueryTxFor(tx) @@ -134,7 +118,6 @@ func (c *Client) BroadcastTxCommit(ctx context.Context, tx tmtypes.Tx) (*ctypes. } }() - checkTxResCh := make(chan *abci.Response, 1) err = c.node.Mempool.CheckTx(tx, func(res *abci.Response) { select { @@ -159,15 +142,13 @@ func (c *Client) BroadcastTxCommit(ctx context.Context, tx tmtypes.Tx) (*ctypes. }, nil } - err = c.node.P2P.GossipTx(ctx, tx) if err != nil { return nil, fmt.Errorf("tx added to local mempool but failure to broadcast: %w", err) } - select { - case msg := <-deliverTxSub.Out(): + case msg := <-deliverTxSub.Out(): deliverTxRes, _ := msg.Data().(tmtypes.EventDataTx) return &ctypes.ResultBroadcastTxCommit{ CheckTx: *checkTxRes, @@ -201,15 +182,12 @@ func (c *Client) BroadcastTxCommit(ctx context.Context, tx tmtypes.Tx) (*ctypes. 
} } - - - func (c *Client) BroadcastTxAsync(ctx context.Context, tx tmtypes.Tx) (*ctypes.ResultBroadcastTx, error) { err := c.node.Mempool.CheckTx(tx, nil, mempool.TxInfo{}) if err != nil { return nil, err } - + err = c.node.P2P.GossipTx(ctx, tx) if err != nil { return nil, fmt.Errorf("tx added to local mempool but failed to gossip: %w", err) @@ -217,9 +195,6 @@ func (c *Client) BroadcastTxAsync(ctx context.Context, tx tmtypes.Tx) (*ctypes.R return &ctypes.ResultBroadcastTx{Hash: tx.Hash()}, nil } - - - func (c *Client) BroadcastTxSync(ctx context.Context, tx tmtypes.Tx) (*ctypes.ResultBroadcastTx, error) { resCh := make(chan *abci.Response, 1) err := c.node.Mempool.CheckTx(tx, func(res *abci.Response) { @@ -231,16 +206,10 @@ func (c *Client) BroadcastTxSync(ctx context.Context, tx tmtypes.Tx) (*ctypes.Re res := <-resCh r := res.GetCheckTx() - - - if r.Code == abci.CodeTypeOK { err = c.node.P2P.GossipTx(ctx, tx) if err != nil { - - - - + _ = c.node.Mempool.RemoveTxByKey(tx.Key()) return nil, fmt.Errorf("gossip tx: %w", err) } @@ -255,7 +224,6 @@ func (c *Client) BroadcastTxSync(ctx context.Context, tx tmtypes.Tx) (*ctypes.Re }, nil } - func (c *Client) Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { q, err := tmquery.New(query) if err != nil { @@ -283,7 +251,6 @@ func (c *Client) Subscribe(ctx context.Context, subscriber, query string, outCap return outc, nil } - func (c *Client) Unsubscribe(ctx context.Context, subscriber, query string) error { q, err := tmquery.New(query) if err != nil { @@ -292,12 +259,10 @@ func (c *Client) Unsubscribe(ctx context.Context, subscriber, query string) erro return c.EventBus.Unsubscribe(ctx, subscriber, q) } - func (c *Client) Genesis(_ context.Context) (*ctypes.ResultGenesis, error) { return &ctypes.ResultGenesis{Genesis: c.node.GetGenesis()}, nil } - func (c *Client) GenesisChunked(_ context.Context, id uint) (*ctypes.ResultGenesisChunk, error) { genChunks, err := c.GetGenesisChunks() if err != nil { @@ -312,19 +277,17 @@ func (c *Client) GenesisChunked(_ context.Context, id uint) (*ctypes.ResultGenes return nil, fmt.Errorf("service configuration error, there are no chunks") } - if id > uint(chunkLen)-1 { return nil, fmt.Errorf("there are %d chunks, %d is invalid", chunkLen-1, id) } return &ctypes.ResultGenesisChunk{ TotalChunks: chunkLen, - ChunkNumber: int(id), + ChunkNumber: int(id), Data: genChunks[id], }, nil } - func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { const limit int64 = 20 @@ -336,8 +299,8 @@ func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) baseHeight = 1 } minHeight, maxHeight, err = filterMinMax( - int64(baseHeight), - int64(c.node.GetBlockManagerHeight()), + int64(baseHeight), + int64(c.node.GetBlockManagerHeight()), minHeight, maxHeight, limit) @@ -348,7 +311,7 @@ func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) blocks := make([]*tmtypes.BlockMeta, 0, maxHeight-minHeight+1) for height := maxHeight; height >= minHeight; height-- { - block, err := c.node.Store.LoadBlock(uint64(height)) + block, err := c.node.Store.LoadBlock(uint64(height)) if err != nil { return nil, err } @@ -362,12 +325,11 @@ func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) } return &ctypes.ResultBlockchainInfo{ - LastHeight: int64(c.node.GetBlockManagerHeight()), + LastHeight: int64(c.node.GetBlockManagerHeight()), BlockMetas: 
blocks, }, nil } - func (c *Client) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { res := ctypes.ResultNetInfo{ Listening: true, @@ -389,24 +351,18 @@ func (c *Client) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { return &res, nil } - func (c *Client) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { return nil, ErrConsensusStateNotAvailable } - func (c *Client) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { return nil, ErrConsensusStateNotAvailable } - - - func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { - params := c.node.GetGenesis().ConsensusParams return &ctypes.ResultConsensusParams{ - BlockHeight: int64(c.normalizeHeight(height)), + BlockHeight: int64(c.normalizeHeight(height)), ConsensusParams: tmproto.ConsensusParams{ Block: tmproto.BlockParams{ MaxBytes: params.Block.MaxBytes, @@ -428,14 +384,10 @@ func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.Re }, nil } - func (c *Client) Health(ctx context.Context) (*ctypes.ResultHealth, error) { return &ctypes.ResultHealth{}, nil } - - - func (c *Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { heightValue := c.normalizeHeight(height) block, err := c.node.Store.LoadBlock(heightValue) @@ -459,7 +411,6 @@ func (c *Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, }, nil } - func (c *Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { var h [32]byte copy(h[:], hash) @@ -485,13 +436,12 @@ func (c *Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBl }, nil } - func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) { var h uint64 if height == nil { h = c.node.GetBlockManagerHeight() } else { - h = uint64(*height) + h = uint64(*height) } resp, err := c.node.Store.LoadBlockResponses(h) if err != nil { @@ -499,7 +449,7 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul } return &ctypes.ResultBlockResults{ - Height: int64(h), + Height: int64(h), TxsResults: resp.DeliverTxs, BeginBlockEvents: resp.BeginBlock.Events, EndBlockEvents: resp.EndBlock.Events, @@ -508,7 +458,6 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul }, nil } - func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { heightValue := c.normalizeHeight(height) com, err := c.node.Store.LoadCommit(heightValue) @@ -528,7 +477,6 @@ func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommi return ctypes.NewResultCommit(&block.Header, commit, true), nil } - func (c *Client) Validators(ctx context.Context, heightPtr *int64, _, _ *int) (*ctypes.ResultValidators, error) { height := c.normalizeHeight(heightPtr) @@ -538,14 +486,13 @@ func (c *Client) Validators(ctx context.Context, heightPtr *int64, _, _ *int) (* } return &ctypes.ResultValidators{ - BlockHeight: int64(height), + BlockHeight: int64(height), Validators: proposer.TMValidators(), Count: 1, Total: 1, }, nil } - func (c *Client) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { res, err := c.node.TxIndexer.Get(hash) if err != nil { @@ -561,8 +508,8 @@ func (c *Client) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.Resul var proof tmtypes.TxProof if prove { - block, _ := c.node.Store.LoadBlock(uint64(height)) - blockProof := 
block.Data.Txs.Proof(int(index)) + block, _ := c.node.Store.LoadBlock(uint64(height)) + blockProof := block.Data.Txs.Proof(int(index)) proof = tmtypes.TxProof{ RootHash: blockProof.RootHash, Data: tmtypes.Tx(blockProof.Data), @@ -580,7 +527,6 @@ func (c *Client) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.Resul }, nil } - func (c *Client) TxSearch(ctx context.Context, query string, prove bool, pagePtr, perPagePtr *int, orderBy string) (*ctypes.ResultTxSearch, error) { q, err := tmquery.New(query) if err != nil { @@ -592,7 +538,6 @@ func (c *Client) TxSearch(ctx context.Context, query string, prove bool, pagePtr return nil, err } - switch orderBy { case "desc": sort.Slice(results, func(i, j int) bool { @@ -612,7 +557,6 @@ func (c *Client) TxSearch(ctx context.Context, query string, prove bool, pagePtr return nil, errors.New("expected order_by to be either `asc` or `desc` or empty") } - totalCount := len(results) perPage := validatePerPage(perPagePtr) @@ -629,7 +573,6 @@ func (c *Client) TxSearch(ctx context.Context, query string, prove bool, pagePtr r := results[i] var proof tmtypes.TxProof - apiResults = append(apiResults, &ctypes.ResultTx{ Hash: tmtypes.Tx(r.Tx).Hash(), @@ -644,8 +587,6 @@ func (c *Client) TxSearch(ctx context.Context, query string, prove bool, pagePtr return &ctypes.ResultTxSearch{Txs: apiResults, TotalCount: totalCount}, nil } - - func (c *Client) BlockSearch(ctx context.Context, query string, page, perPage *int, orderBy string) (*ctypes.ResultBlockSearch, error) { q, err := tmquery.New(query) if err != nil { @@ -657,7 +598,6 @@ func (c *Client) BlockSearch(ctx context.Context, query string, page, perPage *i return nil, err } - switch orderBy { case "desc": sort.Slice(results, func(i, j int) bool { @@ -672,7 +612,6 @@ func (c *Client) BlockSearch(ctx context.Context, query string, page, perPage *i return nil, errors.New("expected order_by to be either `asc` or `desc` or empty") } - totalCount := len(results) perPageVal := validatePerPage(perPage) @@ -684,10 +623,9 @@ func (c *Client) BlockSearch(ctx context.Context, query string, page, perPage *i skipCount := validateSkipCount(pageVal, perPageVal) pageSize := tmmath.MinInt(perPageVal, totalCount-skipCount) - blocks := make([]*ctypes.ResultBlock, 0, pageSize) for i := skipCount; i < skipCount+pageSize; i++ { - b, err := c.node.Store.LoadBlock(uint64(results[i])) + b, err := c.node.Store.LoadBlock(uint64(results[i])) if err != nil { return nil, err } @@ -706,11 +644,9 @@ func (c *Client) BlockSearch(ctx context.Context, query string, page, perPage *i return &ctypes.ResultBlockSearch{Blocks: blocks, TotalCount: totalCount}, nil } - func (c *Client) Status(_ context.Context) (*ctypes.ResultStatus, error) { latest, err := c.node.Store.LoadBlock(c.node.GetBlockManagerHeight()) if err != nil { - return nil, fmt.Errorf("find latest block: %w", err) } @@ -736,7 +672,6 @@ func (c *Client) Status(_ context.Context) (*ctypes.ResultStatus, error) { txIndexerStatus := "on" result := &ctypes.ResultStatus{ - NodeInfo: p2p.DefaultNodeInfo{ ProtocolVersion: defaultProtocolVersion, DefaultNodeID: id, @@ -753,18 +688,12 @@ func (c *Client) Status(_ context.Context) (*ctypes.ResultStatus, error) { SyncInfo: ctypes.SyncInfo{ LatestBlockHash: latestBlockHash[:], LatestAppHash: latestAppHash[:], - LatestBlockHeight: int64(latestHeight), + LatestBlockHeight: int64(latestHeight), LatestBlockTime: latestBlockTime, - + CatchingUp: c.node.BlockManager.TargetHeight.Load() > latestHeight, - - - - - - }, - + ValidatorInfo: 
ctypes.ValidatorInfo{ Address: tmbytes.HexBytes(proposer.ConsAddress()), PubKey: proposer.PubKey(), @@ -774,14 +703,12 @@ func (c *Client) Status(_ context.Context) (*ctypes.ResultStatus, error) { return result, nil } - func (c *Client) BroadcastEvidence(ctx context.Context, evidence tmtypes.Evidence) (*ctypes.ResultBroadcastEvidence, error) { return &ctypes.ResultBroadcastEvidence{ Hash: evidence.Hash(), }, nil } - func (c *Client) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) { return &ctypes.ResultUnconfirmedTxs{ Count: c.node.Mempool.Size(), @@ -790,9 +717,7 @@ func (c *Client) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirm }, nil } - func (c *Client) UnconfirmedTxs(ctx context.Context, limitPtr *int) (*ctypes.ResultUnconfirmedTxs, error) { - limit := validatePerPage(limitPtr) txs := c.node.Mempool.ReapMaxTxs(limit) @@ -804,9 +729,6 @@ func (c *Client) UnconfirmedTxs(ctx context.Context, limitPtr *int) (*ctypes.Res }, nil } - - - func (c *Client) CheckTx(ctx context.Context, tx tmtypes.Tx) (*ctypes.ResultCheckTx, error) { res, err := c.Mempool().CheckTxSync(abci.RequestCheckTx{Tx: tx}) if err != nil { @@ -817,20 +739,19 @@ func (c *Client) CheckTx(ctx context.Context, tx tmtypes.Tx) (*ctypes.ResultChec func (c *Client) BlockValidated(height *int64) (*ResultBlockValidated, error) { _, _, chainID := c.node.P2P.Info() - + if height == nil || *height < 0 { return &ResultBlockValidated{Result: -1, ChainID: chainID}, nil } - - if uint64(*height) > c.node.BlockManager.State.Height() { + + if uint64(*height) > c.node.BlockManager.State.Height() { return &ResultBlockValidated{Result: NotValidated, ChainID: chainID}, nil } - if uint64(*height) <= c.node.BlockManager.SettlementValidator.GetLastValidatedHeight() { + if uint64(*height) <= c.node.BlockManager.SettlementValidator.GetLastValidatedHeight() { return &ResultBlockValidated{Result: SLValidated, ChainID: chainID}, nil } - return &ResultBlockValidated{Result: P2PValidated, ChainID: chainID}, nil } @@ -856,7 +777,7 @@ func (c *Client) eventsRoutine(sub tmtypes.Subscription, subscriber string, q tm c.Logger.Error("subscription was cancelled, resubscribing...", "err", sub.Err(), "query", q.String()) sub = c.resubscribe(subscriber, q) - if sub == nil { + if sub == nil { return } case <-c.Quit(): @@ -865,7 +786,6 @@ func (c *Client) eventsRoutine(sub tmtypes.Subscription, subscriber string, q tm } } - func (c *Client) resubscribe(subscriber string, q tmpubsub.Query) tmtypes.Subscription { attempts := uint(0) for { @@ -879,7 +799,7 @@ func (c *Client) resubscribe(subscriber string, q tmpubsub.Query) tmtypes.Subscr } attempts++ - time.Sleep((10 << attempts) * time.Millisecond) + time.Sleep((10 << attempts) * time.Millisecond) } } @@ -904,7 +824,7 @@ func (c *Client) normalizeHeight(height *int64) uint64 { if height == nil || *height == 0 { heightValue = c.node.GetBlockManagerHeight() } else { - heightValue = uint64(*height) + heightValue = uint64(*height) } return heightValue @@ -921,7 +841,7 @@ func (c *Client) IsSubscriptionAllowed(subscriber string) error { } func validatePerPage(perPagePtr *int) int { - if perPagePtr == nil { + if perPagePtr == nil { return defaultPerPage } @@ -939,13 +859,13 @@ func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { panic(fmt.Sprintf("zero or negative perPage: %d", perPage)) } - if pagePtr == nil || *pagePtr <= 0 { + if pagePtr == nil || *pagePtr <= 0 { return 1, nil } pages := ((totalCount - 1) / perPage) + 1 if pages == 0 { - pages = 1 + pages = 
1 } page := *pagePtr if page > pages { @@ -965,12 +885,10 @@ func validateSkipCount(page, perPage int) int { } func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { - if min < 0 || max < 0 { return min, max, errors.New("height must be greater than zero") } - if min == 0 { min = 1 } @@ -978,14 +896,10 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { max = height } - max = tmmath.MinInt64(height, max) - min = tmmath.MaxInt64(base, min) - - min = tmmath.MaxInt64(min, max-limit+1) if min > max { diff --git a/rpc/client/client_test.go b/rpc/client/client_test.go index d963196d4..5989dd910 100644 --- a/rpc/client/client_test.go +++ b/rpc/client/client_test.go @@ -374,7 +374,6 @@ func TestValidatedHeight(t *testing.T) { for _, test := range tests { test := test t.Run(test.name, func(t *testing.T) { - node.BlockManager.SettlementValidator.UpdateLastValidatedHeight(test.validatedHeight) node.BlockManager.LastSettlementHeight.Store(test.submittedHeight) @@ -389,7 +388,6 @@ func TestValidatedHeight(t *testing.T) { err = node.Stop() require.NoError(err) - } func TestGetCommit(t *testing.T) { diff --git a/rpc/client/utils.go b/rpc/client/utils.go index 04ec93e09..7e2aeb5b9 100644 --- a/rpc/client/utils.go +++ b/rpc/client/utils.go @@ -8,12 +8,9 @@ import ( ) const ( - - - genesisChunkSize = 16 * 1024 * 1024 + genesisChunkSize = 16 * 1024 * 1024 ) - func (c *Client) GetGenesisChunks() ([]string, error) { if c.genChunks != nil { return c.genChunks, nil @@ -26,8 +23,6 @@ func (c *Client) GetGenesisChunks() ([]string, error) { return c.genChunks, err } - - func (c *Client) initGenesisChunks(genesis *tmtypes.GenesisDoc) error { if genesis == nil { return nil diff --git a/rpc/json/handler.go b/rpc/json/handler.go index 46d70f126..1faea7da9 100644 --- a/rpc/json/handler.go +++ b/rpc/json/handler.go @@ -49,21 +49,16 @@ func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.mux.ServeHTTP(w, r) } - func (h *handler) serveJSONRPC(w http.ResponseWriter, r *http.Request) { h.serveJSONRPCforWS(w, r, nil) } - - func (h *handler) serveJSONRPCforWS(w http.ResponseWriter, r *http.Request, wsConn *wsConn) { - codecReq := h.codec.NewRequest(r) - + method, err := codecReq.Method() if err != nil { if e, ok := err.(*json2.Error); method == "" && ok && e.Message == "EOF" { - return } codecReq.WriteError(w, http.StatusBadRequest, err) @@ -76,7 +71,6 @@ func (h *handler) serveJSONRPCforWS(w http.ResponseWriter, r *http.Request, wsCo return } - args := reflect.New(methodSpec.argsType) if errRead := codecReq.ReadRequest(args.Interface()); errRead != nil { codecReq.WriteError(w, http.StatusBadRequest, errRead) @@ -98,7 +92,6 @@ func (h *handler) serveJSONRPCforWS(w http.ResponseWriter, r *http.Request, wsCo } rets := methodSpec.m.Call(callArgs) - var errResult error statusCode := http.StatusOK errInter := rets[1].Interface() @@ -107,11 +100,8 @@ func (h *handler) serveJSONRPCforWS(w http.ResponseWriter, r *http.Request, wsCo errResult, _ = errInter.(error) } - - w.Header().Set("x-content-type-options", "nosniff") - if errResult == nil { var raw json.RawMessage raw, err = tmjson.Marshal(rets[0].Interface()) @@ -153,7 +143,7 @@ func (h *handler) newHandler(methodSpec *method) func(http.ResponseWriter, *http case reflect.String: args.Elem().Field(i).SetString(rawVal) case reflect.Slice: - + if field.Type.Elem().Kind() == reflect.Uint8 { err = setByteSliceParam(rawVal, &args, i) } @@ -172,7 +162,6 @@ func (h *handler) newHandler(methodSpec *method) 
func(http.ResponseWriter, *http args, }) - statusCode := http.StatusOK errInter := rets[1].Interface() if errInter != nil { @@ -185,8 +174,6 @@ func (h *handler) newHandler(methodSpec *method) func(http.ResponseWriter, *http } func (h *handler) encodeAndWriteResponse(w http.ResponseWriter, result interface{}, errResult error, statusCode int) { - - w.Header().Set("x-content-type-options", "nosniff") w.Header().Set("Content-Type", "application/json; charset=utf-8") diff --git a/rpc/json/service.go b/rpc/json/service.go index e1952f770..23b9187e1 100644 --- a/rpc/json/service.go +++ b/rpc/json/service.go @@ -20,13 +20,11 @@ import ( ) const ( - defaultSubscribeTimeout = 5 * time.Second - + defaultSubscribeBufferSize = 100 ) - func GetHTTPHandler(l *client.Client, logger types.Logger, opts ...option) (http.Handler, error) { return newHandler(newService(l, logger, opts...), json2.NewCodec(), logger), nil } @@ -137,9 +135,9 @@ func (s *service) Subscribe(req *http.Request, args *subscribeArgs, wsConn *wsCo } go func(subscriptionID []byte) { for msg := range out { - + var resp rpctypes.RPCResponse - + subscriptionIDInt, err := strconv.Atoi(string(subscriptionID)) if err != nil { s.logger.Info("Failed to convert subscriptionID to int") @@ -147,7 +145,7 @@ func (s *service) Subscribe(req *http.Request, args *subscribeArgs, wsConn *wsCo } else { resp = rpctypes.NewRPCSuccessResponse(rpctypes.JSONRPCIntID(subscriptionIDInt), msg) } - + jsonBytes, err := json.MarshalIndent(resp, "", " ") if err != nil { s.logger.Error("marshal RPCResponse to JSON", "err", err) @@ -180,7 +178,6 @@ func (s *service) UnsubscribeAll(req *http.Request, args *unsubscribeAllArgs) (* return &emptyResult{}, nil } - func (s *service) Health(req *http.Request, args *healthArgs) (*ctypes.ResultHealth, error) { return s.client.Health(req.Context()) } @@ -202,7 +199,7 @@ func (s *service) Genesis(req *http.Request, args *genesisArgs) (*ctypes.ResultG } func (s *service) GenesisChunked(req *http.Request, args *genesisChunkedArgs) (*ctypes.ResultGenesisChunk, error) { - return s.client.GenesisChunked(req.Context(), uint(args.ID)) + return s.client.GenesisChunked(req.Context(), uint(args.ID)) } func (s *service) Block(req *http.Request, args *blockArgs) (*ctypes.ResultBlock, error) { @@ -261,7 +258,6 @@ func (s *service) NumUnconfirmedTxs(req *http.Request, args *numUnconfirmedTxsAr return s.client.NumUnconfirmedTxs(req.Context()) } - func (s *service) BroadcastTxCommit(req *http.Request, args *broadcastTxCommitArgs) (*ctypes.ResultBroadcastTxCommit, error) { return s.client.BroadcastTxCommit(req.Context(), args.Tx) } @@ -274,7 +270,6 @@ func (s *service) BroadcastTxAsync(req *http.Request, args *broadcastTxAsyncArgs return s.client.BroadcastTxAsync(req.Context(), args.Tx) } - func (s *service) ABCIQuery(req *http.Request, args *ABCIQueryArgs) (*ctypes.ResultABCIQuery, error) { return s.client.ABCIQueryWithOptions(req.Context(), args.Path, args.Data, rpcclient.ABCIQueryOptions{ Height: int64(args.Height), @@ -286,7 +281,6 @@ func (s *service) ABCIInfo(req *http.Request, args *ABCIInfoArgs) (*ctypes.Resul return s.client.ABCIInfo(req.Context()) } - func (s *service) BroadcastEvidence(req *http.Request, args *broadcastEvidenceArgs) (*ctypes.ResultBroadcastEvidence, error) { return s.client.BroadcastEvidence(req.Context(), args.Evidence) } diff --git a/rpc/json/types.go b/rpc/json/types.go index 23e84dff6..468eec487 100644 --- a/rpc/json/types.go +++ b/rpc/json/types.go @@ -18,7 +18,6 @@ type unsubscribeArgs struct { } type 
unsubscribeAllArgs struct{} - type ( healthArgs struct{} statusArgs struct{} @@ -86,7 +85,6 @@ type unconfirmedTxsArgs struct { } type numUnconfirmedTxsArgs struct{} - type broadcastTxCommitArgs struct { Tx types.Tx `json:"tx"` } @@ -97,9 +95,6 @@ type broadcastTxAsyncArgs struct { Tx types.Tx `json:"tx"` } - - - type ABCIQueryArgs struct { Path string `json:"path"` Data bytes.HexBytes `json:"data"` @@ -107,31 +102,22 @@ type ABCIQueryArgs struct { Prove bool `json:"prove"` } - type ABCIInfoArgs struct{} - - type broadcastEvidenceArgs struct { Evidence types.Evidence `json:"evidence"` } type emptyResult struct{} - - - type StrInt int - type StrInt64 int64 - func (s *StrInt64) UnmarshalJSON(b []byte) error { return unmarshalStrInt64(b, s) } - func (s *StrInt) UnmarshalJSON(b []byte) error { var val StrInt64 err := unmarshalStrInt64(b, &val) diff --git a/rpc/json/ws.go b/rpc/json/ws.go index a9728e5a9..f11462ed1 100644 --- a/rpc/json/ws.go +++ b/rpc/json/ws.go @@ -40,7 +40,6 @@ func (wsc *wsConn) sendLoop() { } func (h *handler) wsHandler(w http.ResponseWriter, r *http.Request) { - upgrader := websocket.Upgrader{ ReadBufferSize: 1024, WriteBufferSize: 1024, @@ -89,7 +88,7 @@ func (h *handler) wsHandler(w http.ResponseWriter, r *http.Request) { } if mt != websocket.TextMessage { - + h.logger.Debug("expected text message") continue } @@ -111,14 +110,12 @@ func newResponseWriter(w io.Writer) http.ResponseWriter { return &wsResponse{w} } - type wsResponse struct { w io.Writer } var _ http.ResponseWriter = wsResponse{} - func (w wsResponse) Write(bytes []byte) (int, error) { return w.w.Write(bytes) } diff --git a/rpc/middleware/client.go b/rpc/middleware/client.go index 6d175fb2b..d99ff7f9f 100644 --- a/rpc/middleware/client.go +++ b/rpc/middleware/client.go @@ -6,14 +6,11 @@ import ( "github.com/tendermint/tendermint/libs/log" ) - - type Client struct { registry *Registry logger log.Logger } - func NewClient(reg Registry, logger log.Logger) *Client { return &Client{ registry: ®, @@ -21,7 +18,6 @@ func NewClient(reg Registry, logger log.Logger) *Client { } } - func (mc *Client) Handle(h http.Handler) http.Handler { registeredMiddlewares := mc.registry.GetRegistered() finalHandler := h diff --git a/rpc/middleware/registry.go b/rpc/middleware/registry.go index 70a1b2222..cded31e31 100644 --- a/rpc/middleware/registry.go +++ b/rpc/middleware/registry.go @@ -12,20 +12,16 @@ var ( instance *Registry ) - type HandlerFunc func(http.Handler) http.Handler - type Middleware interface { Handler(logger log.Logger) HandlerFunc } - type Registry struct { middlewareList []Middleware } - func GetRegistry() *Registry { once.Do(func() { instance = &Registry{} @@ -33,12 +29,10 @@ func GetRegistry() *Registry { return instance } - func (r *Registry) Register(m Middleware) { r.middlewareList = append(r.middlewareList, m) } - func (r *Registry) GetRegistered() []Middleware { return r.middlewareList } diff --git a/rpc/middleware/status.go b/rpc/middleware/status.go index 16172aa48..7e3b9ec34 100644 --- a/rpc/middleware/status.go +++ b/rpc/middleware/status.go @@ -16,7 +16,7 @@ func (s Status) Handler(logger log.Logger) HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { err := s.Err() isHealthy := err == nil - + if r.URL.Path == "/health" { w.WriteHeader(http.StatusOK) diff --git a/rpc/server.go b/rpc/server.go index 9eafb9f91..ebf947dd9 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -26,7 +26,6 @@ import ( "github.com/dymensionxyz/dymint/rpc/middleware" ) - type Server struct { 
*service.BaseService @@ -43,21 +42,18 @@ type Server struct { const ( onStopTimeout = 5 * time.Second - + readHeaderTimeout = 5 * time.Second ) - type Option func(*Server) - func WithListener(listener net.Listener) Option { return func(d *Server) { d.listener = listener } } - func NewServer(node *node.Node, config *config.RPCConfig, logger log.Logger, options ...Option) *Server { srv := &Server{ config: config, @@ -66,16 +62,12 @@ func NewServer(node *node.Node, config *config.RPCConfig, logger log.Logger, opt } srv.BaseService = service.NewBaseService(logger, "RPC", srv) - for _, option := range options { option(srv) } return srv } - - - func (s *Server) Client() rpcclient.Client { return s.client } @@ -84,13 +76,11 @@ func (s *Server) PubSubServer() *pubsub.Server { return s.node.PubSubServer() } - func (s *Server) OnStart() error { s.startEventListener() return s.startRPC() } - func (s *Server) OnStop() { ctx, cancel := context.WithTimeout(context.Background(), onStopTimeout) defer cancel() @@ -99,12 +89,10 @@ func (s *Server) OnStop() { } } - func (s *Server) startEventListener() { go uevent.MustSubscribe(context.Background(), s.PubSubServer(), "RPCNodeHealthStatusHandler", events.QueryHealthStatus, s.onNodeHealthUpdate, s.Logger) } - func (s *Server) onNodeHealthUpdate(event pubsub.Message) { eventData, _ := event.Data().(*events.DataHealthStatus) if eventData.Error != nil { @@ -169,13 +157,11 @@ func (s *Server) startRPC() error { handler = c.Handler(handler) } - reg := middleware.GetRegistry() reg.Register(middleware.Status{Err: s.getHealthStatus}) middlewareClient := middleware.NewClient(*reg, s.Logger.With("module", "rpc/middleware")) handler = middlewareClient.Handle(handler) - go func() { err := s.serve(listener, handler) if !errors.Is(err, http.ErrServerClosed) { diff --git a/settlement/config.go b/settlement/config.go index 4895849fd..628e8b156 100644 --- a/settlement/config.go +++ b/settlement/config.go @@ -5,7 +5,6 @@ import ( "time" ) - type Config struct { KeyringBackend string `mapstructure:"keyring_backend"` NodeAddress string `mapstructure:"settlement_node_address"` @@ -19,9 +18,9 @@ type Config struct { RetryMinDelay time.Duration `mapstructure:"retry_min_delay"` BatchAcceptanceTimeout time.Duration `mapstructure:"batch_acceptance_timeout"` BatchAcceptanceAttempts uint `mapstructure:"batch_acceptance_attempts"` - + ProposerPubKey string `json:"proposer_pub_key"` - + SLGrpc GrpcConfig `mapstructure:",squash"` } diff --git a/settlement/dymension/cosmosclient.go b/settlement/dymension/cosmosclient.go index 7feaab2f3..3b1f6f503 100644 --- a/settlement/dymension/cosmosclient.go +++ b/settlement/dymension/cosmosclient.go @@ -17,10 +17,6 @@ import ( sequencertypes "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/sequencer" ) - - - - type CosmosClient interface { Context() sdkclient.Context StartEventListener() error @@ -41,7 +37,6 @@ type cosmosClient struct { var _ CosmosClient = &cosmosClient{} - func NewCosmosClient(client cosmosclient.Client) CosmosClient { return &cosmosClient{client} } diff --git a/settlement/dymension/dymension.go b/settlement/dymension/dymension.go index 101aab439..f4c3f2627 100644 --- a/settlement/dymension/dymension.go +++ b/settlement/dymension/dymension.go @@ -38,7 +38,6 @@ const ( postBatchSubscriberPrefix = "postBatchSubscriber" ) - type Client struct { config *settlement.Config rollappId string @@ -58,7 +57,6 @@ type Client struct { var _ settlement.ClientI = &Client{} - func (c *Client) Init(config settlement.Config, rollappId 
string, pubsub *pubsub.Server, logger types.Logger, options ...settlement.Option) error { interfaceRegistry := cdctypes.NewInterfaceRegistry() cryptocodec.RegisterInterfaces(interfaceRegistry) @@ -76,7 +74,6 @@ func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub c.retryMinDelay = config.RetryMinDelay c.retryMaxDelay = config.RetryMaxDelay - for _, apply := range options { apply(c) } @@ -96,7 +93,6 @@ func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub return nil } - func (c *Client) Start() error { err := c.cosmosClient.StartEventListener() if err != nil { @@ -106,31 +102,26 @@ func (c *Client) Start() error { return nil } - func (c *Client) Stop() error { return c.cosmosClient.StopEventListener() } - - func (c *Client) SubmitBatch(batch *types.Batch, _ da.Client, daResult *da.ResultSubmitBatch) error { msgUpdateState, err := c.convertBatchToMsgUpdateState(batch, daResult) if err != nil { return fmt.Errorf("convert batch to msg update state: %w", err) } - postBatchSubscriberClient := fmt.Sprintf("%s-%d-%s", postBatchSubscriberPrefix, batch.StartHeight(), uuid.New().String()) subscription, err := c.pubsub.Subscribe(c.ctx, postBatchSubscriberClient, settlement.EventQueryNewSettlementBatchAccepted, 1000) if err != nil { return fmt.Errorf("pub sub subscribe to settlement state updates: %w", err) } - defer c.pubsub.UnsubscribeAll(c.ctx, postBatchSubscriberClient) for { - + err := c.RunWithRetryInfinitely(func() error { err := c.broadcastBatch(msgUpdateState) if err != nil { @@ -154,7 +145,6 @@ func (c *Client) SubmitBatch(batch *types.Batch, _ da.Client, daResult *da.Resul return fmt.Errorf("broadcast batch: %w", err) } - timer := time.NewTimer(c.batchAcceptanceTimeout) defer timer.Stop() attempt := uint64(1) @@ -171,20 +161,20 @@ func (c *Client) SubmitBatch(batch *types.Batch, _ da.Client, daResult *da.Resul eventData, _ := event.Data().(*settlement.EventDataNewBatch) if eventData.EndHeight != batch.EndHeight() { c.logger.Debug("Received event for a different batch, ignoring.", "event", eventData) - continue + continue } c.logger.Info("Batch accepted.", "startHeight", batch.StartHeight(), "endHeight", batch.EndHeight(), "stateIndex", eventData.StateIndex, "dapath", msgUpdateState.DAPath) return nil case <-timer.C: - + includedBatch, err := c.pollForBatchInclusion(batch.EndHeight()) timer.Reset(c.batchAcceptanceTimeout) - + if err == nil && !includedBatch { attempt++ if attempt <= uint64(c.batchAcceptanceAttempts) { - continue + continue } c.logger.Error( "Timed out waiting for batch inclusion on settlement layer", @@ -193,7 +183,7 @@ func (c *Client) SubmitBatch(batch *types.Batch, _ da.Client, daResult *da.Resul "endHeight", batch.EndHeight(), ) - break + break } if err != nil { c.logger.Error( @@ -205,13 +195,13 @@ func (c *Client) SubmitBatch(batch *types.Batch, _ da.Client, daResult *da.Resul "error", err, ) - continue + continue } - + c.logger.Info("Batch accepted", "startHeight", batch.StartHeight(), "endHeight", batch.EndHeight()) return nil } - break + break } } } @@ -237,7 +227,7 @@ func (c *Client) getStateInfo(index, height *uint64) (res *rollapptypes.QueryGet if err != nil { return nil, fmt.Errorf("query state info: %w", err) } - if res == nil { + if res == nil { return nil, fmt.Errorf("empty response with nil err: %w", gerrc.ErrUnknown) } return @@ -259,13 +249,12 @@ func (c *Client) getLatestHeight(finalized bool) (res *rollapptypes.QueryGetLate if err != nil { return nil, fmt.Errorf("query state info: %w", err) } - if 
res == nil { + if res == nil { return nil, fmt.Errorf("empty response with nil err: %w", gerrc.ErrUnknown) } return } - func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { res, err := c.getStateInfo(nil, nil) if err != nil { @@ -274,7 +263,6 @@ func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { return convertStateInfoToResultRetrieveBatch(&res.StateInfo) } - func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, error) { res, err := c.getStateInfo(&index, nil) if err != nil { @@ -283,7 +271,6 @@ func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, return convertStateInfoToResultRetrieveBatch(&res.StateInfo) } - func (c *Client) GetBatchAtHeight(height uint64) (*settlement.ResultRetrieveBatch, error) { res, err := c.getStateInfo(nil, &height) if err != nil { @@ -292,7 +279,6 @@ func (c *Client) GetBatchAtHeight(height uint64) (*settlement.ResultRetrieveBatc return convertStateInfoToResultRetrieveBatch(&res.StateInfo) } - func (c *Client) GetLatestHeight() (uint64, error) { res, err := c.getLatestHeight(false) if err != nil { @@ -301,7 +287,6 @@ func (c *Client) GetLatestHeight() (uint64, error) { return res.Height, nil } - func (c *Client) GetLatestFinalizedHeight() (uint64, error) { res, err := c.getLatestHeight(true) if err != nil { @@ -310,16 +295,12 @@ func (c *Client) GetLatestFinalizedHeight() (uint64, error) { return res.Height, nil } - - func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { - seqs, err := c.GetAllSequencers() if err != nil { return nil, fmt.Errorf("get bonded sequencers: %w", err) } - var proposerAddr string if height < 0 { proposerAddr, err = c.getLatestProposer() @@ -327,12 +308,10 @@ func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { return nil, fmt.Errorf("get latest proposer: %w", err) } } else { - + res, err := c.GetBatchAtHeight(uint64(height)) - - + if err != nil { - if errors.Is(err, gerrc.ErrNotFound) { proposerAddr, err = c.getLatestProposer() if err != nil { @@ -350,7 +329,6 @@ func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { return nil, fmt.Errorf("proposer is sentinel") } - for _, seq := range seqs { if seq.SettlementAddress == proposerAddr { return &seq, nil @@ -359,7 +337,6 @@ func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { return nil, fmt.Errorf("proposer not found") } - func (c *Client) GetSequencerByAddress(address string) (types.Sequencer, error) { var res *sequencertypes.QueryGetSequencerResponse req := &sequencertypes.QueryGetSequencerRequest{ @@ -402,7 +379,6 @@ func (c *Client) GetSequencerByAddress(address string) (types.Sequencer, error) ), nil } - func (c *Client) GetAllSequencers() ([]types.Sequencer, error) { var res *sequencertypes.QueryGetSequencersByRollappResponse req := &sequencertypes.QueryGetSequencersByRollappRequest{ @@ -425,7 +401,6 @@ func (c *Client) GetAllSequencers() ([]types.Sequencer, error) { return nil, err } - if res == nil { return nil, fmt.Errorf("empty response: %w", gerrc.ErrUnknown) } @@ -455,7 +430,6 @@ func (c *Client) GetAllSequencers() ([]types.Sequencer, error) { return sequencerList, nil } - func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { var res *sequencertypes.QueryGetSequencersByRollappByStatusResponse req := &sequencertypes.QueryGetSequencersByRollappByStatusRequest{ @@ -479,7 +453,6 @@ func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { return nil, err 
} - if res == nil { return nil, fmt.Errorf("empty response: %w", gerrc.ErrUnknown) } @@ -508,10 +481,6 @@ func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { return sequencerList, nil } - - - - func (c *Client) GetNextProposer() (*types.Sequencer, error) { var ( nextAddr string @@ -577,7 +546,6 @@ func (c *Client) GetRollapp() (*types.Rollapp, error) { return nil, fmt.Errorf("get rollapp: %w", err) } - if res == nil { return nil, fmt.Errorf("empty response: %w", gerrc.ErrUnknown) } @@ -586,7 +554,6 @@ func (c *Client) GetRollapp() (*types.Rollapp, error) { return &rollapp, nil } - func (c *Client) GetObsoleteDrs() ([]uint32, error) { var res *rollapptypes.QueryObsoleteDRSVersionsResponse req := &rollapptypes.QueryObsoleteDRSVersionsRequest{} @@ -606,7 +573,6 @@ func (c *Client) GetObsoleteDrs() ([]uint32, error) { return nil, fmt.Errorf("get rollapp: %w", err) } - if res == nil { return nil, fmt.Errorf("empty response: %w", gerrc.ErrUnknown) } @@ -694,7 +660,6 @@ func getCosmosClientOptions(config *settlement.Config) []cosmosclient.Option { return options } - func (c *Client) pollForBatchInclusion(batchEndHeight uint64) (bool, error) { latestBatch, err := c.GetLatestBatch() if err != nil { @@ -768,7 +733,6 @@ func (c *Client) ValidateGenesisBridgeData(data rollapptypes.GenesisBridgeData) return fmt.Errorf("rollapp client: validate genesis bridge: %w", err) } - if res == nil { return fmt.Errorf("empty response: %w", gerrc.ErrUnknown) } diff --git a/settlement/dymension/events.go b/settlement/dymension/events.go index 29280911a..5bb5800a5 100644 --- a/settlement/dymension/events.go +++ b/settlement/dymension/events.go @@ -12,7 +12,6 @@ import ( ctypes "github.com/tendermint/tendermint/rpc/core/types" ) - const ( eventStateUpdateFmt = "state_update.rollapp_id='%s' AND state_update.status='PENDING'" eventStateUpdateFinalizedFmt = "state_update.rollapp_id='%s' AND state_update.status='FINALIZED'" @@ -42,7 +41,6 @@ func (c *Client) eventHandler() { eventRotationStartedQ := fmt.Sprintf(eventRotationStartedFmt, c.rollappId) eventStateUpdateFinalizedQ := fmt.Sprintf(eventStateUpdateFinalizedFmt, c.rollappId) - eventMap := map[string]string{ eventStateUpdateQ: settlement.EventNewBatchAccepted, eventSequencersListQ: settlement.EventNewBondedSequencer, @@ -66,7 +64,7 @@ func (c *Client) eventHandler() { if err != nil { panic(fmt.Errorf("subscribe to events (%s): %w", eventStateUpdateFinalizedQ, err)) } - defer c.cosmosClient.UnsubscribeAll(c.ctx, subscriber) + defer c.cosmosClient.UnsubscribeAll(c.ctx, subscriber) for { var e ctypes.ResultEvent @@ -74,7 +72,7 @@ func (c *Client) eventHandler() { case <-c.ctx.Done(): return case <-c.cosmosClient.EventListenerQuit(): - + return case e = <-stateUpdatesC: case e = <-sequencersListC: @@ -86,7 +84,6 @@ func (c *Client) eventHandler() { } func (c *Client) handleReceivedEvent(event ctypes.ResultEvent, eventMap map[string]string) { - internalType, ok := eventMap[event.Query] if !ok { c.logger.Error("Ignoring event. 
Type not supported.", "event", event) @@ -105,7 +102,7 @@ func (c *Client) handleReceivedEvent(event ctypes.ResultEvent, eventMap map[stri func convertToNewBatchEvent(rawEventData ctypes.ResultEvent) (*settlement.EventDataNewBatch, error) { var errs []error - + events := rawEventData.Events if events["state_update.num_blocks"] == nil || events["state_update.start_height"] == nil || events["state_update.state_info_index"] == nil { return nil, fmt.Errorf("missing expected attributes in event") @@ -137,12 +134,10 @@ func convertToNewBatchEvent(rawEventData ctypes.ResultEvent) (*settlement.EventD } func convertToNewSequencerEvent(rawEventData ctypes.ResultEvent) (*settlement.EventDataNewBondedSequencer, error) { - events := rawEventData.Events if events["create_sequencer.rollapp_id"] == nil { return nil, fmt.Errorf("missing expected attributes in event") } - if events["create_sequencer.sequencer"] == nil { return nil, fmt.Errorf("missing expected attributes in event") @@ -154,14 +149,11 @@ func convertToNewSequencerEvent(rawEventData ctypes.ResultEvent) (*settlement.Ev } func convertToRotationStartedEvent(rawEventData ctypes.ResultEvent) (*settlement.EventDataRotationStarted, error) { - events := rawEventData.Events if events["proposer_rotation_started.rollapp_id"] == nil { return nil, fmt.Errorf("missing expected attributes in event") } - - if events["proposer_rotation_started.next_proposer"] == nil { return nil, fmt.Errorf("missing expected attributes in event") } diff --git a/settlement/dymension/options.go b/settlement/dymension/options.go index 00cc5be2d..3032685f1 100644 --- a/settlement/dymension/options.go +++ b/settlement/dymension/options.go @@ -6,7 +6,6 @@ import ( "github.com/dymensionxyz/dymint/settlement" ) - func WithCosmosClient(cosmosClient CosmosClient) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) @@ -14,7 +13,6 @@ func WithCosmosClient(cosmosClient CosmosClient) settlement.Option { } } - func WithRetryAttempts(batchRetryAttempts uint) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) @@ -22,7 +20,6 @@ func WithRetryAttempts(batchRetryAttempts uint) settlement.Option { } } - func WithBatchAcceptanceTimeout(batchAcceptanceTimeout time.Duration) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) @@ -30,7 +27,6 @@ func WithBatchAcceptanceTimeout(batchAcceptanceTimeout time.Duration) settlement } } - func WithBatchAcceptanceAttempts(batchAcceptanceAttempts uint) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) @@ -38,7 +34,6 @@ func WithBatchAcceptanceAttempts(batchAcceptanceAttempts uint) settlement.Option } } - func WithRetryMinDelay(retryMinDelay time.Duration) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) @@ -46,7 +41,6 @@ func WithRetryMinDelay(retryMinDelay time.Duration) settlement.Option { } } - func WithRetryMaxDelay(retryMaxDelay time.Duration) settlement.Option { return func(c settlement.ClientI) { dlc, _ := c.(*Client) diff --git a/settlement/dymension/utils.go b/settlement/dymension/utils.go index 6dbbae0a7..b8f2755c6 100644 --- a/settlement/dymension/utils.go +++ b/settlement/dymension/utils.go @@ -8,8 +8,6 @@ import ( rollapptypes "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp" ) - - func (c *Client) RunWithRetry(operation func() error) error { return retry.Do(operation, retry.Context(c.ctx), @@ -20,8 +18,6 @@ func (c *Client) RunWithRetry(operation func() error) error { ) } - - func (c 
*Client) RunWithRetryInfinitely(operation func() error) error { return retry.Do(operation, retry.Context(c.ctx), diff --git a/settlement/errors.go b/settlement/errors.go index 55496c242..23bf121bc 100644 --- a/settlement/errors.go +++ b/settlement/errors.go @@ -6,7 +6,6 @@ import ( "github.com/dymensionxyz/gerr-cosmos/gerrc" ) - var ErrBatchNotAccepted = fmt.Errorf("batch not accepted: %w", gerrc.ErrUnknown) type ErrNextSequencerAddressFraud struct { diff --git a/settlement/events.go b/settlement/events.go index 931df574f..942d085c8 100644 --- a/settlement/events.go +++ b/settlement/events.go @@ -7,17 +7,14 @@ import ( ) const ( - EventTypeKey = "settlement.event" - EventNewBatchAccepted = "NewBatchAccepted" EventNewBondedSequencer = "NewBondedSequencer" EventRotationStarted = "RotationStarted" EventNewBatchFinalized = "NewBatchFinalized" ) - var ( EventNewBatchAcceptedList = map[string][]string{EventTypeKey: {EventNewBatchAccepted}} EventNewBondedSequencerList = map[string][]string{EventTypeKey: {EventNewBondedSequencer}} @@ -25,7 +22,6 @@ var ( EventNewBatchFinalizedList = map[string][]string{EventTypeKey: {EventNewBatchFinalized}} ) - var ( EventQueryNewSettlementBatchAccepted = uevent.QueryFor(EventTypeKey, EventNewBatchAccepted) EventQueryNewSettlementBatchFinalized = uevent.QueryFor(EventTypeKey, EventNewBatchFinalized) @@ -33,13 +29,11 @@ var ( EventQueryRotationStarted = uevent.QueryFor(EventTypeKey, EventRotationStarted) ) - - type EventDataNewBatch struct { StartHeight uint64 - + EndHeight uint64 - + StateIndex uint64 } diff --git a/settlement/grpc/grpc.go b/settlement/grpc/grpc.go index 45c5deef0..03a4ec0c7 100644 --- a/settlement/grpc/grpc.go +++ b/settlement/grpc/grpc.go @@ -36,8 +36,6 @@ const ( addressPrefix = "dym" ) - - type Client struct { ctx context.Context rollappID string @@ -59,14 +57,12 @@ func (c *Client) GetRollapp() (*types.Rollapp, error) { }, nil } - func (c *Client) GetObsoleteDrs() ([]uint32, error) { return []uint32{}, nil } var _ settlement.ClientI = (*Client)(nil) - func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub.Server, logger types.Logger, options ...settlement.Option) error { ctx := context.Background() @@ -149,7 +145,6 @@ func initConfig(conf settlement.Config) (proposer string, err error) { return } - func (c *Client) Start() error { c.logger.Info("Starting grpc mock settlement") @@ -159,7 +154,7 @@ func (c *Client) Start() error { for { select { case <-c.stopchan: - + return case <-tick.C: index, err := c.sl.GetIndex(c.ctx, &slmock.SLGetIndexRequest{}) @@ -185,14 +180,12 @@ func (c *Client) Start() error { return nil } - func (c *Client) Stop() error { c.logger.Info("Stopping grpc mock settlement") close(c.stopchan) return nil } - func (c *Client) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *da.ResultSubmitBatch) error { settlementBatch := c.convertBatchtoSettlementBatch(batch, daResult) err := c.saveBatch(settlementBatch) @@ -200,7 +193,7 @@ func (c *Client) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *d return err } - time.Sleep(10 * time.Millisecond) + time.Sleep(10 * time.Millisecond) err = c.pubsub.PublishWithEvents(context.Background(), &settlement.EventDataNewBatch{EndHeight: settlementBatch.EndHeight}, settlement.EventNewBatchAcceptedList) if err != nil { return err @@ -208,7 +201,6 @@ func (c *Client) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *d return nil } - func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { 
c.logger.Info("GetLatestBatch grpc", "index", c.slStateIndex) batchResult, err := c.GetBatchAtIndex(atomic.LoadUint64(&c.slStateIndex)) @@ -218,7 +210,6 @@ func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { return batchResult, nil } - func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, error) { batchResult, err := c.retrieveBatchAtStateIndex(index) if err != nil { @@ -230,7 +221,6 @@ func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, } func (c *Client) GetBatchAtHeight(h uint64) (*settlement.ResultRetrieveBatch, error) { - left, right := uint64(1), c.slStateIndex for left <= right { @@ -256,7 +246,6 @@ func (c *Client) GetBatchAtHeight(h uint64) (*settlement.ResultRetrieveBatch, er return nil, gerrc.ErrNotFound } - func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { pubKeyBytes, err := hex.DecodeString(c.ProposerPubKey) if err != nil { @@ -279,17 +268,14 @@ func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { ), nil } - func (c *Client) GetSequencerByAddress(address string) (types.Sequencer, error) { panic("GetSequencerByAddress not implemented in grpc SL") } - func (c *Client) GetAllSequencers() ([]types.Sequencer, error) { return c.GetBondedSequencers() } - func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { proposer, err := c.GetProposerAtHeight(-1) if err != nil { @@ -298,17 +284,14 @@ func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { return []types.Sequencer{*proposer}, nil } - func (c *Client) GetNextProposer() (*types.Sequencer, error) { return nil, nil } - func (c *Client) GetLatestHeight() (uint64, error) { return c.latestHeight.Load(), nil } - func (c *Client) GetLatestFinalizedHeight() (uint64, error) { return uint64(0), gerrc.ErrNotFound } @@ -320,7 +303,7 @@ func (c *Client) saveBatch(batch *settlement.Batch) error { if err != nil { return err } - + c.logger.Debug("Saving batch to grpc settlement layer", "index", c.slStateIndex+1) setBatchReply, err := c.sl.SetBatch(c.ctx, &slmock.SLSetBatchRequest{Index: c.slStateIndex + 1, Batch: b}) if err != nil { @@ -337,7 +320,7 @@ func (c *Client) saveBatch(batch *settlement.Batch) error { return err } c.logger.Debug("Setting grpc SL Index to ", "index", setIndexReply.GetIndex()) - + c.latestHeight.Store(batch.EndHeight) return nil } diff --git a/settlement/local/local.go b/settlement/local/local.go index 20d3ec8ee..20ee5ef6f 100644 --- a/settlement/local/local.go +++ b/settlement/local/local.go @@ -38,18 +38,16 @@ const ( var ( settlementKVPrefix = []byte{0} - slStateIndexKey = []byte("slStateIndex") + slStateIndexKey = []byte("slStateIndex") ) - - type Client struct { rollappID string ProposerPubKey string logger types.Logger pubsub *pubsub.Server - mu sync.Mutex + mu sync.Mutex slStateIndex uint64 latestHeight uint64 settlementKV store.KV @@ -64,7 +62,6 @@ func (c *Client) GetRollapp() (*types.Rollapp, error) { var _ settlement.ClientI = (*Client)(nil) - func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub.Server, logger types.Logger, options ...settlement.Option) error { slstore, proposer, err := initConfig(config) if err != nil { @@ -77,7 +74,7 @@ func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub b, err := settlementKV.Get(slStateIndexKey) if err == nil { slStateIndex = binary.BigEndian.Uint64(b) - + var settlementBatch rollapptypes.MsgUpdateState b, err := settlementKV.Get(keyFromIndex(slStateIndex)) if 
err != nil { @@ -101,9 +98,9 @@ func (c *Client) Init(config settlement.Config, rollappId string, pubsub *pubsub func initConfig(conf settlement.Config) (slstore store.KV, proposer string, err error) { if conf.KeyringHomeDir == "" { - + slstore = store.NewDefaultInMemoryKVStore() - + if conf.ProposerPubKey != "" { proposer = conf.ProposerPubKey } else { @@ -135,17 +132,14 @@ func initConfig(conf settlement.Config) (slstore store.KV, proposer string, err return } - func (c *Client) Start() error { return nil } - func (c *Client) Stop() error { return c.settlementKV.Close() } - func (c *Client) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *da.ResultSubmitBatch) error { settlementBatch := c.convertBatchToSettlementBatch(batch, daResult) err := c.saveBatch(settlementBatch) @@ -153,14 +147,13 @@ func (c *Client) SubmitBatch(batch *types.Batch, daClient da.Client, daResult *d return err } - time.Sleep(100 * time.Millisecond) + time.Sleep(100 * time.Millisecond) ctx := context.Background() uevent.MustPublish(ctx, c.pubsub, settlement.EventDataNewBatch{EndHeight: settlementBatch.EndHeight}, settlement.EventNewBatchAcceptedList) return nil } - func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { c.mu.Lock() ix := c.slStateIndex @@ -172,17 +165,14 @@ func (c *Client) GetLatestBatch() (*settlement.ResultRetrieveBatch, error) { return batchResult, nil } - func (c *Client) GetLatestHeight() (uint64, error) { return c.latestHeight, nil } - func (c *Client) GetLatestFinalizedHeight() (uint64, error) { return uint64(0), gerrc.ErrNotFound } - func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, error) { batchResult, err := c.retrieveBatchAtStateIndex(index) if err != nil { @@ -196,7 +186,7 @@ func (c *Client) GetBatchAtIndex(index uint64) (*settlement.ResultRetrieveBatch, func (c *Client) GetBatchAtHeight(h uint64) (*settlement.ResultRetrieveBatch, error) { c.mu.Lock() defer c.mu.Unlock() - + for i := c.slStateIndex; i > 0; i-- { b, err := c.GetBatchAtIndex(i) if err != nil { @@ -208,10 +198,9 @@ func (c *Client) GetBatchAtHeight(h uint64) (*settlement.ResultRetrieveBatch, er return b, nil } } - return nil, gerrc.ErrNotFound + return nil, gerrc.ErrNotFound } - func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { pubKeyBytes, err := hex.DecodeString(c.ProposerPubKey) if err != nil { @@ -234,22 +223,18 @@ func (c *Client) GetProposerAtHeight(height int64) (*types.Sequencer, error) { ), nil } - func (c *Client) GetSequencerByAddress(address string) (types.Sequencer, error) { panic("GetSequencerByAddress not implemented in local SL") } - func (c *Client) GetAllSequencers() ([]types.Sequencer, error) { return c.GetBondedSequencers() } - func (c *Client) GetObsoleteDrs() ([]uint32, error) { return []uint32{}, nil } - func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { proposer, err := c.GetProposerAtHeight(-1) if err != nil { @@ -258,7 +243,6 @@ func (c *Client) GetBondedSequencers() ([]types.Sequencer, error) { return []types.Sequencer{*proposer}, nil } - func (c *Client) GetNextProposer() (*types.Sequencer, error) { return nil, nil } @@ -274,7 +258,7 @@ func (c *Client) saveBatch(batch *settlement.Batch) error { c.mu.Lock() defer c.mu.Unlock() - + c.slStateIndex++ err = c.settlementKV.Set(keyFromIndex(c.slStateIndex), b) if err != nil { diff --git a/settlement/registry/registry.go b/settlement/registry/registry.go index c8bdbe5e5..7647f8a8d 100644 --- a/settlement/registry/registry.go +++ 
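The local (mock) settlement client keeps a monotonically increasing state index in its KV store next to each batch: saveBatch writes the batch under the next index and then persists the index itself in big-endian form, which is what Init reads back on restart. A rough sketch of that bookkeeping over a plain map; keyFromIndex and the map store are stand-ins for the dymint implementation:

package sketch

import "encoding/binary"

var slStateIndexKey = []byte("slStateIndex")

// keyFromIndex is an assumed stand-in for the local client's index key derivation.
func keyFromIndex(i uint64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, i)
	return b
}

// saveBatch stores the batch under the next index and records that index so a
// restarted client can resume from it.
func saveBatch(store map[string][]byte, index uint64, batchBytes []byte) uint64 {
	index++
	store[string(keyFromIndex(index))] = batchBytes

	ib := make([]byte, 8)
	binary.BigEndian.PutUint64(ib, index)
	store[string(slStateIndexKey)] = ib
	return index
}

// loadIndex recovers the last persisted state index, defaulting to zero.
func loadIndex(store map[string][]byte) uint64 {
	b, ok := store[string(slStateIndexKey)]
	if !ok {
		return 0
	}
	return binary.BigEndian.Uint64(b)
}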
b/settlement/registry/registry.go @@ -7,26 +7,22 @@ import ( "github.com/dymensionxyz/dymint/settlement/local" ) - type Client string const ( - Local Client = "mock" - + Dymension Client = "dymension" - + Grpc Client = "grpc" ) - var clients = map[Client]func() settlement.ClientI{ Local: func() settlement.ClientI { return &local.Client{} }, Dymension: func() settlement.ClientI { return &dymension.Client{} }, Grpc: func() settlement.ClientI { return &grpc.Client{} }, } - func GetClient(client Client) settlement.ClientI { f, ok := clients[client] if !ok { @@ -35,7 +31,6 @@ func GetClient(client Client) settlement.ClientI { return f() } - func RegisteredClients() []Client { registered := make([]Client, 0, len(clients)) for client := range clients { diff --git a/settlement/settlement.go b/settlement/settlement.go index fbbbf9a63..8eeb49844 100644 --- a/settlement/settlement.go +++ b/settlement/settlement.go @@ -8,10 +8,8 @@ import ( "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp" ) - type StatusCode uint64 - const ( StatusUnknown StatusCode = iota StatusSuccess @@ -20,12 +18,10 @@ const ( ) type ResultBase struct { - Code StatusCode - + Message string - - + StateIndex uint64 } @@ -34,16 +30,14 @@ type BatchMetaData struct { } type Batch struct { - Sequencer string StartHeight uint64 EndHeight uint64 BlockDescriptors []rollapp.BlockDescriptor NextSequencer string - MetaData *BatchMetaData - NumBlocks uint64 + NumBlocks uint64 } type ResultRetrieveBatch struct { @@ -56,51 +50,46 @@ type State struct { } type ResultGetHeightState struct { - ResultBase + ResultBase State } - type Option func(ClientI) - type ClientI interface { - Init(config Config, rollappId string, pubsub *pubsub.Server, logger types.Logger, options ...Option) error - + Start() error - + Stop() error - - + SubmitBatch(batch *types.Batch, daClient da.Client, daResult *da.ResultSubmitBatch) error - + GetLatestBatch() (*ResultRetrieveBatch, error) - + GetBatchAtIndex(index uint64) (*ResultRetrieveBatch, error) - + GetSequencerByAddress(address string) (types.Sequencer, error) - + GetBatchAtHeight(index uint64) (*ResultRetrieveBatch, error) - + GetLatestHeight() (uint64, error) - + GetLatestFinalizedHeight() (uint64, error) - + GetAllSequencers() ([]types.Sequencer, error) - + GetBondedSequencers() ([]types.Sequencer, error) - + GetProposerAtHeight(height int64) (*types.Sequencer, error) - - + GetNextProposer() (*types.Sequencer, error) - + GetRollapp() (*types.Rollapp, error) - + GetObsoleteDrs() ([]uint32, error) - + GetSignerBalance() (types.Balance, error) - + ValidateGenesisBridgeData(data rollapp.GenesisBridgeData) error } diff --git a/store/badger.go b/store/badger.go index 6a67526f2..c12b69665 100644 --- a/store/badger.go +++ b/store/badger.go @@ -16,7 +16,7 @@ import ( const ( gcTimeout = 1 * time.Minute - discardRatio = 0.5 + discardRatio = 0.5 ) var ( @@ -24,14 +24,12 @@ var ( _ KVBatch = &BadgerBatch{} ) - type BadgerKV struct { db *badger.DB closing chan struct{} closeOnce sync.Once } - func NewDefaultInMemoryKVStore() KV { db, err := badger.Open(badger.DefaultOptions("").WithInMemory(true)) if err != nil { @@ -58,12 +56,10 @@ func NewKVStore(rootDir, dbPath, dbName string, syncWrites bool, logger types.Lo return b } - func NewDefaultKVStore(rootDir, dbPath, dbName string) KV { return NewKVStore(rootDir, dbPath, dbName, true, log.NewNopLogger()) } - func Rootify(rootDir, dbPath string) string { if filepath.IsAbs(dbPath) { return dbPath @@ -71,7 +67,6 @@ func Rootify(rootDir, dbPath string) string 
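The settlement registry above maps a client name ("mock", "dymension", "grpc") to a constructor so the node can pick an implementation from configuration. The same pattern in isolation, using a trivial interface in place of settlement.ClientI; the nil return for unknown names is an assumption of this sketch:

package sketch

type Client string

const (
	Local Client = "mock"
	Grpc  Client = "grpc"
)

// clientI is a stand-in for settlement.ClientI.
type clientI interface{ Start() error }

type localClient struct{}

func (localClient) Start() error { return nil }

type grpcClient struct{}

func (grpcClient) Start() error { return nil }

// Constructors are registered once at package init; lookups stay read-only.
var clients = map[Client]func() clientI{
	Local: func() clientI { return &localClient{} },
	Grpc:  func() clientI { return &grpcClient{} },
}

func GetClient(c Client) clientI {
	f, ok := clients[c]
	if !ok {
		return nil
	}
	return f()
}

func RegisteredClients() []Client {
	out := make([]Client, 0, len(clients))
	for c := range clients {
		out = append(out, c)
	}
	return out
}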
{ return filepath.Join(rootDir, dbPath) } - func (b *BadgerKV) Close() error { b.closeOnce.Do(func() { close(b.closing) @@ -85,7 +80,7 @@ func (b *BadgerKV) gc(period time.Duration, discardRatio float64, logger types.L for { select { case <-b.closing: - + return case <-ticker.C: err := b.db.RunValueLogGC(discardRatio) @@ -97,7 +92,6 @@ func (b *BadgerKV) gc(period time.Duration, discardRatio float64, logger types.L } } - func (b *BadgerKV) Get(key []byte) ([]byte, error) { txn := b.db.NewTransaction(false) defer txn.Discard() @@ -111,7 +105,6 @@ func (b *BadgerKV) Get(key []byte) ([]byte, error) { return item.ValueCopy(nil) } - func (b *BadgerKV) Set(key []byte, value []byte) error { txn := b.db.NewTransaction(true) defer txn.Discard() @@ -122,7 +115,6 @@ func (b *BadgerKV) Set(key []byte, value []byte) error { return txn.Commit() } - func (b *BadgerKV) Delete(key []byte) error { txn := b.db.NewTransaction(true) defer txn.Discard() @@ -133,20 +125,16 @@ func (b *BadgerKV) Delete(key []byte) error { return txn.Commit() } - - func (b *BadgerKV) NewBatch() KVBatch { return &BadgerBatch{ txn: b.db.NewTransaction(true), } } - type BadgerBatch struct { txn *badger.Txn } - func (bb *BadgerBatch) Set(key, value []byte) error { if err := bb.txn.Set(key, value); err != nil { return err @@ -155,24 +143,20 @@ func (bb *BadgerBatch) Set(key, value []byte) error { return nil } - func (bb *BadgerBatch) Delete(key []byte) error { return bb.txn.Delete(key) } - func (bb *BadgerBatch) Commit() error { return bb.txn.Commit() } - func (bb *BadgerBatch) Discard() { bb.txn.Discard() } var _ KVIterator = &BadgerIterator{} - func (b *BadgerKV) PrefixIterator(prefix []byte) KVIterator { txn := b.db.NewTransaction(false) iter := txn.NewIterator(badger.DefaultIteratorOptions) @@ -185,7 +169,6 @@ func (b *BadgerKV) PrefixIterator(prefix []byte) KVIterator { } } - type BadgerIterator struct { txn *badger.Txn iter *badger.Iterator @@ -193,22 +176,18 @@ type BadgerIterator struct { lastError error } - func (i *BadgerIterator) Valid() bool { return i.iter.ValidForPrefix(i.prefix) } - func (i *BadgerIterator) Next() { i.iter.Next() } - func (i *BadgerIterator) Key() []byte { return i.iter.Item().KeyCopy(nil) } - func (i *BadgerIterator) Value() []byte { val, err := i.iter.Item().ValueCopy(nil) if err != nil { @@ -217,45 +196,34 @@ func (i *BadgerIterator) Value() []byte { return val } - func (i *BadgerIterator) Error() error { return i.lastError } - func (i *BadgerIterator) Discard() { i.iter.Close() i.txn.Discard() } - - func memoryEfficientBadgerConfig(path string, syncWrites bool) *badger.Options { - opts := badger.DefaultOptions(path) - - + opts := badger.DefaultOptions(path) + opts.SyncWrites = syncWrites - - - - + opts.BlockCacheSize = 0 - + opts.Compression = options.None - - + opts.MemTableSize = 16 << 20 - - + opts.NumMemtables = 3 - - + opts.NumLevelZeroTables = 3 - + opts.NumLevelZeroTablesStall = 5 - + opts.NumCompactors = 2 - + opts.CompactL0OnClose = true return &opts diff --git a/store/prefix.go b/store/prefix.go index e0f4f77d6..027e80390 100644 --- a/store/prefix.go +++ b/store/prefix.go @@ -5,18 +5,15 @@ var ( _ KVBatch = &PrefixKVBatch{} ) - type PrefixKV struct { kv KV prefix []byte } - func (p *PrefixKV) Close() error { return p.kv.Close() } - func NewPrefixKV(kv KV, prefix []byte) *PrefixKV { return &PrefixKV{ kv: kv, @@ -24,22 +21,18 @@ func NewPrefixKV(kv KV, prefix []byte) *PrefixKV { } } - func (p *PrefixKV) Get(key []byte) ([]byte, error) { return p.kv.Get(append(p.prefix, key...)) } - func (p 
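BadgerKV's gc loop above runs value-log garbage collection on a ticker and exits when the store is closed, with sync.Once guarding the close. The shutdown-aware loop shape on its own; runGC is a placeholder for badger's (*DB).RunValueLogGC(discardRatio):

package sketch

import (
	"sync"
	"time"
)

type gcStore struct {
	closing   chan struct{}
	closeOnce sync.Once
}

// gc runs one GC pass per tick until Close is called.
func (s *gcStore) gc(period time.Duration, runGC func() error, onErr func(error)) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		select {
		case <-s.closing:
			return
		case <-ticker.C:
			if err := runGC(); err != nil {
				// badger reports ErrNoRewrite when there was nothing to collect;
				// the caller decides whether that is worth logging.
				onErr(err)
			}
		}
	}
}

// Close is safe to call more than once; only the first call closes the channel.
func (s *gcStore) Close() {
	s.closeOnce.Do(func() { close(s.closing) })
}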
*PrefixKV) Set(key []byte, value []byte) error { return p.kv.Set(append(p.prefix, key...), value) } - func (p *PrefixKV) Delete(key []byte) error { return p.kv.Delete(append(p.prefix, key...)) } - func (p *PrefixKV) NewBatch() KVBatch { return &PrefixKVBatch{ b: p.kv.NewBatch(), @@ -47,33 +40,27 @@ func (p *PrefixKV) NewBatch() KVBatch { } } - func (p *PrefixKV) PrefixIterator(prefix []byte) KVIterator { return p.kv.PrefixIterator(append(p.prefix, prefix...)) } - type PrefixKVBatch struct { b KVBatch prefix []byte } - func (pb *PrefixKVBatch) Set(key, value []byte) error { return pb.b.Set(append(pb.prefix, key...), value) } - func (pb *PrefixKVBatch) Delete(key []byte) error { return pb.b.Delete(append(pb.prefix, key...)) } - func (pb *PrefixKVBatch) Commit() error { return pb.b.Commit() } - func (pb *PrefixKVBatch) Discard() { pb.b.Discard() } diff --git a/store/pruning.go b/store/pruning.go index 5d3ee3ed3..1571e5e30 100644 --- a/store/pruning.go +++ b/store/pruning.go @@ -8,7 +8,6 @@ import ( "github.com/dymensionxyz/gerr-cosmos/gerrc" ) - func (s *DefaultStore) PruneStore(to uint64, logger types.Logger) (uint64, error) { pruned := uint64(0) from, err := s.LoadBaseHeight() @@ -29,7 +28,6 @@ func (s *DefaultStore) PruneStore(to uint64, logger types.Logger) (uint64, error return pruned, nil } - func (s *DefaultStore) pruneHeights(from, to uint64, logger types.Logger) (uint64, error) { pruneBlocks := func(batch KVBatch, height uint64) error { hash, err := s.loadHashFromIndex(height) @@ -64,7 +62,6 @@ func (s *DefaultStore) pruneHeights(from, to uint64, logger types.Logger) (uint6 return pruned, err } - func (s *DefaultStore) prune(from, to uint64, prune func(batch KVBatch, height uint64) error, logger types.Logger) (uint64, error) { pruned := uint64(0) batch := s.db.NewBatch() @@ -86,7 +83,6 @@ func (s *DefaultStore) prune(from, to uint64, prune func(batch KVBatch, height u } pruned++ - if pruned%1000 == 0 && pruned > 0 { err := flush(batch, h) if err != nil { diff --git a/store/store.go b/store/store.go index a0ee6dbd8..05a31cfb3 100644 --- a/store/store.go +++ b/store/store.go @@ -30,33 +30,26 @@ var ( lastBlockSequencerSetPrefix = [1]byte{14} ) - type DefaultStore struct { db KV } var _ Store = &DefaultStore{} - func New(kv KV) Store { return &DefaultStore{ db: kv, } } - func (s *DefaultStore) Close() error { return s.db.Close() } - func (s *DefaultStore) NewBatch() KVBatch { return s.db.NewBatch() } - - - func (s *DefaultStore) SaveBlock(block *types.Block, commit *types.Commit, batch KVBatch) (KVBatch, error) { hash := block.Header.Hash() blockBlob, err := block.MarshalBinary() @@ -69,7 +62,6 @@ func (s *DefaultStore) SaveBlock(block *types.Block, commit *types.Commit, batch return batch, fmt.Errorf("marshal Commit to binary: %w", err) } - if batch != nil { err = multierr.Append(err, batch.Set(getBlockKey(hash), blockBlob)) err = multierr.Append(err, batch.Set(getCommitKey(hash), commitBlob)) @@ -94,10 +86,6 @@ func (s *DefaultStore) SaveBlock(block *types.Block, commit *types.Commit, batch return nil, nil } - - - - func (s *DefaultStore) LoadBlock(height uint64) (*types.Block, error) { h, err := s.loadHashFromIndex(height) if err != nil { @@ -106,7 +94,6 @@ func (s *DefaultStore) LoadBlock(height uint64) (*types.Block, error) { return s.LoadBlockByHash(h) } - func (s *DefaultStore) LoadBlockByHash(hash [32]byte) (*types.Block, error) { blockData, err := s.db.Get(getBlockKey(hash)) if err != nil { @@ -121,7 +108,6 @@ func (s *DefaultStore) LoadBlockByHash(hash [32]byte) (*types.Block, 
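PrefixKV namespaces every key by prepending a fixed prefix before delegating to the wrapped KV, which is how several components can share one Badger instance without colliding. A self-contained illustration over a map-backed store; note the sketch copies into a fresh slice rather than appending to the shared prefix, purely as a defensive choice here, while the dymint code appends directly:

package sketch

// flatKV is a stand-in for the store.KV interface.
type flatKV map[string][]byte

type prefixKV struct {
	kv     flatKV
	prefix []byte
}

// prefixed builds the namespaced key in a fresh slice so repeated calls can
// never alias each other's backing array.
func (p prefixKV) prefixed(key []byte) []byte {
	out := make([]byte, 0, len(p.prefix)+len(key))
	out = append(out, p.prefix...)
	return append(out, key...)
}

func (p prefixKV) Set(key, value []byte) { p.kv[string(p.prefixed(key))] = value }

func (p prefixKV) Get(key []byte) ([]byte, bool) {
	v, ok := p.kv[string(p.prefixed(key))]
	return v, ok
}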
error) { return block, nil } - func (s *DefaultStore) SaveBlockSource(height uint64, source types.BlockSource, batch KVBatch) (KVBatch, error) { b := make([]byte, 8) binary.LittleEndian.PutUint64(b, uint64(source)) @@ -132,7 +118,6 @@ func (s *DefaultStore) SaveBlockSource(height uint64, source types.BlockSource, return batch, err } - func (s *DefaultStore) LoadBlockSource(height uint64) (types.BlockSource, error) { source, err := s.db.Get(getSourceKey(height)) if err != nil { @@ -141,7 +126,6 @@ func (s *DefaultStore) LoadBlockSource(height uint64) (types.BlockSource, error) return types.BlockSource(binary.LittleEndian.Uint64(source)), nil } - func (s *DefaultStore) SaveBlockResponses(height uint64, responses *tmstate.ABCIResponses, batch KVBatch) (KVBatch, error) { data, err := responses.Marshal() if err != nil { @@ -154,7 +138,6 @@ func (s *DefaultStore) SaveBlockResponses(height uint64, responses *tmstate.ABCI return batch, err } - func (s *DefaultStore) LoadBlockResponses(height uint64) (*tmstate.ABCIResponses, error) { data, err := s.db.Get(getResponsesKey(height)) if err != nil { @@ -168,7 +151,6 @@ func (s *DefaultStore) LoadBlockResponses(height uint64) (*tmstate.ABCIResponses return &responses, nil } - func (s *DefaultStore) LoadCommit(height uint64) (*types.Commit, error) { hash, err := s.loadHashFromIndex(height) if err != nil { @@ -177,7 +159,6 @@ func (s *DefaultStore) LoadCommit(height uint64) (*types.Commit, error) { return s.LoadCommitByHash(hash) } - func (s *DefaultStore) LoadCommitByHash(hash [32]byte) (*types.Commit, error) { commitData, err := s.db.Get(getCommitKey(hash)) if err != nil { @@ -191,8 +172,6 @@ func (s *DefaultStore) LoadCommitByHash(hash [32]byte) (*types.Commit, error) { return commit, nil } - - func (s *DefaultStore) SaveState(state *types.State, batch KVBatch) (KVBatch, error) { pbState, err := state.ToProto() if err != nil { @@ -210,7 +189,6 @@ func (s *DefaultStore) SaveState(state *types.State, batch KVBatch) (KVBatch, er return batch, err } - func (s *DefaultStore) LoadState() (*types.State, error) { blob, err := s.db.Get(getStateKey()) if err != nil { @@ -231,7 +209,6 @@ func (s *DefaultStore) LoadState() (*types.State, error) { return &state, nil } - func (s *DefaultStore) SaveProposer(height uint64, proposer types.Sequencer, batch KVBatch) (KVBatch, error) { pbProposer, err := proposer.ToProto() if err != nil { @@ -249,7 +226,6 @@ func (s *DefaultStore) SaveProposer(height uint64, proposer types.Sequencer, bat return batch, err } - func (s *DefaultStore) LoadProposer(height uint64) (types.Sequencer, error) { blob, err := s.db.Get(getProposerKey(height)) if err != nil { diff --git a/store/storeIface.go b/store/storeIface.go index 4cdd2265b..2c8e878fe 100644 --- a/store/storeIface.go +++ b/store/storeIface.go @@ -7,27 +7,22 @@ import ( "github.com/dymensionxyz/dymint/types" ) - - - type KV interface { - Get(key []byte) ([]byte, error) - Set(key []byte, value []byte) error - Delete(key []byte) error - NewBatch() KVBatch - PrefixIterator(prefix []byte) KVIterator - Close() error + Get(key []byte) ([]byte, error) + Set(key []byte, value []byte) error + Delete(key []byte) error + NewBatch() KVBatch + PrefixIterator(prefix []byte) KVIterator + Close() error } - type KVBatch interface { - Set(key, value []byte) error - Delete(key []byte) error - Commit() error - Discard() + Set(key, value []byte) error + Delete(key []byte) error + Commit() error + Discard() } - type KVIterator interface { Valid() bool Next() @@ -37,37 +32,25 @@ type KVIterator 
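The DefaultStore save methods above share one shape: marshal the value, then either stage the write in a caller-supplied KVBatch (so a block, its commit and the height index can land atomically) or write directly when no batch is given. A condensed sketch of that shape; the key prefix byte and helper names here are illustrative, not the store's real ones:

package sketch

import "encoding/binary"

// writer is the subset of store.KV / store.KVBatch needed here.
type writer interface{ Set(key, value []byte) error }

// sourceKey stands in for the store's typed key helpers (getSourceKey and friends).
func sourceKey(height uint64) []byte {
	k := make([]byte, 9)
	k[0] = 0x0B // illustrative prefix byte, not the real one
	binary.BigEndian.PutUint64(k[1:], height)
	return k
}

// saveBlockSource mirrors the batch-or-direct pattern: encode the value, then
// stage it in the caller's batch when one is supplied, otherwise write directly.
func saveBlockSource(kv, batch writer, height, source uint64) error {
	b := make([]byte, 8)
	binary.LittleEndian.PutUint64(b, source)
	if batch != nil {
		return batch.Set(sourceKey(height), b)
	}
	return kv.Set(sourceKey(height), b)
}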
interface { Discard() } - type Store interface { - NewBatch() KVBatch - SaveBlock(block *types.Block, commit *types.Commit, batch KVBatch) (KVBatch, error) - LoadBlock(height uint64) (*types.Block, error) - LoadBlockByHash(hash [32]byte) (*types.Block, error) - SaveBlockResponses(height uint64, responses *tmstate.ABCIResponses, batch KVBatch) (KVBatch, error) - LoadBlockResponses(height uint64) (*tmstate.ABCIResponses, error) - LoadCommit(height uint64) (*types.Commit, error) - LoadCommitByHash(hash [32]byte) (*types.Commit, error) - - SaveState(state *types.State, batch KVBatch) (KVBatch, error) - LoadState() (*types.State, error) SaveProposer(height uint64, proposer types.Sequencer, batch KVBatch) (KVBatch, error) diff --git a/test/loadtime/cmd/load/main.go b/test/loadtime/cmd/load/main.go index ef45d2c3b..874f46660 100644 --- a/test/loadtime/cmd/load/main.go +++ b/test/loadtime/cmd/load/main.go @@ -10,20 +10,15 @@ import ( "github.com/dymensionxyz/dymint/test/pb/loadtime" ) - var ( _ loadtest.ClientFactory = (*ClientFactory)(nil) _ loadtest.Client = (*TxGenerator)(nil) ) - type ClientFactory struct { ID []byte } - - - type TxGenerator struct { id []byte conns uint64 @@ -32,7 +27,7 @@ type TxGenerator struct { } func main() { - u := [16]byte(uuid.New()) + u := [16]byte(uuid.New()) if err := loadtest.RegisterClientFactory("loadtime-client", &ClientFactory{ID: u[:]}); err != nil { panic(err) } @@ -44,7 +39,6 @@ func main() { }) } - func (f *ClientFactory) ValidateConfig(cfg loadtest.Config) error { psb, err := payload.MaxUnpaddedSize() if err != nil { @@ -56,9 +50,6 @@ func (f *ClientFactory) ValidateConfig(cfg loadtest.Config) error { return nil } - - - func (f *ClientFactory) NewClient(cfg loadtest.Config) (loadtest.Client, error) { return &TxGenerator{ id: f.ID, @@ -68,7 +59,6 @@ func (f *ClientFactory) NewClient(cfg loadtest.Config) (loadtest.Client, error) }, nil } - func (c *TxGenerator) GenerateTx() ([]byte, error) { return payload.NewBytes(&loadtime.Payload{ Connections: c.conns, diff --git a/test/loadtime/cmd/report/main.go b/test/loadtime/cmd/report/main.go index 1f17e6f17..0951f00f0 100644 --- a/test/loadtime/cmd/report/main.go +++ b/test/loadtime/cmd/report/main.go @@ -19,19 +19,16 @@ const ( var mainPrefix = [1]byte{0} - type BlockStore struct { *store.DefaultStore base uint64 height uint64 } - func (b *BlockStore) Height() uint64 { return b.height } - func (b *BlockStore) Base() uint64 { return b.base } diff --git a/test/loadtime/payload/payload.go b/test/loadtime/payload/payload.go index 06f8d30b9..f334d6fed 100644 --- a/test/loadtime/payload/payload.go +++ b/test/loadtime/payload/payload.go @@ -16,9 +16,6 @@ const ( maxPayloadSize = 4 * 1024 * 1024 ) - - - func NewBytes(p *loadtime.Payload) ([]byte, error) { p.Padding = make([]byte, 1) nullTime := time.Time{} @@ -32,12 +29,11 @@ func NewBytes(p *loadtime.Payload) ([]byte, error) { if p.Size() > maxPayloadSize { return nil, fmt.Errorf("configured size %d is too large (>%d)", p.Size(), maxPayloadSize) } - pSize := int(p.GetSize_()) + pSize := int(p.GetSize_()) if pSize < us { return nil, fmt.Errorf("configured size %d not large enough to fit unpadded transaction of size %d", pSize, us) } - p.Padding = make([]byte, (pSize-us)/2) _, err = rand.Read(p.Padding) if err != nil { @@ -49,14 +45,9 @@ func NewBytes(p *loadtime.Payload) ([]byte, error) { } h := []byte(hex.EncodeToString(b)) - - return append([]byte(keyPrefix), h...), nil } - - - func FromBytes(b []byte) (*loadtime.Payload, error) { trH := bytes.TrimPrefix(b, 
[]byte(keyPrefix)) if bytes.Equal(b, trH) { @@ -75,8 +66,6 @@ func FromBytes(b []byte) (*loadtime.Payload, error) { return p, nil } - - func MaxUnpaddedSize() (int, error) { p := &loadtime.Payload{ Time: time.Now(), @@ -88,9 +77,6 @@ func MaxUnpaddedSize() (int, error) { return CalculateUnpaddedSize(p) } - - - func CalculateUnpaddedSize(p *loadtime.Payload) (int, error) { if len(p.Padding) != 1 { return 0, fmt.Errorf("expected length of padding to be 1, received %d", len(p.Padding)) diff --git a/test/loadtime/report/report.go b/test/loadtime/report/report.go index f38865ecf..cffb6ccc5 100644 --- a/test/loadtime/report/report.go +++ b/test/loadtime/report/report.go @@ -13,66 +13,43 @@ import ( "github.com/dymensionxyz/dymint/types" ) - - - - type BlockStore interface { Height() uint64 Base() uint64 LoadBlock(uint64) (*types.Block, error) } - type DataPoint struct { Duration time.Duration BlockTime time.Time Hash []byte } - - type Report struct { ID uuid.UUID Rate, Connections, Size uint64 Max, Min, Avg, StdDev time.Duration - - - - - NegativeCount int - TPS uint64 - - - All []DataPoint - sum int64 } - type Reports struct { s map[uuid.UUID]Report l []Report - - - errorCount int } - func (rs *Reports) List() []Report { return rs.l } - func (rs *Reports) ErrorCount() int { return rs.errorCount } @@ -100,9 +77,7 @@ func (rs *Reports) addDataPoint(id uuid.UUID, l time.Duration, bt time.Time, has if int64(l) < 0 { r.NegativeCount++ } - - - + r.sum += int64(l) rs.s[id] = r } @@ -122,14 +97,12 @@ func (rs *Reports) calculateAll() { } } - func calculateTPS(in []DataPoint) uint64 { - blocks := make(map[time.Time]int) for _, v := range in { blocks[v.BlockTime]++ } - + var blockTimes []time.Time for k := range blocks { blockTimes = append(blockTimes, k) @@ -137,7 +110,7 @@ func calculateTPS(in []DataPoint) uint64 { sort.Slice(blockTimes, func(i, j int) bool { return blockTimes[i].Before(blockTimes[j]) }) - + TPS := uint64(0) for index, blockTime := range blockTimes { currentTx := blocks[blockTime] @@ -160,8 +133,6 @@ func (rs *Reports) addError() { rs.errorCount++ } - - func GenerateFromBlockStore(s BlockStore) (*Reports, error) { type payloadData struct { id uuid.UUID @@ -179,11 +150,6 @@ func GenerateFromBlockStore(s BlockStore) (*Reports, error) { s: make(map[uuid.UUID]Report), } - - - - - const poolSize = 16 txc := make(chan txData) diff --git a/testutil/block.go b/testutil/block.go index a07944d9e..952cd5dd7 100644 --- a/testutil/block.go +++ b/testutil/block.go @@ -37,14 +37,9 @@ const ( DefaultTestBatchSize = 5 ) - - - - func GetManagerWithProposerKey(conf config.BlockManagerConfig, proposerKey crypto.PrivKey, settlementlc settlement.ClientI, genesisHeight, storeInitialHeight, storeLastBlockHeight int64, proxyAppConns proxy.AppConns, mockStore store.Store) (*block.Manager, error) { genesis := GenerateGenesis(genesisHeight) - - + raw, _ := proposerKey.GetPublic().Raw() pubkey := ed25519.PubKey(raw) @@ -67,7 +62,6 @@ func GetManagerWithProposerKey(conf config.BlockManagerConfig, proposerKey crypt return nil, err } - if settlementlc == nil { settlementlc = slregistry.GetClient(slregistry.Local) } @@ -96,7 +90,6 @@ func GetManagerWithProposerKey(conf config.BlockManagerConfig, proposerKey crypt mp := mempoolv1.NewTxMempool(logger, tmcfg.DefaultMempoolConfig(), proxyApp.Mempool(), 0) mpIDs := nodemempool.NewMempoolIDs() - p2pKey, _, _ := crypto.GenerateEd25519Key(rand.Reader) p2pClient, err := p2p.NewClient(config.P2PConfig{ GossipSubCacheSize: 50, diff --git a/testutil/logger.go b/testutil/logger.go 
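loadtime's report derives TPS from block timestamps: it buckets transactions per block time, sorts the timestamps, and scans them for the highest sustained rate. The tail of calculateTPS is elided in the hunk above, so the sketch below uses one plausible windowing (rate between consecutive blocks, take the maximum); the real calculation may differ in detail:

package sketch

import (
	"sort"
	"time"
)

type dataPoint struct {
	BlockTime time.Time
}

// calculateTPS buckets transactions by block timestamp and reports the highest
// observed rate between consecutive blocks. Simplified stand-in only.
func calculateTPS(points []dataPoint) uint64 {
	txsPerBlock := make(map[time.Time]int)
	for _, p := range points {
		txsPerBlock[p.BlockTime]++
	}

	times := make([]time.Time, 0, len(txsPerBlock))
	for t := range txsPerBlock {
		times = append(times, t)
	}
	sort.Slice(times, func(i, j int) bool { return times[i].Before(times[j]) })

	var best uint64
	for i := 1; i < len(times); i++ {
		dt := times[i].Sub(times[i-1]).Seconds()
		if dt <= 0 {
			continue
		}
		rate := uint64(float64(txsPerBlock[times[i]]) / dt)
		if rate > best {
			best = rate
		}
	}
	return best
}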
index c71789897..f96e8902e 100644 --- a/testutil/logger.go +++ b/testutil/logger.go @@ -6,15 +6,11 @@ import ( "testing" ) - - - type Logger struct { mtx *sync.Mutex T *testing.T } - func NewLogger(t *testing.T) *Logger { return &Logger{ mtx: new(sync.Mutex), @@ -22,7 +18,6 @@ func NewLogger(t *testing.T) *Logger { } } - func (t *Logger) Debug(msg string, keyvals ...interface{}) { t.T.Helper() t.mtx.Lock() @@ -30,7 +25,6 @@ func (t *Logger) Debug(msg string, keyvals ...interface{}) { t.T.Log(append([]interface{}{"DEBUG: " + msg}, keyvals...)...) } - func (t *Logger) Info(msg string, keyvals ...interface{}) { t.T.Helper() t.mtx.Lock() @@ -38,7 +32,6 @@ func (t *Logger) Info(msg string, keyvals ...interface{}) { t.T.Log(append([]interface{}{"INFO: " + msg}, keyvals...)...) } - func (t *Logger) Error(msg string, keyvals ...interface{}) { t.T.Helper() t.mtx.Lock() @@ -46,24 +39,18 @@ func (t *Logger) Error(msg string, keyvals ...interface{}) { t.T.Log(append([]interface{}{"ERROR: " + msg}, keyvals...)...) } - - - type MockLogger struct { DebugLines, InfoLines, ErrLines []string } - func (t *MockLogger) Debug(msg string, keyvals ...interface{}) { t.DebugLines = append(t.DebugLines, fmt.Sprint(append([]interface{}{msg}, keyvals...)...)) } - func (t *MockLogger) Info(msg string, keyvals ...interface{}) { t.InfoLines = append(t.InfoLines, fmt.Sprint(append([]interface{}{msg}, keyvals...)...)) } - func (t *MockLogger) Error(msg string, keyvals ...interface{}) { t.ErrLines = append(t.ErrLines, fmt.Sprint(append([]interface{}{msg}, keyvals...)...)) } diff --git a/testutil/mocks.go b/testutil/mocks.go index e750c081b..d86a6e757 100644 --- a/testutil/mocks.go +++ b/testutil/mocks.go @@ -29,27 +29,24 @@ import ( rollapptypes "github.com/dymensionxyz/dymint/types/pb/dymensionxyz/dymension/rollapp" ) - type ABCIMethod string const ( - InitChain ABCIMethod = "InitChain" - + CheckTx ABCIMethod = "CheckTx" - + BeginBlock ABCIMethod = "BeginBlock" - + DeliverTx ABCIMethod = "DeliverTx" - + EndBlock ABCIMethod = "EndBlock" - + Commit ABCIMethod = "Commit" - + Info ABCIMethod = "Info" ) - func GetABCIProxyAppMock(logger log.Logger) proxy.AppConns { app := GetAppMock() @@ -60,7 +57,6 @@ func GetABCIProxyAppMock(logger log.Logger) proxy.AppConns { return proxyApp } - func GetAppMock(excludeMethods ...ABCIMethod) *tmmocks.MockApplication { app := &tmmocks.MockApplication{} gbdBz, _ := tmjson.Marshal(rollapptypes.GenesisBridgeData{}) @@ -72,7 +68,6 @@ func GetAppMock(excludeMethods ...ABCIMethod) *tmmocks.MockApplication { app.On("Commit", mock.Anything).Return(abci.ResponseCommit{}) app.On("Info", mock.Anything).Return(abci.ResponseInfo{LastBlockHeight: 0, LastBlockAppHash: []byte{0}}) - for _, method := range excludeMethods { UnsetMockFn(app.On(string(method))) } @@ -92,7 +87,6 @@ var UnsetMockFn = func(call *mock.Call) { } } - func CountMockCalls(totalCalls []mock.Call, methodName string) int { var count int for _, call := range totalCalls { @@ -103,7 +97,6 @@ func CountMockCalls(totalCalls []mock.Call, methodName string) int { return count } - type MockStore struct { ShoudFailSaveState bool ShouldFailUpdateStateWithBatch bool @@ -111,8 +104,6 @@ type MockStore struct { height uint64 } - - func (m *MockStore) SetHeight(height uint64) { m.height = height } @@ -125,7 +116,6 @@ func (m *MockStore) NextHeight() uint64 { return m.height + 1 } - func (m *MockStore) SaveState(state *types.State, batch store.KVBatch) (store.KVBatch, error) { if batch != nil && m.ShouldFailUpdateStateWithBatch || m.ShoudFailSaveState && 
batch == nil { return nil, errors.New("failed to update state") @@ -133,7 +123,6 @@ func (m *MockStore) SaveState(state *types.State, batch store.KVBatch) (store.KV return m.DefaultStore.SaveState(state, batch) } - func NewMockStore() *MockStore { defaultStore := store.New(store.NewDefaultInMemoryKVStore()) return &MockStore{ @@ -148,27 +137,22 @@ const ( connectionRefusedErrorMessage = "connection refused" ) - type DALayerClientSubmitBatchError struct { localda.DataAvailabilityLayerClient } - func (s *DALayerClientSubmitBatchError) SubmitBatch(_ *types.Batch) da.ResultSubmitBatch { return da.ResultSubmitBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: connectionRefusedErrorMessage, Error: errors.New(connectionRefusedErrorMessage)}} } - type DALayerClientRetrieveBatchesError struct { localda.DataAvailabilityLayerClient } - func (m *DALayerClientRetrieveBatchesError) RetrieveBatches(_ *da.DASubmitMetaData) da.ResultRetrieveBatch { return da.ResultRetrieveBatch{BaseResult: da.BaseResult{Code: da.StatusError, Message: batchNotFoundErrorMessage, Error: da.ErrBlobNotFound}} } - type SubscribeMock struct { messageCh chan interface{} } @@ -195,8 +179,7 @@ type MockDA struct { func NewMockDA(t *testing.T) (*MockDA, error) { mockDA := &MockDA{} - - + mockDA.DaClient = registry.GetClient("celestia") config := celestia.Config{ @@ -233,7 +216,7 @@ func NewMockDA(t *testing.T) (*MockDA, error) { nIDSize := 1 tree := exampleNMT(nIDSize, true, 1, 2, 3, 4) - + proof, _ := tree.ProveNamespace(mockDA.NID) mockDA.BlobProof = blob.Proof([]*nmt.Proof{&proof}) @@ -244,7 +227,6 @@ func NewMockDA(t *testing.T) (*MockDA, error) { return mockDA, nil } - func exampleNMT(nidSize int, ignoreMaxNamespace bool, leavesNIDs ...byte) *nmt.NamespacedMerkleTree { tree := nmt.New(sha256.New(), nmt.NamespaceIDSize(nidSize), nmt.IgnoreMaxNamespace(ignoreMaxNamespace)) for i, nid := range leavesNIDs { diff --git a/testutil/node.go b/testutil/node.go index ac7e294b1..7ebf36d19 100644 --- a/testutil/node.go +++ b/testutil/node.go @@ -24,7 +24,7 @@ import ( func CreateNode(isSequencer bool, blockManagerConfig *config.BlockManagerConfig, genesis *types.GenesisDoc) (*node.Node, error) { app := GetAppMock(EndBlock) - + clientCreator := proxy.NewLocalClientCreator(app) proxyApp := proxy.NewAppConns(clientCreator) err := proxyApp.Start() @@ -48,7 +48,6 @@ func CreateNode(isSequencer bool, blockManagerConfig *config.BlockManagerConfig, signingKey, pubkey, _ := crypto.GenerateEd25519Key(rand.Reader) pubkeyBytes, _ := pubkey.Raw() - nodeConfig := config.DefaultNodeConfig if blockManagerConfig == nil { @@ -62,7 +61,6 @@ func CreateNode(isSequencer bool, blockManagerConfig *config.BlockManagerConfig, } nodeConfig.BlockManagerConfig = *blockManagerConfig - nodeConfig.SettlementConfig = settlement.Config{ProposerPubKey: hex.EncodeToString(pubkeyBytes)} node, err := node.NewNode( diff --git a/testutil/p2p.go b/testutil/p2p.go index 318abb499..84eceab68 100644 --- a/testutil/p2p.go +++ b/testutil/p2p.go @@ -45,10 +45,8 @@ type HostDescr struct { RealKey bool } - var blackholeIP6 = net.ParseIP("100::") - func getAddr(sk crypto.PrivKey) (multiaddr.Multiaddr, error) { id, err := peer.IDFromPrivateKey(sk) if err != nil { @@ -92,7 +90,6 @@ func StartTestNetwork(ctx context.Context, t *testing.T, n int, conf map[int]Hos err := mnet.LinkAll() require.NoError(err) - seeds := make([]string, n) for src, descr := range conf { require.Less(src, n) diff --git a/testutil/rpc.go b/testutil/rpc.go index bbf17dae0..48273ea7f 100644 --- 
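The test utilities above inject failures by embedding a working implementation and overriding a single method: DALayerClientSubmitBatchError reuses the local DA client but returns a "connection refused" result, and MockStore turns SaveState into an error based on flags. The pattern in miniature, with a throwaway store interface standing in for the real one:

package sketch

import "errors"

// store is a stand-in for the interface under test.
type store interface {
	SaveState(state []byte) error
}

// realStore is the working implementation the mock embeds.
type realStore struct{ saved [][]byte }

func (r *realStore) SaveState(state []byte) error {
	r.saved = append(r.saved, state)
	return nil
}

// mockStore embeds realStore and overrides SaveState only when failure
// injection is switched on.
type mockStore struct {
	realStore
	ShouldFailSaveState bool
}

func (m *mockStore) SaveState(state []byte) error {
	if m.ShouldFailSaveState {
		return errors.New("failed to update state")
	}
	return m.realStore.SaveState(state)
}

var _ store = (*mockStore)(nil)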
a/testutil/rpc.go +++ b/testutil/rpc.go @@ -13,13 +13,12 @@ import ( ) func CreateLocalServer(t *testing.T) (*rpc.Server, net.Listener) { - listener, err := nettest.NewLocalListener("tcp") require.NoError(t, err) serverReadyCh := make(chan bool, 1) var server *rpc.Server - + go func() { node, err := CreateNode(true, nil, GenerateGenesis(0)) require.NoError(t, err) diff --git a/testutil/types.go b/testutil/types.go index 70ce267db..3c737cbe7 100644 --- a/testutil/types.go +++ b/testutil/types.go @@ -21,9 +21,8 @@ import ( ) const ( - BlockVersion = 1 - + AppVersion = 0 SettlementAccountPrefix = "dym" @@ -63,7 +62,6 @@ func GenerateSettlementAddress() string { return addr } - func generateBlock(height uint64, proposerHash []byte, lastHeaderHash [32]byte) *types.Block { h := createRandomHashes() @@ -135,7 +133,6 @@ func GenerateBlocksWithTxs(startHeight uint64, num uint64, proposerKey crypto.Pr return blocks, nil } - func GenerateBlocks(startHeight uint64, num uint64, proposerKey crypto.PrivKey, lastBlockHeader [32]byte) ([]*types.Block, error) { r, _ := proposerKey.Raw() seq := types.NewSequencerFromValidator(*tmtypes.NewValidator(ed25519.PrivKey(r).PubKey(), 1)) @@ -163,7 +160,6 @@ func GenerateBlocks(startHeight uint64, num uint64, proposerKey crypto.PrivKey, return blocks, nil } - func GenerateCommits(blocks []*types.Block, proposerKey crypto.PrivKey) ([]*types.Commit, error) { commits := make([]*types.Commit, len(blocks)) @@ -205,7 +201,6 @@ func generateSignature(proposerKey crypto.PrivKey, header *types.Header) ([]byte return sign, nil } - func GenerateBatch(startHeight uint64, endHeight uint64, proposerKey crypto.PrivKey, lastBlockHeader [32]byte) (*types.Batch, error) { blocks, err := GenerateBlocks(startHeight, endHeight-startHeight+1, proposerKey, lastBlockHeader) if err != nil { @@ -223,7 +218,6 @@ func GenerateBatch(startHeight uint64, endHeight uint64, proposerKey crypto.Priv return batch, nil } - func GenerateLastBatch(startHeight uint64, endHeight uint64, proposerKey crypto.PrivKey, nextSequencerKey crypto.PrivKey, lastHeaderHash [32]byte) (*types.Batch, error) { nextSequencerRaw, _ := nextSequencerKey.Raw() nextSeq := types.NewSequencerFromValidator(*tmtypes.NewValidator(ed25519.PrivKey(nextSequencerRaw).PubKey(), 1)) @@ -248,7 +242,6 @@ func GenerateLastBatch(startHeight uint64, endHeight uint64, proposerKey crypto. 
return batch, nil } - func GenerateLastBlocks(startHeight uint64, num uint64, proposerKey crypto.PrivKey, lastHeaderHash [32]byte, nextSequencerHash [32]byte) ([]*types.Block, error) { r, _ := proposerKey.Raw() seq := types.NewSequencerFromValidator(*tmtypes.NewValidator(ed25519.PrivKey(r).PubKey(), 1)) @@ -304,7 +297,6 @@ func MustGenerateBatchAndKey(startHeight uint64, endHeight uint64) *types.Batch return MustGenerateBatch(startHeight, endHeight, proposerKey) } - func GenerateRandomValidatorSet() *tmtypes.ValidatorSet { return tmtypes.NewValidatorSet([]*tmtypes.Validator{ tmtypes.NewValidator(ed25519.GenPrivKey().PubKey(), 1), @@ -320,11 +312,10 @@ func GenerateSequencer() types.Sequencer { ) } - func GenerateStateWithSequencer(initialHeight int64, lastBlockHeight int64, pubkey tmcrypto.PubKey) *types.State { s := &types.State{ ChainID: "test-chain", - InitialHeight: uint64(initialHeight), + InitialHeight: uint64(initialHeight), AppHash: [32]byte{}, LastResultsHash: GetEmptyLastResultsHash(), Version: tmstate.Version{ @@ -350,11 +341,10 @@ func GenerateStateWithSequencer(initialHeight int64, lastBlockHeight int64, pubk GenerateSettlementAddress(), []string{GenerateSettlementAddress()}, )) - s.SetHeight(uint64(lastBlockHeight)) + s.SetHeight(uint64(lastBlockHeight)) return s } - func GenerateGenesis(initialHeight int64) *tmtypes.GenesisDoc { return &tmtypes.GenesisDoc{ ChainID: "test-chain", diff --git a/types/batch.go b/types/batch.go index ecfadd20f..4501bad20 100644 --- a/types/batch.go +++ b/types/batch.go @@ -1,21 +1,18 @@ package types const ( - MaxBlockSizeAdjustment = 0.9 + MaxBlockSizeAdjustment = 0.9 ) - - type Batch struct { Blocks []*Block Commits []*Commit - + LastBatch bool DRSVersion []uint32 Revision uint64 } - func (b Batch) StartHeight() uint64 { if len(b.Blocks) == 0 { return 0 @@ -23,7 +20,6 @@ func (b Batch) StartHeight() uint64 { return b.Blocks[0].Header.Height } - func (b Batch) EndHeight() uint64 { if len(b.Blocks) == 0 { return 0 @@ -31,14 +27,10 @@ func (b Batch) EndHeight() uint64 { return b.Blocks[len(b.Blocks)-1].Header.Height } - func (b Batch) NumBlocks() uint64 { return uint64(len(b.Blocks)) } - - - func (b Batch) SizeBlockAndCommitBytes() int { cnt := 0 for _, block := range b.Blocks { diff --git a/types/block.go b/types/block.go index 153eb3333..84a78792e 100644 --- a/types/block.go +++ b/types/block.go @@ -8,40 +8,27 @@ import ( tmtypes "github.com/tendermint/tendermint/types" ) - type Header struct { - Version Version Height uint64 - Time int64 + Time int64 - LastHeaderHash [32]byte - - LastCommitHash [32]byte - DataHash [32]byte - ConsensusHash [32]byte - AppHash [32]byte + LastCommitHash [32]byte + DataHash [32]byte + ConsensusHash [32]byte + AppHash [32]byte - - - LastResultsHash [32]byte - - - - - ProposerAddress []byte + ProposerAddress []byte - SequencerHash [32]byte - + NextSequencersHash [32]byte - ChainID string } @@ -54,16 +41,11 @@ var ( _ encoding.BinaryUnmarshaler = &Header{} ) - - - - type Version struct { Block uint64 App uint64 } - type Block struct { Header Header Data Data @@ -83,7 +65,6 @@ var ( _ encoding.BinaryUnmarshaler = &Block{} ) - type Data struct { Txs Txs IntermediateStateRoots IntermediateStateRoots @@ -91,16 +72,14 @@ type Data struct { ConsensusMessages []*proto.Any } - type EvidenceData struct { Evidence []Evidence } - type Commit struct { Height uint64 HeaderHash [32]byte - + Signatures []Signature TMSignature tmtypes.CommitSig } @@ -109,11 +88,8 @@ func (c Commit) SizeBytes() int { return c.ToProto().Size() } - type 
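The testutil block generators above (GenerateBlocks, GenerateLastBlocks, GenerateBatch) build chains by threading lastHeaderHash from one block into the next. The chaining idea in a standalone form, with a toy header hashed by sha256 in place of the dymint types:

package sketch

import (
	"crypto/sha256"
	"encoding/binary"
)

// header is a stand-in for types.Header: only the fields needed to show chaining.
type header struct {
	Height         uint64
	LastHeaderHash [32]byte
}

func (h header) hash() [32]byte {
	buf := make([]byte, 8+32)
	binary.BigEndian.PutUint64(buf, h.Height)
	copy(buf[8:], h.LastHeaderHash[:])
	return sha256.Sum256(buf)
}

// generateChain mirrors the generators' shape: each header's LastHeaderHash is
// the hash of the one before it, starting from lastHeaderHash.
func generateChain(startHeight, num uint64, lastHeaderHash [32]byte) []header {
	out := make([]header, 0, num)
	prev := lastHeaderHash
	for h := startHeight; h < startHeight+num; h++ {
		hdr := header{Height: h, LastHeaderHash: prev}
		out = append(out, hdr)
		prev = hdr.hash()
	}
	return out
}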
Signature []byte - - type IntermediateStateRoots struct { RawRootsList [][]byte } @@ -123,7 +99,6 @@ func GetLastCommitHash(lastCommit *Commit, header *Header) []byte { return lastABCICommit.Hash() } - func GetDataHash(block *Block) []byte { abciData := tmtypes.Data{ Txs: ToABCIBlockDataTxs(&block.Data), diff --git a/types/block_source.go b/types/block_source.go index 43a2a0be5..0077248d4 100644 --- a/types/block_source.go +++ b/types/block_source.go @@ -24,7 +24,7 @@ var AllSources = []string{"none", "produced", "gossip", "blocksync", "da", "loca type BlockMetaData struct { Source BlockSource DAHeight uint64 - SequencerSet Sequencers + SequencerSet Sequencers } type CachedBlock struct { diff --git a/types/conv.go b/types/conv.go index 37f66eceb..2cd1a3c26 100644 --- a/types/conv.go +++ b/types/conv.go @@ -6,22 +6,18 @@ import ( tmtypes "github.com/tendermint/tendermint/types" ) - - func ToABCIHeaderPB(header *Header) types.Header { tmheader := ToABCIHeader(header) return *tmheader.ToProto() } - - func ToABCIHeader(header *Header) tmtypes.Header { return tmtypes.Header{ Version: version.Consensus{ Block: header.Version.Block, App: header.Version.App, }, - Height: int64(header.Height), + Height: int64(header.Height), Time: header.GetTimestamp(), LastBlockID: tmtypes.BlockID{ Hash: header.LastHeaderHash[:], @@ -43,12 +39,10 @@ func ToABCIHeader(header *Header) tmtypes.Header { } } - - func ToABCIBlock(block *Block) (*tmtypes.Block, error) { abciHeader := ToABCIHeader(&block.Header) abciCommit := ToABCICommit(&block.LastCommit, &block.Header) - + if len(abciCommit.Signatures) == 1 { abciCommit.Signatures[0].ValidatorAddress = block.Header.ProposerAddress } @@ -65,7 +59,6 @@ func ToABCIBlock(block *Block) (*tmtypes.Block, error) { return &abciBlock, nil } - func ToABCIBlockDataTxs(data *Data) []tmtypes.Tx { txs := make([]tmtypes.Tx, len(data.Txs)) for i := range data.Txs { @@ -74,7 +67,6 @@ func ToABCIBlockDataTxs(data *Data) []tmtypes.Tx { return txs } - func ToABCIBlockMeta(block *Block) (*tmtypes.BlockMeta, error) { tmblock, err := ToABCIBlock(block) if err != nil { @@ -90,13 +82,10 @@ func ToABCIBlockMeta(block *Block) (*tmtypes.BlockMeta, error) { }, nil } - - - func ToABCICommit(commit *Commit, header *Header) *tmtypes.Commit { headerHash := header.Hash() tmCommit := tmtypes.Commit{ - Height: int64(commit.Height), + Height: int64(commit.Height), Round: 0, BlockID: tmtypes.BlockID{ Hash: headerHash[:], @@ -106,7 +95,7 @@ func ToABCICommit(commit *Commit, header *Header) *tmtypes.Commit { }, }, } - + if len(commit.TMSignature.Signature) == 0 { for _, sig := range commit.Signatures { commitSig := tmtypes.CommitSig{ @@ -115,7 +104,7 @@ func ToABCICommit(commit *Commit, header *Header) *tmtypes.Commit { } tmCommit.Signatures = append(tmCommit.Signatures, commitSig) } - + if len(commit.Signatures) == 1 { tmCommit.Signatures[0].ValidatorAddress = header.ProposerAddress tmCommit.Signatures[0].Timestamp = header.GetTimestamp() diff --git a/types/errors.go b/types/errors.go index 418e5e5a6..9ae78d6b8 100644 --- a/types/errors.go +++ b/types/errors.go @@ -24,11 +24,8 @@ var ( ErrEmptyProposerAddress = errors.New("no proposer address") ) - var TimeFraudMaxDrift = 10 * time.Minute - - type ErrFraudHeightMismatch struct { Expected uint64 Actual uint64 @@ -37,7 +34,6 @@ type ErrFraudHeightMismatch struct { Proposer []byte } - func NewErrFraudHeightMismatch(expected uint64, header *Header) error { return &ErrFraudHeightMismatch{ Expected: expected, @@ -56,7 +52,6 @@ func (e ErrFraudHeightMismatch) 
Unwrap() error { return gerrc.ErrFault } - type ErrFraudAppHashMismatch struct { Expected [32]byte @@ -66,7 +61,6 @@ type ErrFraudAppHashMismatch struct { Proposer []byte } - func NewErrFraudAppHashMismatch(expected [32]byte, header *Header) error { return &ErrFraudAppHashMismatch{ Expected: expected, @@ -86,7 +80,6 @@ func (e ErrFraudAppHashMismatch) Unwrap() error { return gerrc.ErrFault } - type ErrLastResultsHashMismatch struct { Expected [32]byte @@ -96,7 +89,6 @@ type ErrLastResultsHashMismatch struct { LastResultHash [32]byte } - func NewErrLastResultsHashMismatch(expected [32]byte, header *Header) error { return &ErrLastResultsHashMismatch{ Expected: expected, @@ -116,7 +108,6 @@ func (e ErrLastResultsHashMismatch) Unwrap() error { return gerrc.ErrFault } - type ErrTimeFraud struct { Drift time.Duration ProposerAddress []byte @@ -153,7 +144,6 @@ func (e ErrTimeFraud) Unwrap() error { return gerrc.ErrFault } - type ErrLastHeaderHashMismatch struct { Expected [32]byte LastHeaderHash [32]byte @@ -174,7 +164,6 @@ func (e ErrLastHeaderHashMismatch) Unwrap() error { return gerrc.ErrFault } - type ErrInvalidChainID struct { Expected string Block *Block @@ -200,8 +189,6 @@ func (e ErrInvalidChainID) Unwrap() error { return gerrc.ErrFault } - - type ErrInvalidBlockHeightFraud struct { Expected uint64 Header *Header @@ -227,7 +214,6 @@ func (e ErrInvalidBlockHeightFraud) Unwrap() error { return gerrc.ErrFault } - type ErrInvalidHeaderHashFraud struct { ExpectedHash [32]byte Header *Header @@ -253,7 +239,6 @@ func (e ErrInvalidHeaderHashFraud) Unwrap() error { return gerrc.ErrFault } - type ErrInvalidSignatureFraud struct { Err error Header *Header @@ -280,7 +265,6 @@ func (e ErrInvalidSignatureFraud) Unwrap() error { return gerrc.ErrFault } - type ErrInvalidProposerAddressFraud struct { ExpectedAddress []byte ActualAddress tmcrypto.Address @@ -308,7 +292,6 @@ func (e ErrInvalidProposerAddressFraud) Unwrap() error { return gerrc.ErrFault } - type ErrInvalidSequencerHashFraud struct { ExpectedHash [32]byte ActualHash []byte @@ -336,7 +319,6 @@ func (e ErrInvalidSequencerHashFraud) Unwrap() error { return gerrc.ErrFault } - type ErrInvalidNextSequencersHashFraud struct { ExpectedHash [32]byte Header Header @@ -361,7 +343,6 @@ func (e ErrInvalidNextSequencersHashFraud) Unwrap() error { return gerrc.ErrFault } - type ErrInvalidHeaderDataHashFraud struct { Expected [32]byte Actual [32]byte @@ -390,7 +371,6 @@ func (e ErrInvalidHeaderDataHashFraud) Unwrap() error { return gerrc.ErrFault } - type ErrStateUpdateNumBlocksNotMatchingFraud struct { StateIndex uint64 SLNumBlocks uint64 @@ -418,8 +398,6 @@ func (e ErrStateUpdateNumBlocksNotMatchingFraud) Unwrap() error { return gerrc.ErrFault } - - type ErrStateUpdateHeightNotMatchingFraud struct { StateIndex uint64 SLBeginHeight uint64 @@ -449,7 +427,6 @@ func (e ErrStateUpdateHeightNotMatchingFraud) Unwrap() error { return gerrc.ErrFault } - type ErrStateUpdateStateRootNotMatchingFraud struct { StateIndex uint64 Height uint64 @@ -478,7 +455,6 @@ func (e ErrStateUpdateStateRootNotMatchingFraud) Unwrap() error { return gerrc.ErrFault } - type ErrStateUpdateTimestampNotMatchingFraud struct { StateIndex uint64 Height uint64 @@ -506,7 +482,6 @@ func (e ErrStateUpdateTimestampNotMatchingFraud) Unwrap() error { return gerrc.ErrFault } - type ErrStateUpdateDoubleSigningFraud struct { DABlock *Block P2PBlock *Block @@ -571,7 +546,6 @@ func getJsonFromBlock(block *Block) ([]byte, error) { return jsonBlock, nil } - type ErrStateUpdateBlobNotAvailableFraud struct { 
StateIndex uint64 DA string @@ -599,7 +573,6 @@ func (e ErrStateUpdateBlobNotAvailableFraud) Unwrap() error { return gerrc.ErrFault } - type ErrStateUpdateBlobCorruptedFraud struct { StateIndex uint64 DA string @@ -627,7 +600,6 @@ func (e ErrStateUpdateBlobCorruptedFraud) Unwrap() error { return gerrc.ErrFault } - type ErrStateUpdateDRSVersionFraud struct { StateIndex uint64 Height uint64 diff --git a/types/evidence.go b/types/evidence.go index 8aff5b04f..4978520ec 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -3,19 +3,15 @@ package types import ( "time" - - abci "github.com/tendermint/tendermint/abci/types" ) - - type Evidence interface { - ABCI() []abci.Evidence - Bytes() []byte - Hash() []byte - Height() int64 - String() string - Time() time.Time - ValidateBasic() error + ABCI() []abci.Evidence + Bytes() []byte + Hash() []byte + Height() int64 + String() string + Time() time.Time + ValidateBasic() error } diff --git a/types/hashing.go b/types/hashing.go index 931df948a..465b522ce 100644 --- a/types/hashing.go +++ b/types/hashing.go @@ -1,6 +1,5 @@ package types - func (h *Header) Hash() [32]byte { var hash [32]byte abciHeader := ToABCIHeader(h) @@ -8,7 +7,6 @@ func (h *Header) Hash() [32]byte { return hash } - func (b *Block) Hash() [32]byte { return b.Header.Hash() } diff --git a/types/instruction.go b/types/instruction.go index 8f735f6d1..8bb9426ca 100644 --- a/types/instruction.go +++ b/types/instruction.go @@ -33,7 +33,7 @@ func LoadInstructionFromDisk(dir string) (Instruction, error) { var instruction Instruction filePath := filepath.Join(dir, instructionFileName) - data, err := os.ReadFile(filePath) + data, err := os.ReadFile(filePath) if err != nil { return Instruction{}, err } diff --git a/types/logger.go b/types/logger.go index dfc89d708..c60514627 100644 --- a/types/logger.go +++ b/types/logger.go @@ -1,6 +1,5 @@ package types - type Logger interface { Debug(msg string, keyvals ...interface{}) Info(msg string, keyvals ...interface{}) diff --git a/types/pb/dymensionxyz/dymension/rollapp/errors.go b/types/pb/dymensionxyz/dymension/rollapp/errors.go index 1d9d3c05c..8b3d029a3 100644 --- a/types/pb/dymensionxyz/dymension/rollapp/errors.go +++ b/types/pb/dymensionxyz/dymension/rollapp/errors.go @@ -1,13 +1,10 @@ package rollapp - - import ( errorsmod "cosmossdk.io/errors" "github.com/dymensionxyz/gerr-cosmos/gerrc" ) - var ( ErrRollappExists = errorsmod.Register(ModuleName, 1000, "rollapp already exists") ErrInvalidInitialSequencer = errorsmod.Register(ModuleName, 1001, "empty initial sequencer") @@ -44,7 +41,6 @@ var ( ErrInvalidRequest = errorsmod.Wrap(gerrc.ErrInvalidArgument, "invalid request") ErrInvalidVMType = errorsmod.Wrap(gerrc.ErrInvalidArgument, "invalid vm type") - ErrDisputeAlreadyFinalized = errorsmod.Register(ModuleName, 2000, "disputed height already finalized") ErrDisputeAlreadyReverted = errorsmod.Register(ModuleName, 2001, "disputed height already reverted") ErrWrongClientId = errorsmod.Register(ModuleName, 2002, "client id does not match the rollapp") diff --git a/types/pb/dymensionxyz/dymension/rollapp/events.go b/types/pb/dymensionxyz/dymension/rollapp/events.go index 259a12f03..71fbf9f88 100644 --- a/types/pb/dymensionxyz/dymension/rollapp/events.go +++ b/types/pb/dymensionxyz/dymension/rollapp/events.go @@ -11,12 +11,10 @@ const ( AttributeKeyDAPath = "da_path" AttributeKeyStatus = "status" - EventTypeFraud = "fraud_proposal" AttributeKeyFraudHeight = "fraud_height" AttributeKeyFraudSequencer = "fraud_sequencer" AttributeKeyClientID = 
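Every fraud error type in types/errors.go implements Unwrap() returning gerrc.ErrFault, so a single errors.Is check catches any fraud variant regardless of which concrete type was produced. The mechanism with only the standard library, using a local sentinel in place of gerrc.ErrFault:

package sketch

import (
	"errors"
	"fmt"
)

// errFault stands in for gerrc.ErrFault.
var errFault = errors.New("fault")

type errFraudHeightMismatch struct {
	Expected, Actual uint64
}

func (e errFraudHeightMismatch) Error() string {
	return fmt.Sprintf("possible fraud detected: height mismatch: expected %d, got %d", e.Expected, e.Actual)
}

// Unwrap lets errors.Is(err, errFault) match every fraud variant.
func (e errFraudHeightMismatch) Unwrap() error { return errFault }

func isFraud(err error) bool { return errors.Is(err, errFault) }

Because errors.Is walks the Unwrap chain, the check still matches after the fraud error has been wrapped further up with fmt.Errorf("...: %w", err).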
"client_id" - EventTypeTransferGenesisTransfersEnabled = "transfer_genesis_transfers_enabled" ) diff --git a/types/pb/dymensionxyz/dymension/rollapp/keys.go b/types/pb/dymensionxyz/dymension/rollapp/keys.go index 61858ca0d..18ac0ca4e 100644 --- a/types/pb/dymensionxyz/dymension/rollapp/keys.go +++ b/types/pb/dymensionxyz/dymension/rollapp/keys.go @@ -1,19 +1,14 @@ package rollapp const ( - ModuleName = "rollapp" - StoreKey = ModuleName - RouterKey = ModuleName - QuerierRoute = ModuleName - MemStoreKey = "mem_rollapp" ) diff --git a/types/pb/dymensionxyz/dymension/rollapp/message_update_state.go b/types/pb/dymensionxyz/dymension/rollapp/message_update_state.go index 0dec4fc93..95a8324f0 100644 --- a/types/pb/dymensionxyz/dymension/rollapp/message_update_state.go +++ b/types/pb/dymensionxyz/dymension/rollapp/message_update_state.go @@ -25,7 +25,6 @@ func (msg *MsgUpdateState) ValidateBasic() error { return errorsmod.Wrapf(ErrInvalidAddress, "invalid creator address (%s)", err) } - if msg.NumBlocks == uint64(0) { return errorsmod.Wrap(ErrInvalidNumBlocks, "number of blocks can not be zero") } @@ -34,22 +33,19 @@ func (msg *MsgUpdateState) ValidateBasic() error { return errorsmod.Wrapf(ErrInvalidNumBlocks, "numBlocks(%d) + startHeight(%d) exceeds max uint64", msg.NumBlocks, msg.StartHeight) } - if uint64(len(msg.BDs.BD)) != msg.NumBlocks { return errorsmod.Wrapf(ErrInvalidNumBlocks, "number of blocks (%d) != number of block descriptors(%d)", msg.NumBlocks, len(msg.BDs.BD)) } - if msg.StartHeight == 0 { return errorsmod.Wrapf(ErrWrongBlockHeight, "StartHeight must be greater than zero") } - for bdIndex := uint64(0); bdIndex < msg.NumBlocks; bdIndex += 1 { if msg.BDs.BD[bdIndex].Height != msg.StartHeight+bdIndex { return ErrInvalidBlockSequence } - + if len(msg.BDs.BD[bdIndex].StateRoot) != 32 { return errorsmod.Wrapf(ErrInvalidStateRoot, "StateRoot of block high (%d) must be 32 byte array. 
But received (%d) bytes", msg.BDs.BD[bdIndex].Height, len(msg.BDs.BD[bdIndex].StateRoot)) diff --git a/types/pb/dymensionxyz/dymension/rollapp/params.go b/types/pb/dymensionxyz/dymension/rollapp/params.go index 64c9ad818..a6e392842 100644 --- a/types/pb/dymensionxyz/dymension/rollapp/params.go +++ b/types/pb/dymensionxyz/dymension/rollapp/params.go @@ -2,7 +2,6 @@ package rollapp import "gopkg.in/yaml.v2" - func (p Params) String() string { out, _ := yaml.Marshal(p) return string(out) diff --git a/types/pb/dymensionxyz/dymension/sequencer/events.go b/types/pb/dymensionxyz/dymension/sequencer/events.go index 01fd6ea51..986a48fa7 100644 --- a/types/pb/dymensionxyz/dymension/sequencer/events.go +++ b/types/pb/dymensionxyz/dymension/sequencer/events.go @@ -1,27 +1,20 @@ package sequencer - const ( - EventTypeCreateSequencer = "create_sequencer" AttributeKeyRollappId = "rollapp_id" AttributeKeySequencer = "sequencer" AttributeKeyBond = "bond" AttributeKeyProposer = "proposer" - EventTypeUnbonding = "unbonding" AttributeKeyCompletionTime = "completion_time" - EventTypeNoBondedSequencer = "no_bonded_sequencer" - EventTypeProposerRotated = "proposer_rotated" - EventTypeUnbonded = "unbonded" - EventTypeSlashed = "slashed" ) diff --git a/types/pb/dymensionxyz/dymension/sequencer/keys.go b/types/pb/dymensionxyz/dymension/sequencer/keys.go index 8bae0a8a5..fd7dd13a7 100644 --- a/types/pb/dymensionxyz/dymension/sequencer/keys.go +++ b/types/pb/dymensionxyz/dymension/sequencer/keys.go @@ -11,63 +11,49 @@ import ( var _ binary.ByteOrder const ( - ModuleName = "sequencer" - StoreKey = ModuleName - RouterKey = ModuleName - QuerierRoute = ModuleName - MemStoreKey = "mem_sequencer" ) var ( - KeySeparator = "/" - - SequencersKeyPrefix = []byte{0x00} + SequencersKeyPrefix = []byte{0x00} - - SequencersByRollappKeyPrefix = []byte{0x01} + SequencersByRollappKeyPrefix = []byte{0x01} BondedSequencersKeyPrefix = []byte{0xa1} UnbondedSequencersKeyPrefix = []byte{0xa2} UnbondingSequencersKeyPrefix = []byte{0xa3} - UnbondingQueueKey = []byte{0x41} + UnbondingQueueKey = []byte{0x41} ) - func SequencerKey(sequencerAddress string) []byte { sequencerAddrBytes := []byte(sequencerAddress) return []byte(fmt.Sprintf("%s%s%s", SequencersKeyPrefix, KeySeparator, sequencerAddrBytes)) } - func SequencerByRollappByStatusKey(rollappId, seqAddr string, status OperatingStatus) []byte { return append(SequencersByRollappByStatusKey(rollappId, status), []byte(seqAddr)...) 
} - func SequencersKey() []byte { return SequencersKeyPrefix } - func SequencersByRollappKey(rollappId string) []byte { rollappIdBytes := []byte(rollappId) return []byte(fmt.Sprintf("%s%s%s", SequencersByRollappKeyPrefix, KeySeparator, rollappIdBytes)) } - func SequencersByRollappByStatusKey(rollappId string, status OperatingStatus) []byte { - var prefix []byte switch status { case Bonded: @@ -81,16 +67,14 @@ func SequencersByRollappByStatusKey(rollappId string, status OperatingStatus) [] return []byte(fmt.Sprintf("%s%s%s", SequencersByRollappKey(rollappId), KeySeparator, prefix)) } - func UnbondingQueueByTimeKey(endTime time.Time) []byte { timeBz := sdk.FormatTimeBytes(endTime) prefixL := len(UnbondingQueueKey) bz := make([]byte, prefixL+len(timeBz)) - copy(bz[:prefixL], UnbondingQueueKey) - + copy(bz[prefixL:prefixL+len(timeBz)], timeBz) return bz diff --git a/types/pb/dymensionxyz/dymension/sequencer/params.go b/types/pb/dymensionxyz/dymension/sequencer/params.go index de39b13dc..010572fbf 100644 --- a/types/pb/dymensionxyz/dymension/sequencer/params.go +++ b/types/pb/dymensionxyz/dymension/sequencer/params.go @@ -4,7 +4,6 @@ import ( "gopkg.in/yaml.v2" ) - func (p Params) String() string { out, _ := yaml.Marshal(p) return string(out) diff --git a/types/pb/dymint/state.pb.go b/types/pb/dymint/state.pb.go index 512664f00..8054e566a 100644 --- a/types/pb/dymint/state.pb.go +++ b/types/pb/dymint/state.pb.go @@ -185,7 +185,7 @@ func (m *State) GetRevisionStartHeight() int64 { return 0 } -//rollapp params defined in genesis and updated via gov proposal +// rollapp params defined in genesis and updated via gov proposal type RollappParams struct { //data availability type (e.g. celestia) used in the rollapp Da string `protobuf:"bytes,1,opt,name=da,proto3" json:"da,omitempty"` diff --git a/types/rollapp.go b/types/rollapp.go index 87951daf5..59fdd1b91 100644 --- a/types/rollapp.go +++ b/types/rollapp.go @@ -14,7 +14,6 @@ type Revision struct { func (r Rollapp) LatestRevision() Revision { if len(r.Revisions) == 0 { - return Revision{} } return r.Revisions[len(r.Revisions)-1] diff --git a/types/sequencer_set.go b/types/sequencer_set.go index 1d294b2c7..58ce2c720 100644 --- a/types/sequencer_set.go +++ b/types/sequencer_set.go @@ -13,18 +13,13 @@ import ( "github.com/tendermint/tendermint/types" ) - - - type Sequencer struct { - SettlementAddress string - + RewardAddr string - + WhitelistedRelayers []string - val types.Validator } @@ -45,8 +40,6 @@ func NewSequencer( } } - - func (s Sequencer) IsEmpty() bool { return s.val.PubKey == nil } @@ -71,7 +64,6 @@ func (s Sequencer) TMValset() (*types.ValidatorSet, error) { return types.ValidatorSetFromExistingValidators(s.TMValidators()) } - func (s Sequencer) Hash() ([]byte, error) { vs, err := s.TMValset() if err != nil { @@ -80,7 +72,6 @@ func (s Sequencer) Hash() ([]byte, error) { return vs.Hash(), nil } - func (s Sequencer) MustHash() []byte { h, err := s.Hash() if err != nil { @@ -89,7 +80,6 @@ func (s Sequencer) MustHash() []byte { return h } - func (s Sequencer) AnyConsPubKey() (*codectypes.Any, error) { val := s.TMValidator() pubKey, err := cryptocodec.FromTmPubKeyInterface(val.PubKey) @@ -103,7 +93,6 @@ func (s Sequencer) AnyConsPubKey() (*codectypes.Any, error) { return anyPK, nil } - func (s Sequencer) MustFullHash() []byte { h := sha256.New() h.Write([]byte(s.SettlementAddress)) @@ -115,14 +104,6 @@ func (s Sequencer) MustFullHash() []byte { return h.Sum(nil) } - - - - - - - - func SequencerListRightOuterJoin(A, B Sequencers) Sequencers { 
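The sequencer module's keys above are composed from a byte prefix, a "/" separator and an identifier, and the unbonding queue key appends a sortable timestamp encoding after its prefix so a prefix iterator walks entries in time order. A standalone illustration; formatTime is an assumed stand-in for sdk.FormatTimeBytes, any fixed-width lexically sortable encoding has the same effect:

package sketch

import (
	"fmt"
	"time"
)

var (
	keySeparator        = "/"
	sequencersKeyPrefix = []byte{0x00}
	unbondingQueueKey   = []byte{0x41}
)

// sequencerKey mirrors the prefix + separator + address composition.
func sequencerKey(addr string) []byte {
	return []byte(fmt.Sprintf("%s%s%s", sequencersKeyPrefix, keySeparator, addr))
}

// formatTime stands in for sdk.FormatTimeBytes.
func formatTime(t time.Time) []byte {
	return []byte(t.UTC().Format("2006-01-02T15:04:05.000000000"))
}

// unbondingQueueByTimeKey puts the prefix first and the encoded end time after
// it, so iterating the prefix yields queue entries chronologically.
func unbondingQueueByTimeKey(endTime time.Time) []byte {
	tb := formatTime(endTime)
	bz := make([]byte, len(unbondingQueueKey)+len(tb))
	copy(bz, unbondingQueueKey)
	copy(bz[len(unbondingQueueKey):], tb)
	return bz
}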
lhsSet := make(map[string]struct{}) for _, s := range A { @@ -141,13 +122,8 @@ func (s Sequencer) String() string { return fmt.Sprintf("Sequencer{SettlementAddress: %s RewardAddr: %s WhitelistedRelayers: %v Validator: %s}", s.SettlementAddress, s.RewardAddr, s.WhitelistedRelayers, s.val.String()) } - type Sequencers []Sequencer - - - - type SequencerSet struct { mu sync.RWMutex sequencers Sequencers @@ -160,7 +136,6 @@ func NewSequencerSet(s ...Sequencer) *SequencerSet { } } - func (s *SequencerSet) Set(sequencers Sequencers) { s.mu.Lock() defer s.mu.Unlock() @@ -173,7 +148,6 @@ func (s *SequencerSet) GetAll() Sequencers { return slices.Clone(s.sequencers) } - func (s *SequencerSet) GetByHash(hash []byte) (Sequencer, bool) { s.mu.RLock() defer s.mu.RUnlock() @@ -185,8 +159,6 @@ func (s *SequencerSet) GetByHash(hash []byte) (Sequencer, bool) { return Sequencer{}, false } - - func (s *SequencerSet) GetByAddress(settlementAddress string) (Sequencer, bool) { s.mu.RLock() defer s.mu.RUnlock() @@ -198,7 +170,6 @@ func (s *SequencerSet) GetByAddress(settlementAddress string) (Sequencer, bool) return Sequencer{}, false } - func (s *SequencerSet) GetByConsAddress(consAddr []byte) (Sequencer, bool) { s.mu.RLock() defer s.mu.RUnlock() @@ -214,10 +185,6 @@ func (s *SequencerSet) String() string { return fmt.Sprintf("SequencerSet: %v", s.sequencers) } - - - - func NewSequencerFromValidator(val types.Validator) *Sequencer { return &Sequencer{ SettlementAddress: "", diff --git a/types/serialization.go b/types/serialization.go index 14965e6a0..d7c007015 100644 --- a/types/serialization.go +++ b/types/serialization.go @@ -12,17 +12,14 @@ import ( pb "github.com/dymensionxyz/dymint/types/pb/dymint" ) - func (b *Block) MarshalBinary() ([]byte, error) { return b.ToProto().Marshal() } - func (b *Batch) MarshalBinary() ([]byte, error) { return b.ToProto().Marshal() } - func (b *Block) UnmarshalBinary(data []byte) error { var pBlock pb.Block err := pBlock.Unmarshal(data) @@ -33,7 +30,6 @@ func (b *Block) UnmarshalBinary(data []byte) error { return err } - func (b *Batch) UnmarshalBinary(data []byte) error { var pBatch pb.Batch err := pBatch.Unmarshal(data) @@ -44,12 +40,10 @@ func (b *Batch) UnmarshalBinary(data []byte) error { return err } - func (h *Header) MarshalBinary() ([]byte, error) { return h.ToProto().Marshal() } - func (h *Header) UnmarshalBinary(data []byte) error { var pHeader pb.Header err := pHeader.Unmarshal(data) @@ -60,17 +54,14 @@ func (h *Header) UnmarshalBinary(data []byte) error { return err } - func (d *Data) MarshalBinary() ([]byte, error) { return d.ToProto().Marshal() } - func (c *Commit) MarshalBinary() ([]byte, error) { return c.ToProto().Marshal() } - func (c *Commit) UnmarshalBinary(data []byte) error { var pCommit pb.Commit err := pCommit.Unmarshal(data) @@ -81,7 +72,6 @@ func (c *Commit) UnmarshalBinary(data []byte) error { return err } - func (h *Header) ToProto() *pb.Header { return &pb.Header{ Version: &pb.Version{Block: h.Version.Block, App: h.Version.App}, @@ -101,7 +91,6 @@ func (h *Header) ToProto() *pb.Header { } } - func (h *Header) FromProto(other *pb.Header) error { h.Version.Block = other.Version.Block h.Version.App = other.Version.App @@ -140,8 +129,6 @@ func (h *Header) FromProto(other *pb.Header) error { return nil } - - func safeCopy(dst, src []byte) bool { if len(src) != len(dst) { return false @@ -150,7 +137,6 @@ func safeCopy(dst, src []byte) bool { return true } - func (b *Block) ToProto() *pb.Block { return &pb.Block{ Header: b.Header.ToProto(), @@ -159,7 
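SequencerSet guards its slice with an RWMutex and exposes lookups by hash, settlement address and consensus address, returning clones so callers cannot mutate shared state. The pattern reduced to essentials, with a stand-in sequencer keyed only by address:

package sketch

import (
	"slices"
	"sync"
)

type sequencer struct {
	SettlementAddress string
}

type sequencerSet struct {
	mu         sync.RWMutex
	sequencers []sequencer
}

// Set replaces the whole set under the write lock.
func (s *sequencerSet) Set(seqs []sequencer) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.sequencers = seqs
}

// GetAll returns a clone so callers can't mutate the shared slice.
func (s *sequencerSet) GetAll() []sequencer {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return slices.Clone(s.sequencers)
}

// GetByAddress is a linear scan under the read lock, like the dymint version.
func (s *sequencerSet) GetByAddress(addr string) (sequencer, bool) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	for _, seq := range s.sequencers {
		if seq.SettlementAddress == addr {
			return seq, true
		}
	}
	return sequencer{}, false
}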
diff --git a/types/serialization.go b/types/serialization.go
index 14965e6a0..d7c007015 100644
--- a/types/serialization.go
+++ b/types/serialization.go
@@ -12,17 +12,14 @@ import (
 	pb "github.com/dymensionxyz/dymint/types/pb/dymint"
 )
-
 func (b *Block) MarshalBinary() ([]byte, error) {
 	return b.ToProto().Marshal()
 }
-
 func (b *Batch) MarshalBinary() ([]byte, error) {
 	return b.ToProto().Marshal()
 }
-
 func (b *Block) UnmarshalBinary(data []byte) error {
 	var pBlock pb.Block
 	err := pBlock.Unmarshal(data)
@@ -33,7 +30,6 @@ func (b *Block) UnmarshalBinary(data []byte) error {
 	return err
 }
-
 func (b *Batch) UnmarshalBinary(data []byte) error {
 	var pBatch pb.Batch
 	err := pBatch.Unmarshal(data)
@@ -44,12 +40,10 @@ func (b *Batch) UnmarshalBinary(data []byte) error {
 	return err
 }
-
 func (h *Header) MarshalBinary() ([]byte, error) {
 	return h.ToProto().Marshal()
 }
-
 func (h *Header) UnmarshalBinary(data []byte) error {
 	var pHeader pb.Header
 	err := pHeader.Unmarshal(data)
@@ -60,17 +54,14 @@ func (h *Header) UnmarshalBinary(data []byte) error {
 	return err
 }
-
 func (d *Data) MarshalBinary() ([]byte, error) {
 	return d.ToProto().Marshal()
 }
-
 func (c *Commit) MarshalBinary() ([]byte, error) {
 	return c.ToProto().Marshal()
 }
-
 func (c *Commit) UnmarshalBinary(data []byte) error {
 	var pCommit pb.Commit
 	err := pCommit.Unmarshal(data)
@@ -81,7 +72,6 @@ func (c *Commit) UnmarshalBinary(data []byte) error {
 	return err
 }
-
 func (h *Header) ToProto() *pb.Header {
 	return &pb.Header{
 		Version: &pb.Version{Block: h.Version.Block, App: h.Version.App},
@@ -101,7 +91,6 @@ func (h *Header) ToProto() *pb.Header {
 	}
 }
-
 func (h *Header) FromProto(other *pb.Header) error {
 	h.Version.Block = other.Version.Block
 	h.Version.App = other.Version.App
@@ -140,8 +129,6 @@ func (h *Header) FromProto(other *pb.Header) error {
 	return nil
 }
-
-
 func safeCopy(dst, src []byte) bool {
 	if len(src) != len(dst) {
 		return false
@@ -150,7 +137,6 @@ func safeCopy(dst, src []byte) bool {
 	return true
 }
-
 func (b *Block) ToProto() *pb.Block {
 	return &pb.Block{
 		Header: b.Header.ToProto(),
@@ -159,7 +145,6 @@ func (b *Block) ToProto() *pb.Block {
 	}
 }
-
 func (b *Batch) ToProto() *pb.Batch {
 	return &pb.Batch{
 		StartHeight: b.StartHeight(),
@@ -169,7 +154,6 @@ func (b *Batch) ToProto() *pb.Batch {
 	}
 }
-
 func (d *Data) ToProto() *pb.Data {
 	return &pb.Data{
 		Txs: txsToByteSlices(d.Txs),
@@ -179,7 +163,6 @@ func (d *Data) ToProto() *pb.Data {
 	}
 }
-
 func (b *Block) FromProto(other *pb.Block) error {
 	err := b.Header.FromProto(other.Header)
 	if err != nil {
@@ -199,7 +182,6 @@ func (b *Block) FromProto(other *pb.Block) error {
 	return nil
 }
-
 func (b *Batch) FromProto(other *pb.Batch) error {
 	n := len(other.Blocks)
 	start := other.StartHeight
@@ -215,7 +197,6 @@ func (b *Batch) FromProto(other *pb.Batch) error {
 	return nil
 }
-
 func (c *Commit) ToProto() *pb.Commit {
 	return &pb.Commit{
 		Height: c.Height,
@@ -230,14 +211,13 @@ func (c *Commit) ToProto() *pb.Commit {
 	}
 }
-
 func (c *Commit) FromProto(other *pb.Commit) error {
 	c.Height = other.Height
 	if !safeCopy(c.HeaderHash[:], other.HeaderHash) {
 		return errors.New("invalid length of HeaderHash")
 	}
 	c.Signatures = byteSlicesToSignatures(other.Signatures)
-
+
 	if other.TmSignature != nil {
 		c.TMSignature = types.CommitSig{
 			BlockIDFlag: types.BlockIDFlag(other.TmSignature.BlockIdFlag),
@@ -250,7 +230,6 @@ func (c *Commit) FromProto(other *pb.Commit) error {
 	return nil
 }
-
 func (s *State) ToProto() (*pb.State, error) {
 	var proposerProto *pb.Sequencer
 	proposer := s.GetProposer()
@@ -265,25 +244,24 @@ func (s *State) ToProto() (*pb.State, error) {
 	return &pb.State{
 		Version: &s.Version,
 		ChainId: s.ChainID,
-		InitialHeight: int64(s.InitialHeight),
-		LastBlockHeight: int64(s.Height()),
+		InitialHeight: int64(s.InitialHeight),
+		LastBlockHeight: int64(s.Height()),
 		ConsensusParams: s.ConsensusParams,
 		LastResultsHash: s.LastResultsHash[:],
 		LastHeaderHash: s.LastHeaderHash[:],
 		AppHash: s.AppHash[:],
 		RollappParams: s.RollappParams,
 		Proposer: proposerProto,
-		RevisionStartHeight: int64(s.RevisionStartHeight),
+		RevisionStartHeight: int64(s.RevisionStartHeight),
 	}, nil
 }
-
 func (s *State) FromProto(other *pb.State) error {
 	s.Version = *other.Version
 	s.ChainID = other.ChainId
-	s.InitialHeight = uint64(other.InitialHeight)
-	s.SetHeight(uint64(other.LastBlockHeight))
-	s.RevisionStartHeight = uint64(other.RevisionStartHeight)
+	s.InitialHeight = uint64(other.InitialHeight)
+	s.SetHeight(uint64(other.LastBlockHeight))
+	s.RevisionStartHeight = uint64(other.RevisionStartHeight)
 	if other.Proposer != nil {
 		proposer, err := SequencerFromProto(other.Proposer)
 		if err != nil {
@@ -291,7 +269,6 @@ func (s *State) FromProto(other *pb.State) error {
 		}
 		s.SetProposer(proposer)
 	} else {
-
 		s.SetProposer(nil)
 	}
 
@@ -303,7 +280,6 @@ func (s *State) FromProto(other *pb.State) error {
 	return nil
 }
-
 func (s *Sequencer) ToProto() (*pb.Sequencer, error) {
 	if s == nil {
 		return nil, fmt.Errorf("nil sequencer")
@@ -320,7 +296,6 @@ func (s *Sequencer) ToProto() (*pb.Sequencer, error) {
 	}, nil
 }
-
 func SequencerFromProto(seq *pb.Sequencer) (*Sequencer, error) {
 	if seq == nil {
 		return nil, fmt.Errorf("nil sequencer")
@@ -337,7 +312,6 @@ func SequencerFromProto(seq *pb.Sequencer) (*Sequencer, error) {
 	}, nil
 }
-
 func (s Sequencers) ToProto() (*pb.SequencerSet, error) {
 	seqs := make([]pb.Sequencer, len(s))
 	for i, seq := range s {
@@ -350,7 +324,6 @@ func (s Sequencers) ToProto() (*pb.SequencerSet, error) {
 	return &pb.SequencerSet{Sequencers: seqs}, nil
 }
-
 func SequencersFromProto(s *pb.SequencerSet) (Sequencers, error) {
 	if s == nil {
 		return Sequencers{}, fmt.Errorf("nil sequencer set")
@@ -389,7 +362,7 @@ func evidenceToProto(evidence EvidenceData) []*abci.Evidence {
 	var ret []*abci.Evidence
 	for _, e := range evidence.Evidence {
 		for _, ae := range e.ABCI() {
-			ret = append(ret, &ae)
+			ret = append(ret, &ae)
 		}
 	}
 	return ret
@@ -397,7 +370,7 @@ func evidenceFromProto([]*abci.Evidence) EvidenceData {
 	var ret EvidenceData
-
+
 	return ret
 }
@@ -423,7 +396,6 @@ func byteSlicesToSignatures(bytes [][]byte) []Signature {
 	return sigs
 }
-
 func blocksToProto(blocks []*Block) []*pb.Block {
 	pbBlocks := make([]*pb.Block, len(blocks))
 	for i, b := range blocks {
@@ -432,7 +404,6 @@ func blocksToProto(blocks []*Block) []*pb.Block {
 	return pbBlocks
 }
-
 func protoToBlocks(pbBlocks []*pb.Block) []*Block {
 	blocks := make([]*Block, len(pbBlocks))
 	for i, b := range pbBlocks {
@@ -445,7 +416,6 @@ func protoToBlocks(pbBlocks []*pb.Block) []*Block {
 	return blocks
 }
-
 func commitsToProto(commits []*Commit) []*pb.Commit {
 	pbCommits := make([]*pb.Commit, len(commits))
 	for i, c := range commits {
@@ -454,7 +424,6 @@ func commitsToProto(commits []*Commit) []*pb.Commit {
 	return pbCommits
 }
-
 func protoToCommits(pbCommits []*pb.Commit) []*Commit {
 	commits := make([]*Commit, len(pbCommits))
 	for i, c := range pbCommits {
diff --git a/types/state.go b/types/state.go
index cf1442b18..b23d48942 100644
--- a/types/state.go
+++ b/types/state.go
@@ -5,8 +5,6 @@ import (
 	"fmt"
 	"sync/atomic"
 
-
-
 	tmcrypto "github.com/tendermint/tendermint/crypto"
 	tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
 	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
@@ -16,34 +14,25 @@ import (
 
 const rollappparams_modulename = "rollappparams"
 
-
 type State struct {
 	Version tmstate.Version
 	RevisionStartHeight uint64
-
+
 	ChainID string
-	InitialHeight uint64
+	InitialHeight uint64
-
 	LastBlockHeight atomic.Uint64
-
 	Proposer atomic.Pointer[Sequencer]
-
-
 	ConsensusParams tmproto.ConsensusParams
-
 	LastResultsHash [32]byte
-
 	AppHash [32]byte
-
 	RollappParams dymint.RollappParams
-
 	LastHeaderHash [32]byte
 }
@@ -59,7 +48,6 @@ func (s *State) GetProposerPubKey() tmcrypto.PubKey {
 	return proposer.PubKey()
 }
-
 func (s *State) GetProposerHash() []byte {
 	proposer := s.Proposer.Load()
 	if proposer == nil {
@@ -68,7 +56,6 @@ func (s *State) GetProposerHash() []byte {
 	return proposer.MustHash()
 }
-
 func (s *State) SetProposer(proposer *Sequencer) {
 	s.Proposer.Store(proposer)
 }
@@ -81,18 +68,14 @@ type RollappParams struct {
 	Params *dymint.RollappParams
 }
-
-
 func (s *State) SetHeight(height uint64) {
 	s.LastBlockHeight.Store(height)
 }
-
 func (s *State) Height() uint64 {
 	return s.LastBlockHeight.Load()
 }
-
 func (s *State) NextHeight() uint64 {
 	if s.IsGenesis() {
 		return s.InitialHeight
@@ -100,7 +83,6 @@ func (s *State) NextHeight() uint64 {
 	return s.Height() + 1
 }
-
 func (s *State) SetRollappParamsFromGenesis(appState json.RawMessage) error {
 	var objmap map[string]json.RawMessage
 	err := json.Unmarshal(appState, &objmap)
diff --git a/types/tx.go b/types/tx.go
index fe4d1f6fa..5d083273b 100644
--- a/types/tx.go
+++ b/types/tx.go
@@ -6,20 +6,14 @@ import (
 	tmbytes "github.com/tendermint/tendermint/libs/bytes"
 )
-
 type Tx []byte
-
 type Txs []Tx
-
 func (tx Tx) Hash() []byte {
 	return tmhash.Sum(tx)
 }
-
-
-
 func (txs Txs) Proof(i int) TxProof {
 	l := len(txs)
 	bzs := make([][]byte, l)
@@ -35,7 +29,6 @@ func (txs Txs) Proof(i int) TxProof {
 	}
 }
-
 type TxProof struct {
 	RootHash tmbytes.HexBytes `json:"root_hash"`
 	Data Tx `json:"data"`
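Since the serialization.go, state.go, and tx.go hunks above are comment-only, a small reminder of how the binary codec defined there is typically exercised; this is a sketch using only the MarshalBinary/UnmarshalBinary pair visible in the hunks, not code from the patch:

package blockcodec

import "github.com/dymensionxyz/dymint/types"

// RoundTrip encodes a block to its protobuf-backed binary form and decodes it
// again, using the MarshalBinary/UnmarshalBinary pair defined in serialization.go.
func RoundTrip(b *types.Block) (*types.Block, error) {
	bz, err := b.MarshalBinary()
	if err != nil {
		return nil, err
	}
	var out types.Block
	if err := out.UnmarshalBinary(bz); err != nil {
		return nil, err
	}
	return &out, nil
}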
diff --git a/types/validation.go b/types/validation.go
index 3b7c37f48..6eef146a0 100644
--- a/types/validation.go
+++ b/types/validation.go
@@ -21,7 +21,6 @@ func ValidateProposedTransition(state *State, block *Block, commit *Commit, prop
 	return nil
 }
-
 func (b *Block) ValidateBasic() error {
 	err := b.Header.ValidateBasic()
 	if err != nil {
@@ -93,7 +92,6 @@ func (b *Block) ValidateWithState(state *State) error {
 	return nil
 }
-
 func (h *Header) ValidateBasic() error {
 	if len(h.ProposerAddress) == 0 {
 		return ErrEmptyProposerAddress
@@ -102,13 +100,10 @@ func (h *Header) ValidateBasic() error {
 	return nil
 }
-
-
 func (d *Data) ValidateBasic() error {
 	return nil
 }
-
 func (c *Commit) ValidateBasic() error {
 	if c.Height > 0 {
 		if len(c.Signatures) != 1 {
@@ -133,7 +128,6 @@ func (c *Commit) ValidateWithHeader(proposerPubKey tmcrypto.PubKey, header *Head
 		return err
 	}
-
 	if !proposerPubKey.VerifySignature(abciHeaderBytes, c.Signatures[0]) {
 		return NewErrInvalidSignatureFraud(ErrInvalidSignature, header, c)
 	}
diff --git a/utils/atomic/funcs.go b/utils/atomic/funcs.go
index d6cca097e..6214c3a9a 100644
--- a/utils/atomic/funcs.go
+++ b/utils/atomic/funcs.go
@@ -4,10 +4,6 @@ import (
 	"sync/atomic"
 )
 
-
-
-
 func Uint64Sub(x *atomic.Uint64, y uint64) uint64 {
-
 	return x.Add(^(y - 1))
 }
diff --git a/utils/channel/funcs.go b/utils/channel/funcs.go
index 2513314dd..7f658a6c7 100644
--- a/utils/channel/funcs.go
+++ b/utils/channel/funcs.go
@@ -1,7 +1,5 @@
 package channel
 
-
-
 func DrainForever[T any](chs ...<-chan T) {
 	for _, ch := range chs {
 		go func() {
@@ -12,17 +10,14 @@ func DrainForever[T any](chs ...<-chan T) {
 	}
 }
 
-
-
 type Nudger struct {
-	C chan struct{}
+	C chan struct{}
 }
 
 func NewNudger() *Nudger {
 	return &Nudger{make(chan struct{})}
 }
-
 func (w Nudger) Nudge() {
 	select {
 	case w.C <- struct{}{}:
diff --git a/utils/errors/err_group.go b/utils/errors/err_group.go
index 6ddb5d414..448be967a 100644
--- a/utils/errors/err_group.go
+++ b/utils/errors/err_group.go
@@ -5,12 +5,6 @@ import (
 	"golang.org/x/sync/errgroup"
 )
 
-
-
-
-
-
-
 func ErrGroupGoLog(eg *errgroup.Group, logger types.Logger, fn func() error) {
 	eg.Go(func() error {
 		err := fn()
diff --git a/utils/event/funcs.go b/utils/event/funcs.go
index 000cbf3f0..616807837 100644
--- a/utils/event/funcs.go
+++ b/utils/event/funcs.go
@@ -12,9 +12,6 @@ import (
 	tmquery "github.com/tendermint/tendermint/libs/pubsub/query"
 )
 
-
-
-
 func MustSubscribe(
 	ctx context.Context,
 	pubsubServer *pubsub.Server,
@@ -46,7 +43,6 @@ func MustSubscribe(
 	}
 }
 
-
 func MustPublish(ctx context.Context, pubsubServer *pubsub.Server, msg interface{}, events map[string][]string) {
 	err := pubsubServer.PublishWithEvents(ctx, msg, events)
 	if err != nil && !errors.Is(err, context.Canceled) {
@@ -54,7 +50,6 @@ func MustPublish(ctx context.Context, pubsubServer *pubsub.Server, msg interface
 	}
 }
 
-
 func QueryFor(eventTypeKey, eventType string) tmpubsub.Query {
 	return tmquery.MustParse(fmt.Sprintf("%s='%s'", eventTypeKey, eventType))
 }
diff --git a/utils/queue/queue.go b/utils/queue/queue.go
index 4600ddd86..eca1b9462 100644
--- a/utils/queue/queue.go
+++ b/utils/queue/queue.go
@@ -5,40 +5,32 @@ import (
 	"strings"
 )
 
-
-
 type Queue[T any] struct {
 	elements []T
 }
 
-
 func FromSlice[T any](s []T) *Queue[T] {
 	return &Queue[T]{elements: s}
 }
 
-
 func New[T any]() *Queue[T] {
 	return &Queue[T]{elements: make([]T, 0)}
 }
 
-
 func (q *Queue[T]) Enqueue(values ...T) {
 	q.elements = append(q.elements, values...)
 }
 
-
 func (q *Queue[T]) DequeueAll() []T {
 	values := q.elements
 	q.elements = make([]T, 0)
 	return values
 }
 
-
 func (q *Queue[T]) Size() int {
 	return len(q.elements)
 }
 
-
 func (q *Queue[T]) String() string {
 	str := "Queue["
 	values := []string{}
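The utils/queue hunks above are comment-only; for reference, a minimal usage sketch of the generic queue (the import path is assumed from the package's directory):

package main

import (
	"fmt"

	"github.com/dymensionxyz/dymint/utils/queue"
)

func main() {
	q := queue.New[int]()
	q.Enqueue(1, 2, 3)
	fmt.Println(q.Size())       // 3
	fmt.Println(q.DequeueAll()) // [1 2 3]; the queue is empty afterwards
	fmt.Println(q.Size())       // 0

	q2 := queue.FromSlice([]string{"a", "b"})
	fmt.Println(q2.String())
}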
diff --git a/utils/retry/backoff.go b/utils/retry/backoff.go
index b9276edaa..c48a96344 100644
--- a/utils/retry/backoff.go
+++ b/utils/retry/backoff.go
@@ -10,14 +10,12 @@ const (
 	defaultBackoffFactor = 2
 )
 
-
 type BackoffConfig struct {
 	InitialDelay time.Duration `json:"initial_delay"`
 	MaxDelay time.Duration `json:"max_delay"`
 	GrowthFactor float64 `json:"growth_factor"`
 }
 
-
 func (c BackoffConfig) Backoff() Backoff {
 	return Backoff{
 		delay: c.InitialDelay,
@@ -40,16 +38,12 @@ func WithInitialDelay(d time.Duration) BackoffOption {
 	}
 }
 
-
-
 func WithMaxDelay(d time.Duration) BackoffOption {
 	return func(b *BackoffConfig) {
 		b.MaxDelay = d
 	}
 }
 
-
-
 func WithGrowthFactor(x float64) BackoffOption {
 	return func(b *BackoffConfig) {
 		b.GrowthFactor = x
@@ -68,7 +62,6 @@ func NewBackoffConfig(opts ...BackoffOption) BackoffConfig {
 	return ret
 }
 
-
 func (b *Backoff) Delay() time.Duration {
 	ret := b.delay
 	b.delay = time.Duration(float64(b.delay) * b.growthFactor)
@@ -78,7 +71,6 @@ func (b *Backoff) Delay() time.Duration {
 	return ret
 }
 
-
 func (b *Backoff) Sleep() {
 	time.Sleep(b.Delay())
 }
diff --git a/utils/retry/doc.go b/utils/retry/doc.go
index 6d41b0f16..8bc36c91d 100644
--- a/utils/retry/doc.go
+++ b/utils/retry/doc.go
@@ -1,4 +1 @@
-
-
-
 package retry
diff --git a/version/version.go b/version/version.go
index d461e5b9b..5b9325ac7 100644
--- a/version/version.go
+++ b/version/version.go
@@ -15,5 +15,5 @@ func GetDRSVersion() (uint32, error) {
 	if err != nil {
 		return uint32(0), fmt.Errorf("converting DRS version to int: %v", err)
 	}
-	return uint32(currentDRS), nil
+	return uint32(currentDRS), nil
 }
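Finally, the utils/retry hunks above only strip comments; a minimal sketch of how the backoff helpers visible there are wired together (the flaky call and the attempt limit are illustrative, not from the patch):

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/dymensionxyz/dymint/utils/retry"
)

// flakyCall stands in for a real RPC that may need to be retried.
func flakyCall() error { return errors.New("not ready") }

func main() {
	cfg := retry.NewBackoffConfig(
		retry.WithInitialDelay(100*time.Millisecond),
		retry.WithMaxDelay(2*time.Second),
		retry.WithGrowthFactor(2),
	)
	b := cfg.Backoff() // fresh stateful backoff from the immutable config

	for attempt := 1; attempt <= 5; attempt++ {
		if err := flakyCall(); err == nil {
			return
		}
		d := b.Delay() // returns the current delay, then grows it toward MaxDelay
		fmt.Printf("attempt %d failed, retrying in %s\n", attempt, d)
		time.Sleep(d)
	}
}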