diff --git a/claimtxman/claimtxman.go b/claimtxman/claimtxman.go
index 4af2a1215..1d79175b4 100644
--- a/claimtxman/claimtxman.go
+++ b/claimtxman/claimtxman.go
@@ -66,7 +66,7 @@ func NewClaimTxManager(ctx context.Context, cfg Config, chExitRootEvent chan *et
 	var monitorTx ctmtypes.TxMonitorer
 	if cfg.GroupingClaims.Enabled {
 		log.Info("ClaimTxManager working in compressor mode to group claim txs")
-		monitorTx = NewMonitorCompressedTxs(ctx, storage.(StorageCompressedInterface), client, cfg, nonceCache, auth, etherMan, utils.NewTimeProviderSystemLocalTime())
+		monitorTx = NewMonitorCompressedTxs(ctx, storage.(StorageCompressedInterface), client, cfg, nonceCache, auth, etherMan, utils.NewTimeProviderSystemLocalTime(), cfg.GroupingClaims.GasOffset)
 	} else {
 		log.Info("ClaimTxManager working in regular mode to send claim txs individually")
 		monitorTx = NewMonitorTxs(ctx, storage.(StorageInterface), client, cfg, nonceCache, auth)
@@ -94,7 +94,8 @@ func NewClaimTxManager(ctx context.Context, cfg Config, chExitRootEvent chan *et
 func (tm *ClaimTxManager) Start() {
 	ticker := time.NewTicker(tm.cfg.FrequencyToMonitorTxs.Duration)
 	compressorTicker := time.NewTicker(tm.cfg.GroupingClaims.FrequencyToProcessCompressedClaims.Duration)
-	var ger *etherman.GlobalExitRoot
+	var ger = &etherman.GlobalExitRoot{}
+	var latestProcessedGer common.Hash
 	for {
 		select {
 		case <-tm.ctx.Done():
@@ -121,7 +122,7 @@ func (tm *ClaimTxManager) Start() {
 				log.Infof("Waiting for networkID %d to be synced before processing deposits", tm.l2NetworkID)
 			}
 		case <-compressorTicker.C:
-			if tm.synced && tm.cfg.GroupingClaims.Enabled {
+			if tm.synced && tm.cfg.GroupingClaims.Enabled && ger.GlobalExitRoot != latestProcessedGer {
 				log.Info("Processing deposits for ger: ", ger.GlobalExitRoot)
 				go func() {
 					err := tm.updateDepositsStatus(ger)
 					if err != nil {
 						log.Errorf("failed to update deposits status: %v", err)
 					}
 				}()
+				latestProcessedGer = ger.GlobalExitRoot
 			}
 		case <-ticker.C:
 			err := tm.monitorTxs.MonitorTxs(tm.ctx)
diff --git a/claimtxman/compose_compress_claim.go b/claimtxman/compose_compress_claim.go
index c4b67f482..5de01cd9b 100644
--- a/claimtxman/compose_compress_claim.go
+++ b/claimtxman/compose_compress_claim.go
@@ -6,7 +6,6 @@ import (
 	"fmt"
 	"math/big"
 	"slices"
-	"strings"
 
 	ctmtypes "github.com/0xPolygonHermez/zkevm-bridge-service/claimtxman/types"
 	"github.com/0xPolygonHermez/zkevm-bridge-service/etherman/smartcontracts/claimcompressor"
@@ -44,13 +43,13 @@ type bridgeClaimXParams struct {
 }
 
 type ComposeCompressClaim struct {
-	smcAbi              abi.ABI
+	bridgeContractABI   *abi.ABI
 	methodClaimAssets   abi.Method
 	methodClaimMessages abi.Method
 }
 
 func NewComposeCompressClaim() (*ComposeCompressClaim, error) {
-	smcAbi, err := abi.JSON(strings.NewReader(polygonzkevmbridge.PolygonzkevmbridgeABI))
+	smcAbi, err := polygonzkevmbridge.PolygonzkevmbridgeMetaData.GetAbi()
 	if err != nil {
 		return nil, errors.New("fails to read abi fom Bridge contract")
 	}
@@ -63,7 +62,7 @@ func NewComposeCompressClaim() (*ComposeCompressClaim, error) {
 		return nil, errors.New("method claimMessages not found")
 	}
 	return &ComposeCompressClaim{
-		smcAbi:              smcAbi,
+		bridgeContractABI:   smcAbi,
 		methodClaimAssets:   methodClaimAssets,
 		methodClaimMessages: methodClaimMessages,
 	}, nil
@@ -129,7 +128,7 @@ func (c *ComposeCompressClaim) extractParams(data []byte) (*bridgeClaimXParams,
 }
 func (c *ComposeCompressClaim) extractParamsClaimX(data []byte) (*bridgeClaimXParams, error) { // do something
-	method, err := c.smcAbi.MethodById(data[:4])
+	method, err := c.bridgeContractABI.MethodById(data[:4])
 	if err != nil {
 		return nil, fmt.Errorf("extracting params, getting method err: %w ", err)
 	}
diff --git a/claimtxman/config.go b/claimtxman/config.go
index c0a535d32..13f30b169 100644
--- a/claimtxman/config.go
+++ b/claimtxman/config.go
@@ -42,4 +42,6 @@ type ConfigGroupingClaims struct {
 	RetryInterval types.Duration `mapstructure:"RetryInterval"`
 	// RetryTimeout is the maximum time to wait for a claim tx to be mined
 	RetryTimeout types.Duration `mapstructure:"RetryTimeout"`
+	// GasOffset is the offset for the gas estimation
+	GasOffset uint64 `mapstructure:"GasOffset"`
 }
diff --git a/claimtxman/monitor_compressed_txs.go b/claimtxman/monitor_compressed_txs.go
index 5150b66a9..686b9f147 100644
--- a/claimtxman/monitor_compressed_txs.go
+++ b/claimtxman/monitor_compressed_txs.go
@@ -34,7 +34,7 @@ type StorageCompressedInterface interface {
 
 type EthermanI interface {
 	CompressClaimCall(mainnetExitRoot, rollupExitRoot common.Hash, claimData []claimcompressor.ClaimCompressorCompressClaimCallData) ([]byte, error)
-	SendCompressedClaims(auth *bind.TransactOpts, compressedTxData []byte) (common.Hash, error)
+	SendCompressedClaims(auth *bind.TransactOpts, compressedTxData []byte) (*types.Transaction, error)
 }
 type MonitorCompressedTxs struct {
 	storage StorageCompressedInterface
@@ -48,6 +48,7 @@ type MonitorCompressedTxs struct {
 	compressClaimComposer *ComposeCompressClaim
 	timeProvider          utils.TimeProvider
 	triggerGroups         *GroupsTrigger
+	gasOffset             uint64
 }
 
 func NewMonitorCompressedTxs(ctx context.Context,
@@ -57,7 +58,8 @@ func NewMonitorCompressedTxs(ctx context.Context,
 	nonceCache *NonceCache,
 	auth *bind.TransactOpts,
 	etherMan EthermanI,
-	timeProvider utils.TimeProvider) *MonitorCompressedTxs {
+	timeProvider utils.TimeProvider,
+	gasOffset uint64) *MonitorCompressedTxs {
 	composer, err := NewComposeCompressClaim()
 	if err != nil {
 		log.Fatal("failed to create ComposeCompressClaim: %v", err)
@@ -73,6 +75,7 @@ func NewMonitorCompressedTxs(ctx context.Context,
 		compressClaimComposer: composer,
 		timeProvider:          timeProvider,
 		triggerGroups:         NewGroupsTrigger(cfg.GroupingClaims),
+		gasOffset:             gasOffset,
 	}
 }
 
@@ -317,17 +320,32 @@ func (tm *MonitorCompressedTxs) SendClaims(pendingTx *PendingTxs, onlyFirstOne b
 			continue
 		}
 
+		// Estimating Gas
+		auth := *tm.auth
+		auth.NoSend = true
+		estimatedTx, err := tm.etherMan.SendCompressedClaims(&auth, group.DbEntry.CompressedTxData)
+		if err != nil {
+			msg := fmt.Sprintf("failed to call SMC SendCompressedClaims for group %d: %v", group.DbEntry.GroupID, err)
+			log.Warn(msg)
+			group.DbEntry.LastLog = msg
+			continue
+		}
+		auth.NoSend = false
+		log.Debug("estimatedGAS: ", estimatedTx.Gas())
+		auth.GasLimit = estimatedTx.Gas() + tm.gasOffset
+		log.Debug("New GAS: ", auth.GasLimit)
 		// Send claim tx
-		txHash, err := tm.etherMan.SendCompressedClaims(tm.auth, group.DbEntry.CompressedTxData)
+		tx, err := tm.etherMan.SendCompressedClaims(&auth, group.DbEntry.CompressedTxData)
 		if err != nil {
 			msg := fmt.Sprintf("failed to call SMC SendCompressedClaims for group %d: %v", group.DbEntry.GroupID, err)
 			log.Warn(msg)
 			group.DbEntry.LastLog = msg
 			continue
 		}
-		log.Infof("send claim tx try: %d for group_id:%d deposits_id:%s txHash:%s", group.DbEntry.NumRetries, group.DbEntry.GroupID, group.GetTxsDepositIDString(), txHash.String())
+		log.Debug("Tx gas limit: ", tx.Gas())
+		log.Infof("Send claim tx try: %d for group_id:%d deposits_id:%s txHash:%s", group.DbEntry.NumRetries, group.DbEntry.GroupID, group.GetTxsDepositIDString(), tx.Hash().String())
 		group.DbEntry.Status = ctmtypes.MonitoredTxGroupStatusClaiming
-		group.DbEntry.AddPendingTx(txHash)
+		group.DbEntry.AddPendingTx(tx.Hash())
 		group.DbEntry.NumRetries++
 	}
 	return nil
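Note on the gas handling introduced in the SendClaims hunk above: `bind.TransactOpts.NoSend` makes an abigen binding build, gas-estimate, and sign the transaction without broadcasting it, so the first `SendCompressedClaims` call is effectively a dry run whose `Gas()` is the node's estimate. A minimal, self-contained sketch of the same pattern; the `contractCall` type and function names here are illustrative, not bridge-service API:

```go
// Sketch of the estimate-then-send pattern used by SendClaims above.
// contractCall stands in for any abigen-generated method with the same
// shape as the claim compressor's SendCompressedClaims(auth, data).
package claimsketch

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/core/types"
)

type contractCall func(auth *bind.TransactOpts) (*types.Transaction, error)

// sendWithGasOffset dry-runs the call with NoSend=true, so go-ethereum runs
// gas estimation and signs the tx without broadcasting it, then re-issues
// the call with the estimate plus a fixed offset as an explicit gas limit.
func sendWithGasOffset(auth bind.TransactOpts, gasOffset uint64, call contractCall) (*types.Transaction, error) {
	estimateOpts := auth // copy: the caller's opts stay untouched
	estimateOpts.NoSend = true
	dryRun, err := call(&estimateOpts)
	if err != nil {
		return nil, fmt.Errorf("gas estimation failed: %w", err)
	}
	sendOpts := auth
	sendOpts.GasLimit = dryRun.Gas() + gasOffset // Gas() is the estimated limit
	return call(&sendOpts)
}
```

The offset absorbs estimation drift between the dry run and inclusion; with `GasOffset = 0` (the new default below) the node's estimate is used unchanged.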
diff --git a/config/config.debug.toml b/config/config.debug.toml
index 7dcaaaf75..8a4c42085 100644
--- a/config/config.debug.toml
+++ b/config/config.debug.toml
@@ -27,6 +27,7 @@ AuthorizedClaimMessageAddresses = ["0x90F79bf6EB2c4f870365E785982E1f101E93b906"]
 	RetryInterval = "10s"
 	RetryTimeout = "30s"
 	FrequencyToProcessCompressedClaims = "1m"
+	GasOffset = 100000
 
 [Etherman]
 L1URL = "http://localhost:8545"
@@ -57,7 +58,7 @@ BridgeVersion = "v1"
 MaxConns = 20
 
 [NetworkConfig]
-GenBlockNumber = 1
+GenBlockNumber = 0
 PolygonBridgeAddress = "0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E"
 PolygonZkEVMGlobalExitRootAddress = "0x8A791620dd6260079BF849Dc5567aDC3F2FdC318"
 PolygonRollupManagerAddress = "0xB7f8BC63BbcaD18155201308C8f3540b07f84F5e"
diff --git a/config/config.local.toml b/config/config.local.toml
index 23ef87ffe..cafb8a1ff 100644
--- a/config/config.local.toml
+++ b/config/config.local.toml
@@ -27,6 +27,7 @@ AuthorizedClaimMessageAddresses = ["0x90F79bf6EB2c4f870365E785982E1f101E93b906"]
 	RetryInterval = "10s"
 	RetryTimeout = "30s"
 	FrequencyToProcessCompressedClaims = "1m"
+	GasOffset = 100000
 
 [Etherman]
 L1URL = "http://zkevm-mock-l1-network:8545"
@@ -57,7 +58,7 @@ BridgeVersion = "v1"
 MaxConns = 20
 
 [NetworkConfig]
-GenBlockNumber = 1
+GenBlockNumber = 0
 PolygonBridgeAddress = "0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E"
 PolygonZkEVMGlobalExitRootAddress = "0x8A791620dd6260079BF849Dc5567aDC3F2FdC318"
 PolygonRollupManagerAddress = "0xB7f8BC63BbcaD18155201308C8f3540b07f84F5e"
diff --git a/config/default.go b/config/default.go
index 0db37aa69..c4c387f6b 100644
--- a/config/default.go
+++ b/config/default.go
@@ -38,6 +38,7 @@ AuthorizedClaimMessageAddresses = []
 	MaxRetries = 2
 	RetryInterval = "10s"
 	RetryTimeout = "30s"
+	GasOffset = 0
 
 [Etherman]
diff --git a/db/pgstorage/migrations/0010.sql b/db/pgstorage/migrations/0010.sql
new file mode 100644
index 000000000..5f546c395
--- /dev/null
+++ b/db/pgstorage/migrations/0010.sql
@@ -0,0 +1,25 @@
+-- +migrate Up
+
+-- This migration will delete all empty blocks
+DELETE FROM sync.block
+WHERE NOT EXISTS (SELECT *
+                  FROM sync.claim
+                  WHERE sync.claim.block_id = sync.block.id)
+  AND NOT EXISTS (SELECT *
+                  FROM sync.deposit
+                  WHERE sync.deposit.block_id = sync.block.id)
+  AND NOT EXISTS (SELECT *
+                  FROM sync.token_wrapped
+                  WHERE sync.token_wrapped.block_id = sync.block.id)
+  AND NOT EXISTS (SELECT *
+                  FROM sync.exit_root
+                  WHERE sync.exit_root.block_id = sync.block.id)
+  AND NOT EXISTS (SELECT *
+                  FROM mt.rollup_exit
+                  WHERE mt.rollup_exit.block_id = sync.block.id)
+  AND sync.block.id != 0;
+
+
+-- +migrate Down
+
+-- no action is needed, the data must remain deleted as it is useless
\ No newline at end of file
diff --git a/db/pgstorage/migrations/0010_test.go b/db/pgstorage/migrations/0010_test.go
new file mode 100644
index 000000000..91c8be963
--- /dev/null
+++ b/db/pgstorage/migrations/0010_test.go
@@ -0,0 +1,93 @@
+package migrations_test
+
+import (
+	"database/sql"
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+// this migration test checks that empty blocks are deleted
+type migrationTest0010 struct{}
+
+func (m migrationTest0010) InsertData(db *sql.DB) error {
+	addBlocks := `
+	INSERT INTO sync.block
+		(block_num, block_hash, parent_hash, received_at, network_id)
+	VALUES(1, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b20', 
'0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50fe', '2024-03-11 02:52:23.000', 0); + INSERT INTO sync.block + (block_num, block_hash, parent_hash, received_at, network_id) + VALUES(2, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b21', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f1', '2024-03-11 02:52:24.000', 0); + INSERT INTO sync.block + (block_num, block_hash, parent_hash, received_at, network_id) + VALUES(3, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b22', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f2', '2024-03-11 02:52:25.000', 0); + INSERT INTO sync.block + (block_num, block_hash, parent_hash, received_at, network_id) + VALUES(4, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b23', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f3', '2024-03-11 02:52:26.000', 0); + INSERT INTO sync.block + (block_num, block_hash, parent_hash, received_at, network_id) + VALUES(5, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b24', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f4', '2024-03-11 02:52:27.000', 0); + INSERT INTO sync.block + (block_num, block_hash, parent_hash, received_at, network_id) + VALUES(6, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b25', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f5', '2024-03-11 02:52:28.000', 0); + INSERT INTO sync.block + (block_num, block_hash, parent_hash, received_at, network_id) + VALUES(7, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b26', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f6', '2024-03-11 02:52:29.000', 0); + INSERT INTO sync.block + (block_num, block_hash, parent_hash, received_at, network_id) + VALUES(8, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b27', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f7', '2024-03-11 02:52:30.000', 0); + INSERT INTO sync.block + (block_num, block_hash, parent_hash, received_at, network_id) + VALUES(9, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b28', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f8', '2024-03-11 02:52:31.000', 0); + INSERT INTO sync.block + (block_num, block_hash, parent_hash, received_at, network_id) + VALUES(10, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b29', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f9', '2024-03-11 02:52:32.000', 0); + INSERT INTO sync.block + (block_num, block_hash, parent_hash, received_at, network_id) + VALUES(11, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b2a', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50fa', '2024-03-11 02:52:33.000', 0); + INSERT INTO sync.claim + (network_id, "index", orig_net, orig_addr, amount, dest_addr, block_id, tx_hash, rollup_index, mainnet_flag) + VALUES(1, 0, 0, decode('0000000000000000000000000000000000000000','hex'), '100000000000000000', decode('F35960302A07022ABA880DFFAEC2FDD64D5BF1C1','hex'), 2, decode('3E24EC7286B5138DE66E8B2B854EE957579B2651B3A454AD32C55A985364FAFF','hex'), 0, false); + INSERT INTO sync.deposit + (leaf_type, network_id, orig_net, orig_addr, amount, dest_net, dest_addr, block_id, deposit_cnt, tx_hash, metadata, id, ready_for_claim) + VALUES(0, 1, 0, decode('0000000000000000000000000000000000000000','hex'), '2000000000000000', 0, decode('2536C2745AC4A584656A830F7BDCD329C94E8F30','hex'), 3, 1, 
decode('1E615900D623C9291992C79ED156A950BE7DA69B8E58A67DC6F2BCDE2EB236FC','hex'), decode('','hex'), 2, true); + INSERT INTO sync.token_wrapped + (network_id, orig_net, orig_token_addr, wrapped_token_addr, block_id, "name", symbol, decimals) + VALUES(1, 0, decode('5AA6D983DECB146A5810BB28CCD2554F29176AB6','hex'), decode('6014E48D6C37CD0953E86F511CF04DDD7C37029D','hex'), 5, 'ToniToken', 'TRM', 18); + INSERT INTO mt.rollup_exit + (id, leaf, rollup_id, root, block_id) + VALUES(1, decode('4C907345C62B48529CE718F3A32E8BE63A3AE02831386A638419C6CBE6606558','hex'), 1, decode('3CAF4160ABD2C2160305420728BDFECC882456DCA00247FEAFC2C00ADA3E19E0','hex'), 6); + INSERT INTO sync.exit_root + (id, block_id, global_exit_root, exit_roots) + VALUES(1, 8, decode('AD3228B676F7D3CD4284A5443F17F1962B36E491B30A40B2405849E597BA5FB5','hex'), '{decode(''5C7830303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030'',''hex''),decode(''5C7830303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030'',''hex'')}'); + ` + if _, err := db.Exec(addBlocks); err != nil { + return err + } + blockCount := `SELECT count(*) FROM sync.block` + var count int + err := db.QueryRow(blockCount).Scan(&count) + if err != nil { + return err + } + if count != 12 { + return fmt.Errorf("error: initial wrong number of blocks: %d", count) + } + return nil +} + +func (m migrationTest0010) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { + blockCount := `SELECT count(*) FROM sync.block` + var count int + err := db.QueryRow(blockCount).Scan(&count) + assert.NoError(t, err) + assert.Equal(t, 6, count) +} + +func (m migrationTest0010) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { +} + +func TestMigration0010(t *testing.T) { + runMigrationTest(t, 10, migrationTest0010{}) +} diff --git a/etherman/etherman.go b/etherman/etherman.go index 7cb341d59..ec7614381 100644 --- a/etherman/etherman.go +++ b/etherman/etherman.go @@ -765,13 +765,13 @@ func (etherMan *Client) AddExistingRollupEvent(ctx context.Context, vLog types.L return nil } -func (etherMan *Client) SendCompressedClaims(auth *bind.TransactOpts, compressedTxData []byte) (common.Hash, error) { +func (etherMan *Client) SendCompressedClaims(auth *bind.TransactOpts, compressedTxData []byte) (*types.Transaction, error) { claimTx, err := etherMan.ClaimCompressor.SendCompressedClaims(auth, compressedTxData) if err != nil { log.Error("failed to call SMC SendCompressedClaims: %v", err) - return common.Hash{}, err + return nil, err } - return claimTx.Hash(), err + return claimTx, err } func (etherMan *Client) CompressClaimCall(mainnetExitRoot, rollupExitRoot common.Hash, claimData []claimcompressor.ClaimCompressorCompressClaimCallData) ([]byte, error) { diff --git a/synchronizer/synchronizer.go b/synchronizer/synchronizer.go index bd0e44c21..4984ffd0c 100644 --- a/synchronizer/synchronizer.go +++ b/synchronizer/synchronizer.go @@ -39,7 +39,7 @@ type ClientSynchronizer struct { // NewSynchronizer creates and initializes an instance of Synchronizer func NewSynchronizer( - ctx context.Context, + parentCtx context.Context, storage interface{}, bridge bridgectrlInterface, ethMan ethermanInterface, @@ -48,7 +48,7 @@ func NewSynchronizer( chExitRootEvent chan *etherman.GlobalExitRoot, chSynced chan uint, cfg Config) (Synchronizer, error) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(parentCtx) networkID, err := 
ethMan.GetNetworkID(ctx) if err != nil { log.Fatal("error getting networkID. Error: ", err) @@ -182,6 +182,7 @@ func (s *ClientSynchronizer) Sync() error { // Stop function stops the synchronizer func (s *ClientSynchronizer) Stop() { + log.Info("Stopping synchronizer and cancelling context") s.cancelCtx() } @@ -224,8 +225,14 @@ func (s *ClientSynchronizer) syncTrustedState() error { // This function syncs the node from a specific block to the latest func (s *ClientSynchronizer) syncBlocks(lastBlockSynced *etherman.Block) (*etherman.Block, error) { + // Call the blockchain to retrieve data + header, err := s.etherMan.HeaderByNumber(s.ctx, nil) + if err != nil { + return lastBlockSynced, err + } + lastKnownBlock := header.Number // This function will read events fromBlockNum to latestEthBlock. Check reorg to be sure that everything is ok. - block, err := s.checkReorg(lastBlockSynced) + block, err := s.checkReorg(lastBlockSynced, nil) if err != nil { log.Errorf("networkID: %d, error checking reorgs. Retrying... Err: %s", s.networkID, err.Error()) return lastBlockSynced, fmt.Errorf("networkID: %d, error checking reorgs", s.networkID) @@ -239,20 +246,22 @@ func (s *ClientSynchronizer) syncBlocks(lastBlockSynced *etherman.Block) (*ether return block, nil } log.Debugf("NetworkID: %d, after checkReorg: no reorg detected", s.networkID) - // Call the blockchain to retrieve data - header, err := s.etherMan.HeaderByNumber(s.ctx, nil) - if err != nil { - return lastBlockSynced, err - } - lastKnownBlock := header.Number var fromBlock uint64 if lastBlockSynced.BlockNumber > 0 { - fromBlock = lastBlockSynced.BlockNumber + 1 + fromBlock = lastBlockSynced.BlockNumber } + toBlock := fromBlock + s.cfg.SyncChunkSize for { - toBlock := fromBlock + s.cfg.SyncChunkSize + if toBlock > lastKnownBlock.Uint64() { + log.Debug("Setting toBlock to the lastKnownBlock: ", lastKnownBlock) + toBlock = lastKnownBlock.Uint64() + } + if fromBlock > toBlock { + log.Debug("FromBlock is higher than toBlock. Skipping...") + return lastBlockSynced, nil + } log.Debugf("NetworkID: %d, Getting bridge info from block %d to block %d", s.networkID, fromBlock, toBlock) // This function returns the rollup information contained in the ethereum blocks and an extra param called order. @@ -263,6 +272,63 @@ func (s *ClientSynchronizer) syncBlocks(lastBlockSynced *etherman.Block) (*ether if err != nil { return lastBlockSynced, err } + + if fromBlock == s.genBlockNumber { + if len(blocks) == 0 || (len(blocks) != 0 && blocks[0].BlockNumber != s.genBlockNumber) { + log.Debugf("NetworkID: %d. adding genesis empty block", s.networkID) + blocks = append([]etherman.Block{{}}, blocks...) + } + } else if fromBlock < s.genBlockNumber { + err := fmt.Errorf("NetworkID: %d. fromBlock %d is lower than the genesisBlockNumber %d", s.networkID, fromBlock, s.genBlockNumber) + log.Warn(err) + return lastBlockSynced, err + } + var initBlockReceived *etherman.Block + if len(blocks) != 0 { + initBlockReceived = &blocks[0] + // First position of the array must be deleted + blocks = removeBlockElement(blocks, 0) + } else { + // Reorg detected + log.Infof("NetworkID: %d, reorg detected in block %d while querying GetRollupInfoByBlockRange. 
Rolling back to at least the previous block", s.networkID, fromBlock)
+			prevBlock, err := s.storage.GetPreviousBlock(s.ctx, s.networkID, 1, nil)
+			if errors.Is(err, gerror.ErrStorageNotFound) {
+				log.Warnf("networkID: %d, error checking reorg: previous block not found in db: %v", s.networkID, err)
+				prevBlock = &etherman.Block{}
+			} else if err != nil {
+				log.Errorf("networkID: %d, error getting previousBlock from db. Error: %v", s.networkID, err)
+				return lastBlockSynced, err
+			}
+			blockReorged, err := s.checkReorg(prevBlock, nil)
+			if err != nil {
+				log.Errorf("networkID: %d, error checking reorgs in previous blocks. Error: %v", s.networkID, err)
+				return lastBlockSynced, err
+			}
+			if blockReorged == nil {
+				blockReorged = prevBlock
+			}
+			err = s.resetState(blockReorged.BlockNumber)
+			if err != nil {
+				log.Errorf("networkID: %d, error resetting the state to a previous block. Retrying... Err: %v", s.networkID, err)
+				return lastBlockSynced, fmt.Errorf("error resetting the state to a previous block")
+			}
+			return blockReorged, nil
+		}
+		// Check reorg again to be sure that the chain has not changed between the previous checkReorg and the call GetRollupInfoByBlockRange
+		block, err := s.checkReorg(lastBlockSynced, initBlockReceived)
+		if err != nil {
+			log.Errorf("networkID: %d, error checking reorgs. Retrying... Err: %v", s.networkID, err)
+			return lastBlockSynced, fmt.Errorf("networkID: %d, error checking reorgs", s.networkID)
+		}
+		if block != nil {
+			err = s.resetState(block.BlockNumber)
+			if err != nil {
+				log.Errorf("networkID: %d, error resetting the state to a previous block. Retrying... Err: %v", s.networkID, err)
+				return lastBlockSynced, fmt.Errorf("networkID: %d, error resetting the state to a previous block", s.networkID)
+			}
+			return block, nil
+		}
+
 		err = s.processBlockRange(blocks, order)
 		if err != nil {
 			return lastBlockSynced, err
@@ -273,7 +339,6 @@ func (s *ClientSynchronizer) syncBlocks(lastBlockSynced *etherman.Block) (*ether
 				log.Debug("NetworkID: ", s.networkID, ", Position: ", i, ". BlockNumber: ", blocks[i].BlockNumber, ". BlockHash: ", blocks[i].BlockHash)
 			}
 		}
-		fromBlock = toBlock + 1
 
 		if lastKnownBlock.Cmp(new(big.Int).SetUint64(toBlock)) < 1 {
 			if !s.synced {
@@ -284,32 +349,19 @@
 			}
 			break
 		}
-		if len(blocks) == 0 { // If there is no events in the checked blocks range and lastKnownBlock > fromBlock.
-			// Store the latest block of the block range. Get block info and process the block
-			fb, err := s.etherMan.EthBlockByNumber(s.ctx, toBlock)
-			if err != nil {
-				return lastBlockSynced, err
-			}
-			b := etherman.Block{
-				BlockNumber: fb.NumberU64(),
-				BlockHash:   fb.Hash(),
-				ParentHash:  fb.ParentHash(),
-				ReceivedAt:  time.Unix(int64(fb.Time()), 0),
-			}
-			err = s.processBlockRange([]etherman.Block{b}, order)
-			if err != nil {
-				return lastBlockSynced, err
-			}
-
-			lastBlockSynced = &b
-			log.Debugf("NetworkID: %d, Storing empty block. BlockNumber: %d. BlockHash: %s",
-				s.networkID, b.BlockNumber, b.BlockHash.String())
-		}
+		fromBlock = lastBlockSynced.BlockNumber
+		toBlock = toBlock + s.cfg.SyncChunkSize
 	}
 	return lastBlockSynced, nil
 }
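The reworked loop above no longer persists synthetic empty blocks; it instead advances a chunked scan window that is clamped to the chain head, and skips the round entirely when the window is empty. A small runnable sketch of just that window arithmetic (all names here are mine, not the synchronizer's):

```go
// Standalone model of the syncBlocks window bounds, assuming the same
// clamp-to-head and fromBlock > toBlock ("nothing to scan") rules.
package main

import "fmt"

// clampWindow caps toBlock at the chain head and reports whether the
// window still contains anything to scan.
func clampWindow(fromBlock, toBlock, head uint64) (uint64, bool) {
	if toBlock > head {
		toBlock = head
	}
	if fromBlock > toBlock {
		return 0, false
	}
	return toBlock, true
}

func main() {
	head := uint64(27)
	fromBlock, chunk := uint64(5), uint64(10)
	toBlock := fromBlock + chunk
	for {
		clamped, ok := clampWindow(fromBlock, toBlock, head)
		if !ok {
			fmt.Println("fromBlock is higher than toBlock, skipping")
			return
		}
		fmt.Printf("scan blocks [%d, %d]\n", fromBlock, clamped)
		if clamped == head {
			fmt.Println("reached the chain head, synced")
			return
		}
		toBlock += chunk // the real loop also re-reads fromBlock from lastBlockSynced
	}
}
```

Re-scanning from `lastBlockSynced.BlockNumber` (instead of the old `toBlock + 1`) deliberately overlaps the last synced block so its hash can be re-checked against L1 on every pass, which is what feeds the reorg checks below.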
+func removeBlockElement(slice []etherman.Block, s int) []etherman.Block {
+	ret := make([]etherman.Block, 0)
+	ret = append(ret, slice[:s]...)
+	return append(ret, slice[s+1:]...)
+}
+
 func (s *ClientSynchronizer) processBlockRange(blocks []etherman.Block, order map[common.Hash][]etherman.Order) error {
 	// New info has to be included into the db using the state
 	var isNewGer bool
@@ -462,67 +514,73 @@ If hash or hash parent don't match, reorg detected and the function will return
 must be reverted. Then, check the previous ethereum block synced, get block info
 from the blockchain and check hash and has parent. This operation has to be done until a match is found.
 */
-func (s *ClientSynchronizer) checkReorg(latestBlock *etherman.Block) (*etherman.Block, error) {
+func (s *ClientSynchronizer) checkReorg(latestStoredBlock, syncedBlock *etherman.Block) (*etherman.Block, error) {
 	// This function only needs to worry about reorgs if some of the reorganized blocks contained rollup info.
-	latestBlockSynced := *latestBlock
+	latestStoredEthBlock := *latestStoredBlock
+	reorgedBlock := *latestStoredBlock
 	var depth uint64
+	block := syncedBlock
 	for {
-		block, err := s.etherMan.EthBlockByNumber(s.ctx, latestBlock.BlockNumber)
-		if err != nil {
-			log.Errorf("networkID: %d, error getting latest block synced from blockchain. Block: %d, error: %v",
-				s.networkID, latestBlock.BlockNumber, err)
-			return nil, err
-		}
-		if block.NumberU64() != latestBlock.BlockNumber {
-			err = fmt.Errorf("networkID: %d, wrong ethereum block retrieved from blockchain. Block numbers don't match."+
-				" BlockNumber stored: %d. BlockNumber retrieved: %d", s.networkID, latestBlock.BlockNumber, block.NumberU64())
-			log.Error("error: ", err)
-			return nil, err
-		}
-		// Compare hashes
-		if (block.Hash() != latestBlock.BlockHash || block.ParentHash() != latestBlock.ParentHash) && latestBlock.BlockNumber > s.genBlockNumber {
-			log.Info("NetworkID: ", s.networkID, ", [checkReorg function] => latestBlockNumber: ", latestBlock.BlockNumber)
-			log.Info("NetworkID: ", s.networkID, ", [checkReorg function] => latestBlockHash: ", latestBlock.BlockHash)
-			log.Info("NetworkID: ", s.networkID, ", [checkReorg function] => latestBlockHashParent: ", latestBlock.ParentHash)
-			log.Info("NetworkID: ", s.networkID, ", [checkReorg function] => BlockNumber: ", latestBlock.BlockNumber, block.NumberU64())
-			log.Info("NetworkID: ", s.networkID, ", [checkReorg function] => BlockHash: ", block.Hash())
-			log.Info("NetworkID: ", s.networkID, ", [checkReorg function] => BlockHashParent: ", block.ParentHash())
-			depth++
-			log.Info("NetworkID: ", s.networkID, ", REORG: Looking for the latest correct block. Depth: ", depth)
-			// Reorg detected. Getting previous block
-			dbTx, err := s.storage.BeginDBTransaction(s.ctx)
+		if block == nil {
+			log.Infof("NetworkID: %d, [checkReorg function] Checking Block %d in L1", s.networkID, reorgedBlock.BlockNumber)
+			b, err := s.etherMan.EthBlockByNumber(s.ctx, reorgedBlock.BlockNumber)
 			if err != nil {
-				log.Errorf("networkID: %d, error creating db transaction to get previous blocks. Error: %v", s.networkID, err)
+				log.Errorf("networkID: %d, error getting latest block synced from blockchain. Block: %d, error: %v", s.networkID, reorgedBlock.BlockNumber, err)
 				return nil, err
 			}
-			latestBlock, err = s.storage.GetPreviousBlock(s.ctx, s.networkID, depth, dbTx)
-			errC := s.storage.Commit(s.ctx, dbTx)
-			if errC != nil {
-				log.Errorf("networkID: %d, error committing dbTx, err: %v", s.networkID, errC)
-				rollbackErr := s.storage.Rollback(s.ctx, dbTx)
-				if rollbackErr != nil {
-					log.Errorf("networkID: %d, error rolling back state. 
RollbackErr: %v, err: %s", - s.networkID, rollbackErr, errC.Error()) - return nil, rollbackErr - } - return nil, errC + block = ðerman.Block{ + BlockNumber: b.Number().Uint64(), + BlockHash: b.Hash(), + ParentHash: b.ParentHash(), + } + if block.BlockNumber != reorgedBlock.BlockNumber { + err := fmt.Errorf("networkID: %d, wrong ethereum block retrieved from blockchain. Block numbers don't match. BlockNumber stored: %d. BlockNumber retrieved: %d", + s.networkID, reorgedBlock.BlockNumber, block.BlockNumber) + log.Error("error: ", err) + return nil, err } + } else { + log.Infof("NetworkID: %d, [checkReorg function] Using block %d from GetRollupInfoByBlockRange", s.networkID, block.BlockNumber) + } + log.Infof("NetworkID: %d, [checkReorg function] BlockNumber: %d BlockHash got from L1 provider: %s", s.networkID, block.BlockNumber, block.BlockHash.String()) + log.Infof("NetworkID: %d, [checkReorg function] reorgedBlockNumber: %d reorgedBlockHash already synced: %s", s.networkID, reorgedBlock.BlockNumber, reorgedBlock.BlockHash.String()) + + // Compare hashes + if (block.BlockHash != reorgedBlock.BlockHash || block.ParentHash != reorgedBlock.ParentHash) && reorgedBlock.BlockNumber > s.genBlockNumber { + log.Info("NetworkID: ", s.networkID, ", [checkReorg function] => reorgedBlockNumber: ", reorgedBlock.BlockNumber) + log.Info("NetworkID: ", s.networkID, ", [checkReorg function] => reorgedBlockHash: ", reorgedBlock.BlockHash) + log.Info("NetworkID: ", s.networkID, ", [checkReorg function] => reorgedBlockHashParent: ", reorgedBlock.ParentHash) + log.Info("NetworkID: ", s.networkID, ", [checkReorg function] => BlockNumber: ", reorgedBlock.BlockNumber, block.BlockNumber) + log.Info("NetworkID: ", s.networkID, ", [checkReorg function] => BlockHash: ", block.BlockHash) + log.Info("NetworkID: ", s.networkID, ", [checkReorg function] => BlockHashParent: ", block.ParentHash) + depth++ + log.Info("NetworkID: ", s.networkID, ", REORG: Looking for the latest correct ethereum block. Depth: ", depth) + // Reorg detected. Getting previous block + lb, err := s.storage.GetPreviousBlock(s.ctx, s.networkID, depth, nil) if errors.Is(err, gerror.ErrStorageNotFound) { log.Warnf("networkID: %d, error checking reorg: previous block not found in db: %v", s.networkID, err) - return ðerman.Block{}, nil + reorgedBlock = etherman.Block{ + BlockNumber: s.genBlockNumber, + } + return &reorgedBlock, nil } else if err != nil { - log.Errorf("networkID: %d, error detected getting previous block: %v", s.networkID, err) + log.Errorf("networkID: %d, error getting previousBlock from db. 
Error: %v", s.networkID, err)
 				return nil, err
 			}
+			reorgedBlock = *lb
 		} else {
+			log.Debugf("networkID: %d, checkReorg: Block %d hashOk %t parentHashOk %t", s.networkID, reorgedBlock.BlockNumber, block.BlockHash == reorgedBlock.BlockHash, block.ParentHash == reorgedBlock.ParentHash)
 			break
 		}
+		// This forces to get the block from L1 in the next iteration of the loop
+		block = nil
 	}
-	if latestBlockSynced.BlockHash != latestBlock.BlockHash {
-		log.Infof("NetworkID: %d, reorg detected in block: %d", s.networkID, latestBlockSynced.BlockNumber)
-		return latestBlock, nil
+	if latestStoredEthBlock.BlockHash != reorgedBlock.BlockHash {
+		latestStoredBlock = &reorgedBlock
+		log.Info("NetworkID: ", s.networkID, ", reorg detected in block: ", latestStoredEthBlock.BlockNumber, " last block OK: ", latestStoredBlock.BlockNumber)
+		return latestStoredBlock, nil
 	}
-	log.Debugf("NetworkID: %d, no reorg detected", s.networkID)
+	log.Debugf("NetworkID: %d, no reorg detected in block: %d. BlockHash: %s", s.networkID, latestStoredEthBlock.BlockNumber, latestStoredEthBlock.BlockHash.String())
 	return nil, nil
 }
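For reference, the rewritten `checkReorg` walks backwards through stored blocks, on each step comparing the stored hash and parent hash against the block reported by L1 (or, on the first iteration, against the first block returned by `GetRollupInfoByBlockRange`), until it finds a block both sides agree on. A compressed sketch of that walk with toy types; `chainAt` and `storedAt` stand in for `EthBlockByNumber` and `GetPreviousBlock`, and none of these names are from the service:

```go
// Toy model of the backward walk performed by the new checkReorg above.
package reorgsketch

type blockRef struct {
	Number     uint64
	Hash       string
	ParentHash string
}

// findLastAgreedBlock returns the newest stored block still on the canonical
// chain, nil if the latest stored block matches L1, or a genesis marker when
// the stored history is exhausted.
func findLastAgreedBlock(latest blockRef, genesis uint64,
	chainAt func(num uint64) blockRef,
	storedAt func(depth uint64) (blockRef, bool)) *blockRef {
	reorged := latest
	var depth uint64
	for {
		onChain := chainAt(reorged.Number)
		hashOK := onChain.Hash == reorged.Hash && onChain.ParentHash == reorged.ParentHash
		if hashOK || reorged.Number <= genesis {
			break // this block is still canonical (or we hit genesis)
		}
		depth++
		prev, ok := storedAt(depth)
		if !ok {
			return &blockRef{Number: genesis} // no stored history left
		}
		reorged = prev
	}
	if reorged.Hash != latest.Hash {
		return &reorged // reorg detected: resync from this block
	}
	return nil // no reorg
}
```

The tests added below exercise exactly these paths: a plain reorg, an empty block range that hides a reorg, and a reorg discovered only on the second `GetPreviousBlock` step.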
diff --git a/synchronizer/synchronizer_test.go b/synchronizer/synchronizer_test.go
index 8855af406..5b3928db9 100644
--- a/synchronizer/synchronizer_test.go
+++ b/synchronizer/synchronizer_test.go
@@ -26,7 +26,7 @@ type mocks struct {
 
 func TestSyncGer(t *testing.T) {
 	setupMocks := func(m *mocks) Synchronizer {
-		genBlockNumber := uint64(123456)
+		genBlockNumber := uint64(0)
 		cfg := Config{
 			SyncInterval:  cfgTypes.Duration{Duration: 1 * time.Second},
 			SyncChunkSize: 10,
@@ -37,7 +37,8 @@ func TestSyncGer(t *testing.T) {
 		m.Storage.On("IsLxLyActivated", ctx, nil).Return(true, nil).Once()
 		chEvent := make(chan *etherman.GlobalExitRoot)
 		chSynced := make(chan uint)
-		sync, err := NewSynchronizer(context.Background(), m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, chSynced, cfg)
+		parentCtx := context.Background()
+		sync, err := NewSynchronizer(parentCtx, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, chSynced, cfg)
 		require.NoError(t, err)
 
 		go func() {
@@ -47,16 +48,19 @@ func TestSyncGer(t *testing.T) {
 					t.Log("New GER received")
 				case netID := <-chSynced:
 					t.Log("Synced networkID: ", netID)
-				case <-context.Background().Done():
+				case <-parentCtx.Done():
+					t.Log("Stopping parentCtx...")
 					return
 				}
 			}
 		}()
 		parentHash := common.HexToHash("0x111")
-		ethHeader := &types.Header{Number: big.NewInt(1), ParentHash: parentHash}
-		ethBlock := types.NewBlockWithHeader(ethHeader)
-		lastBlock := &etherman.Block{BlockHash: ethBlock.Hash(), BlockNumber: ethBlock.Number().Uint64()}
+		ethHeader0 := &types.Header{Number: big.NewInt(0), ParentHash: parentHash}
+		ethHeader1 := &types.Header{Number: big.NewInt(1), ParentHash: ethHeader0.Hash()}
+		ethBlock0 := types.NewBlockWithHeader(ethHeader0)
+		ethBlock1 := types.NewBlockWithHeader(ethHeader1)
+		lastBlock := &etherman.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64()}
 		var networkID uint = 0
 
 		m.Storage.
@@ -65,13 +69,13 @@ func TestSyncGer(t *testing.T) {
 
 		m.Etherman.
 			On("EthBlockByNumber", ctx, lastBlock.BlockNumber).
-			Return(ethBlock, nil).
+			Return(ethBlock0, nil).
 			Once()
 
 		var n *big.Int
 		m.Etherman.
 			On("HeaderByNumber", ctx, n).
-			Return(ethHeader, nil).
+			Return(ethHeader1, nil).
 			Once()
 
 		globalExitRoot := etherman.GlobalExitRoot{
@@ -82,15 +86,19 @@ func TestSyncGer(t *testing.T) {
 			},
 			GlobalExitRoot: common.HexToHash("0xb14c74e4dddf25627a745f46cae6ac98782e2783c3ccc28107c8210e60d58864"),
 		}
-
-		ethermanBlock := etherman.Block{
-			BlockHash: ethBlock.Hash(),
+		ethermanBlock0 := etherman.Block{
+			BlockHash: ethBlock0.Hash(),
+			NetworkID: 0,
+		}
+		ethermanBlock1 := etherman.Block{
+			BlockNumber:     ethBlock0.NumberU64(),
+			BlockHash:       ethBlock1.Hash(),
 			GlobalExitRoots: []etherman.GlobalExitRoot{globalExitRoot},
 			NetworkID:       0,
 		}
-		blocks := []etherman.Block{ethermanBlock}
+		blocks := []etherman.Block{ethermanBlock0, ethermanBlock1}
 		order := map[common.Hash][]etherman.Order{
-			ethBlock.Hash(): {
+			ethBlock1.Hash(): {
 				{
 					Name: etherman.GlobalExitRootsOrder,
 					Pos:  0,
@@ -98,9 +106,11 @@ func TestSyncGer(t *testing.T) {
 			},
 		}
 
-		fromBlock := ethBlock.NumberU64() + 1
+		fromBlock := ethBlock0.NumberU64()
 		toBlock := fromBlock + cfg.SyncChunkSize
-
+		if toBlock > ethBlock1.NumberU64() {
+			toBlock = ethBlock1.NumberU64()
+		}
 		m.Etherman.
 			On("GetRollupInfoByBlockRange", ctx, fromBlock, &toBlock).
 			Return(blocks, order, nil).
@@ -112,12 +122,12 @@ func TestSyncGer(t *testing.T) {
 			Once()
 
 		m.Storage.
-			On("AddBlock", ctx, &blocks[0], m.DbTx).
+			On("AddBlock", ctx, &blocks[1], m.DbTx).
 			Return(uint64(1), nil).
 			Once()
 
 		m.Storage.
-			On("AddGlobalExitRoot", ctx, &blocks[0].GlobalExitRoots[0], m.DbTx).
+			On("AddGlobalExitRoot", ctx, &blocks[1].GlobalExitRoots[0], m.DbTx).
 			Return(nil).
 			Once()
 
@@ -129,7 +139,7 @@ func TestSyncGer(t *testing.T) {
 
 		m.Storage.
 			On("GetLatestL1SyncedExitRoot", ctx, nil).
-			Return(&blocks[0].GlobalExitRoots[0], nil).
+			Return(&blocks[1].GlobalExitRoots[0], nil).
 			Once()
 
 		g := common.HexToHash("0xb14c74e4dddf25627a745f46cae6ac98782e2783c3ccc28107c8210e60d58861")
@@ -179,3 +189,1161 @@ func TestSyncGer(t *testing.T) {
 		require.NoError(t, err)
 	})
 }
+
+func TestReorg(t *testing.T) {
+	setupMocks := func(m *mocks) Synchronizer {
+		genBlockNumber := uint64(0)
+		cfg := Config{
+			SyncInterval:  cfgTypes.Duration{Duration: 1 * time.Second},
+			SyncChunkSize: 10,
+		}
+		ctx := mock.MatchedBy(func(ctx context.Context) bool { return ctx != nil })
+		parentContext := context.Background()
+		m.Etherman.On("GetNetworkID", ctx).Return(uint(0), nil)
+		m.Storage.On("GetLatestL1SyncedExitRoot", ctx, nil).Return(&etherman.GlobalExitRoot{}, gerror.ErrStorageNotFound).Once()
+		m.Storage.On("IsLxLyActivated", ctx, nil).Return(true, nil).Once()
+		chEvent := make(chan *etherman.GlobalExitRoot)
+		chSynced := make(chan uint)
+		sync, err := NewSynchronizer(parentContext, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, chSynced, cfg)
+		require.NoError(t, err)
+
+		go func() {
+			for {
+				select {
+				case <-chEvent:
+					t.Log("New GER received")
+				case netID := <-chSynced:
+					t.Log("Synced networkID: ", netID)
+				case <-parentContext.Done():
+					t.Log("Stopping parentCtx...")
+					return
+				}
+			}
+		}()
+		parentHash := common.HexToHash("0x111")
+		ethHeader0 := &types.Header{Number: big.NewInt(0), ParentHash: parentHash}
+		ethBlock0 := types.NewBlockWithHeader(ethHeader0)
+		ethHeader1bis := &types.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash(), Time: 10, GasUsed: 20, Root: common.HexToHash("0x234")}
+		ethBlock1bis := types.NewBlockWithHeader(ethHeader1bis)
+		ethHeader2bis := &types.Header{Number: big.NewInt(2), ParentHash: ethBlock1bis.Hash()}
+		ethBlock2bis := types.NewBlockWithHeader(ethHeader2bis)
+		ethHeader3bis := &types.Header{Number: big.NewInt(3), ParentHash: ethBlock2bis.Hash()}
+		
ethBlock3bis := types.NewBlockWithHeader(ethHeader3bis) + ethHeader1 := &types.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash()} + ethBlock1 := types.NewBlockWithHeader(ethHeader1) + ethHeader2 := &types.Header{Number: big.NewInt(2), ParentHash: ethBlock1.Hash()} + ethBlock2 := types.NewBlockWithHeader(ethHeader2) + ethHeader3 := &types.Header{Number: big.NewInt(3), ParentHash: ethBlock2.Hash()} + ethBlock3 := types.NewBlockWithHeader(ethHeader3) + + lastBlock0 := ðerman.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()} + lastBlock1 := ðerman.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()} + var networkID uint = 0 + + m.Storage. + On("GetLastBlock", ctx, networkID, nil). + Return(lastBlock1, nil). + Once() + + var n *big.Int + m.Etherman. + On("HeaderByNumber", ctx, n). + Return(ethHeader3bis, nil). + Once() + + m.Etherman. + On("EthBlockByNumber", ctx, lastBlock1.BlockNumber). + Return(ethBlock1, nil). + Once() + + ti := time.Date(2024, 1, 1, 1, 0, 0, 0, time.UTC) + + ethermanBlock1bis := etherman.Block{ + BlockNumber: 1, + ReceivedAt: ti, + BlockHash: ethBlock1bis.Hash(), + ParentHash: ethBlock1bis.ParentHash(), + } + ethermanBlock2bis := etherman.Block{ + BlockNumber: 2, + ReceivedAt: ti, + BlockHash: ethBlock2bis.Hash(), + ParentHash: ethBlock2bis.ParentHash(), + } + blocks := []etherman.Block{ethermanBlock1bis, ethermanBlock2bis} + order := map[common.Hash][]etherman.Order{} + + fromBlock := ethBlock1.NumberU64() + toBlock := fromBlock + cfg.SyncChunkSize + if toBlock > ethBlock3.NumberU64() { + toBlock = ethBlock3.NumberU64() + } + m.Etherman. + On("GetRollupInfoByBlockRange", ctx, fromBlock, &toBlock). + Return(blocks, order, nil). + Once() + + var depth uint64 = 1 + stateBlock0 := ðerman.Block{ + BlockNumber: ethBlock0.NumberU64(), + BlockHash: ethBlock0.Hash(), + ParentHash: ethBlock0.ParentHash(), + ReceivedAt: ti, + } + m.Storage. + On("GetPreviousBlock", ctx, networkID, depth, nil). + Return(stateBlock0, nil). + Once() + + m.Etherman. + On("EthBlockByNumber", ctx, lastBlock0.BlockNumber). + Return(ethBlock0, nil). + Once() + + m.Storage. + On("BeginDBTransaction", ctx). + Return(m.DbTx, nil). + Once() + + m.Storage. + On("Reset", ctx, ethBlock0.NumberU64(), networkID, m.DbTx). + Return(nil). + Once() + + depositCnt := 1 + m.Storage. + On("GetNumberDeposits", ctx, networkID, ethBlock0.NumberU64(), m.DbTx). + Return(uint64(depositCnt), nil). + Once() + + m.BridgeCtrl. + On("ReorgMT", ctx, uint(depositCnt), networkID, m.DbTx). + Return(nil). + Once() + + m.Storage. + On("Commit", ctx, m.DbTx). + Return(nil). + Once() + + m.Etherman. + On("HeaderByNumber", ctx, n). + Return(ethHeader3bis, nil). + Twice() + + m.Etherman. + On("EthBlockByNumber", ctx, lastBlock0.BlockNumber). + Return(ethBlock0, nil). + Once() + + ethermanBlock0 := etherman.Block{ + BlockNumber: 0, + ReceivedAt: ti, + BlockHash: ethBlock0.Hash(), + ParentHash: ethBlock0.ParentHash(), + } + ethermanBlock3bis := etherman.Block{ + BlockNumber: 3, + ReceivedAt: ti, + BlockHash: ethBlock3bis.Hash(), + ParentHash: ethBlock3bis.ParentHash(), + } + fromBlock = 0 + blocks2 := []etherman.Block{ethermanBlock0, ethermanBlock1bis, ethermanBlock2bis, ethermanBlock3bis} + m.Etherman. + On("GetRollupInfoByBlockRange", ctx, fromBlock, &toBlock). + Return(blocks2, order, nil). + Once() + + m.Storage. + On("BeginDBTransaction", ctx). + Return(m.DbTx, nil). 
+ Once() + + stateBlock1bis := ðerman.Block{ + BlockNumber: ethermanBlock1bis.BlockNumber, + BlockHash: ethermanBlock1bis.BlockHash, + ParentHash: ethermanBlock1bis.ParentHash, + ReceivedAt: ethermanBlock1bis.ReceivedAt, + } + m.Storage. + On("AddBlock", ctx, stateBlock1bis, m.DbTx). + Return(uint64(1), nil). + Once() + + m.Storage. + On("Commit", ctx, m.DbTx). + Return(nil). + Once() + + m.Storage. + On("BeginDBTransaction", ctx). + Return(m.DbTx, nil). + Once() + + stateBlock2bis := ðerman.Block{ + BlockNumber: ethermanBlock2bis.BlockNumber, + BlockHash: ethermanBlock2bis.BlockHash, + ParentHash: ethermanBlock2bis.ParentHash, + ReceivedAt: ethermanBlock2bis.ReceivedAt, + } + m.Storage. + On("AddBlock", ctx, stateBlock2bis, m.DbTx). + Return(uint64(2), nil). + Once() + + m.Storage. + On("Commit", ctx, m.DbTx). + Return(nil). + Once() + + m.Storage. + On("BeginDBTransaction", ctx). + Return(m.DbTx, nil). + Once() + + stateBlock3bis := ðerman.Block{ + BlockNumber: ethermanBlock3bis.BlockNumber, + BlockHash: ethermanBlock3bis.BlockHash, + ParentHash: ethermanBlock3bis.ParentHash, + ReceivedAt: ethermanBlock3bis.ReceivedAt, + } + m.Storage. + On("AddBlock", ctx, stateBlock3bis, m.DbTx). + Return(uint64(3), nil). + Once() + + m.Storage. + On("Commit", ctx, m.DbTx). + Return(nil). + Once() + + ger := common.HexToHash("0x01") + m.ZkEVMClient. + On("GetLatestGlobalExitRoot", ctx). + Return(ger, nil). + Once() + + exitRoots := &rpcTypes.ExitRoots{ + MainnetExitRoot: common.Hash{}, + RollupExitRoot: common.Hash{}, + } + m.ZkEVMClient. + On("ExitRootsByGER", ctx, ger). + Return(exitRoots, nil). + Once() + + fullGer := ðerman.GlobalExitRoot{ + GlobalExitRoot: ger, + ExitRoots: []common.Hash{ + exitRoots.MainnetExitRoot, + exitRoots.RollupExitRoot, + }, + } + m.Storage. + On("AddTrustedGlobalExitRoot", ctx, fullGer, nil). + Return(true, nil). + Run(func(args mock.Arguments) { + sync.Stop() + }). 
+ Once() + + return sync + } + m := mocks{ + Etherman: newEthermanMock(t), + BridgeCtrl: newBridgectrlMock(t), + Storage: newStorageMock(t), + DbTx: newDbTxMock(t), + ZkEVMClient: newZkEVMClientMock(t), + } + + // start synchronizing + t.Run("Sync Ger test", func(t *testing.T) { + sync := setupMocks(&m) + err := sync.Sync() + require.NoError(t, err) + }) +} + +func TestLatestSyncedBlockEmpty(t *testing.T) { + setupMocks := func(m *mocks) Synchronizer { + genBlockNumber := uint64(0) + cfg := Config{ + SyncInterval: cfgTypes.Duration{Duration: 1 * time.Second}, + SyncChunkSize: 10, + } + ctx := mock.MatchedBy(func(ctx context.Context) bool { return ctx != nil }) + parentContext := context.Background() + m.Etherman.On("GetNetworkID", ctx).Return(uint(0), nil) + m.Storage.On("GetLatestL1SyncedExitRoot", ctx, nil).Return(ðerman.GlobalExitRoot{}, gerror.ErrStorageNotFound).Once() + m.Storage.On("IsLxLyActivated", ctx, nil).Return(true, nil).Once() + chEvent := make(chan *etherman.GlobalExitRoot) + chSynced := make(chan uint) + sync, err := NewSynchronizer(parentContext, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, chSynced, cfg) + require.NoError(t, err) + + go func() { + for { + select { + case <-chEvent: + t.Log("New GER received") + case netID := <-chSynced: + t.Log("Synced networkID: ", netID) + case <-parentContext.Done(): + t.Log("Stopping parentCtx...") + return + } + } + }() + parentHash := common.HexToHash("0x111") + ethHeader0 := &types.Header{Number: big.NewInt(0), ParentHash: parentHash} + ethBlock0 := types.NewBlockWithHeader(ethHeader0) + ethHeader1 := &types.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash()} + ethBlock1 := types.NewBlockWithHeader(ethHeader1) + ethHeader2 := &types.Header{Number: big.NewInt(2), ParentHash: ethBlock1.Hash()} + ethBlock2 := types.NewBlockWithHeader(ethHeader2) + ethHeader3 := &types.Header{Number: big.NewInt(3), ParentHash: ethBlock2.Hash()} + ethBlock3 := types.NewBlockWithHeader(ethHeader3) + + lastBlock0 := ðerman.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()} + lastBlock1 := ðerman.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()} + var networkID uint = 0 + + m.Storage. + On("GetLastBlock", ctx, networkID, nil). + Return(lastBlock1, nil). + Once() + + var n *big.Int + m.Etherman. + On("HeaderByNumber", ctx, n). + Return(ethHeader3, nil). + Once() + + m.Etherman. + On("EthBlockByNumber", ctx, lastBlock1.BlockNumber). + Return(ethBlock1, nil). + Once() + + blocks := []etherman.Block{} + order := map[common.Hash][]etherman.Order{} + + fromBlock := ethBlock1.NumberU64() + toBlock := fromBlock + cfg.SyncChunkSize + if toBlock > ethBlock3.NumberU64() { + toBlock = ethBlock3.NumberU64() + } + m.Etherman. + On("GetRollupInfoByBlockRange", ctx, fromBlock, &toBlock). + Return(blocks, order, nil). + Once() + + ti := time.Date(2024, 1, 1, 1, 0, 0, 0, time.UTC) + var depth uint64 = 1 + stateBlock0 := ðerman.Block{ + BlockNumber: ethBlock0.NumberU64(), + BlockHash: ethBlock0.Hash(), + ParentHash: ethBlock0.ParentHash(), + ReceivedAt: ti, + } + m.Storage. + On("GetPreviousBlock", ctx, networkID, depth, nil). + Return(stateBlock0, nil). + Once() + + m.Etherman. + On("EthBlockByNumber", ctx, lastBlock0.BlockNumber). + Return(ethBlock0, nil). + Once() + + m.Storage. + On("BeginDBTransaction", ctx). + Return(m.DbTx, nil). + Once() + + m.Storage. 
+ On("Reset", ctx, ethBlock0.NumberU64(), networkID, m.DbTx). + Return(nil). + Once() + + depositCnt := 1 + m.Storage. + On("GetNumberDeposits", ctx, networkID, ethBlock0.NumberU64(), m.DbTx). + Return(uint64(depositCnt), nil). + Once() + + m.BridgeCtrl. + On("ReorgMT", ctx, uint(depositCnt), networkID, m.DbTx). + Return(nil). + Once() + + m.Storage. + On("Commit", ctx, m.DbTx). + Return(nil). + Once() + + m.Etherman. + On("HeaderByNumber", ctx, n). + Return(ethHeader3, nil). + Twice() + + m.Etherman. + On("EthBlockByNumber", ctx, lastBlock0.BlockNumber). + Return(ethBlock0, nil). + Once() + + ethermanBlock0 := etherman.Block{ + BlockNumber: 0, + ReceivedAt: ti, + BlockHash: ethBlock0.Hash(), + ParentHash: ethBlock0.ParentHash(), + } + blocks = []etherman.Block{ethermanBlock0} + fromBlock = 0 + m.Etherman. + On("GetRollupInfoByBlockRange", ctx, fromBlock, &toBlock). + Return(blocks, order, nil). + Once() + + ger := common.HexToHash("0x01") + m.ZkEVMClient. + On("GetLatestGlobalExitRoot", ctx). + Return(ger, nil). + Once() + + exitRoots := &rpcTypes.ExitRoots{ + MainnetExitRoot: common.Hash{}, + RollupExitRoot: common.Hash{}, + } + m.ZkEVMClient. + On("ExitRootsByGER", ctx, ger). + Return(exitRoots, nil). + Once() + + fullGer := ðerman.GlobalExitRoot{ + GlobalExitRoot: ger, + ExitRoots: []common.Hash{ + exitRoots.MainnetExitRoot, + exitRoots.RollupExitRoot, + }, + } + m.Storage. + On("AddTrustedGlobalExitRoot", ctx, fullGer, nil). + Return(true, nil). + Run(func(args mock.Arguments) { + sync.Stop() + }). + Once() + + return sync + } + m := mocks{ + Etherman: newEthermanMock(t), + BridgeCtrl: newBridgectrlMock(t), + Storage: newStorageMock(t), + DbTx: newDbTxMock(t), + ZkEVMClient: newZkEVMClientMock(t), + } + + // start synchronizing + t.Run("Sync Ger test", func(t *testing.T) { + sync := setupMocks(&m) + err := sync.Sync() + require.NoError(t, err) + }) +} + +func TestRegularReorg(t *testing.T) { + setupMocks := func(m *mocks) Synchronizer { + genBlockNumber := uint64(0) + cfg := Config{ + SyncInterval: cfgTypes.Duration{Duration: 1 * time.Second}, + SyncChunkSize: 10, + } + ctx := mock.MatchedBy(func(ctx context.Context) bool { return ctx != nil }) + parentContext := context.Background() + m.Etherman.On("GetNetworkID", ctx).Return(uint(0), nil) + m.Storage.On("GetLatestL1SyncedExitRoot", ctx, nil).Return(ðerman.GlobalExitRoot{}, gerror.ErrStorageNotFound).Once() + m.Storage.On("IsLxLyActivated", ctx, nil).Return(true, nil).Once() + chEvent := make(chan *etherman.GlobalExitRoot) + chSynced := make(chan uint) + sync, err := NewSynchronizer(parentContext, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, chSynced, cfg) + require.NoError(t, err) + + go func() { + for { + select { + case <-chEvent: + t.Log("New GER received") + case netID := <-chSynced: + t.Log("Synced networkID: ", netID) + case <-parentContext.Done(): + t.Log("Stopping parentCtx...") + return + } + } + }() + parentHash := common.HexToHash("0x111") + ethHeader0 := &types.Header{Number: big.NewInt(0), ParentHash: parentHash} + ethBlock0 := types.NewBlockWithHeader(ethHeader0) + ethHeader1bis := &types.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash(), Time: 10, GasUsed: 20, Root: common.HexToHash("0x234")} + ethBlock1bis := types.NewBlockWithHeader(ethHeader1bis) + ethHeader2bis := &types.Header{Number: big.NewInt(2), ParentHash: ethBlock1bis.Hash()} + ethBlock2bis := types.NewBlockWithHeader(ethHeader2bis) + ethHeader1 := &types.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash()} + 
ethBlock1 := types.NewBlockWithHeader(ethHeader1) + ethHeader2 := &types.Header{Number: big.NewInt(2), ParentHash: ethBlock1.Hash()} + ethBlock2 := types.NewBlockWithHeader(ethHeader2) + + lastBlock0 := ðerman.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()} + lastBlock1 := ðerman.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()} + var networkID uint = 0 + + m.Storage. + On("GetLastBlock", ctx, networkID, nil). + Return(lastBlock1, nil). + Once() + + var n *big.Int + m.Etherman. + On("HeaderByNumber", ctx, n). + Return(ethHeader2bis, nil). + Once() + + m.Etherman. + On("EthBlockByNumber", ctx, lastBlock1.BlockNumber). + Return(ethBlock1bis, nil). + Once() + + ti := time.Date(2024, 1, 1, 1, 0, 0, 0, time.UTC) + var depth uint64 = 1 + stateBlock0 := ðerman.Block{ + BlockNumber: ethBlock0.NumberU64(), + BlockHash: ethBlock0.Hash(), + ParentHash: ethBlock0.ParentHash(), + ReceivedAt: ti, + } + + m.Storage. + On("GetPreviousBlock", ctx, networkID, depth, nil). + Return(stateBlock0, nil). + Once() + + m.Etherman. + On("EthBlockByNumber", ctx, lastBlock0.BlockNumber). + Return(ethBlock0, nil). + Once() + + m.Storage. + On("BeginDBTransaction", ctx). + Return(m.DbTx, nil). + Once() + + m.Storage. + On("Reset", ctx, ethBlock0.NumberU64(), networkID, m.DbTx). + Return(nil). + Once() + + depositCnt := 1 + m.Storage. + On("GetNumberDeposits", ctx, networkID, ethBlock0.NumberU64(), m.DbTx). + Return(uint64(depositCnt), nil). + Once() + + m.BridgeCtrl. + On("ReorgMT", ctx, uint(depositCnt), networkID, m.DbTx). + Return(nil). + Once() + + m.Storage. + On("Commit", ctx, m.DbTx). + Return(nil). + Once() + + m.Etherman. + On("HeaderByNumber", ctx, n). + Return(ethHeader2bis, nil). + Twice() + + m.Etherman. + On("EthBlockByNumber", ctx, lastBlock0.BlockNumber). + Return(ethBlock0, nil). + Once() + + ethermanBlock0 := etherman.Block{ + BlockNumber: 0, + ReceivedAt: ti, + BlockHash: ethBlock0.Hash(), + ParentHash: ethBlock0.ParentHash(), + } + ethermanBlock1bis := etherman.Block{ + BlockNumber: 1, + ReceivedAt: ti, + BlockHash: ethBlock1bis.Hash(), + ParentHash: ethBlock1bis.ParentHash(), + } + ethermanBlock2bis := etherman.Block{ + BlockNumber: 2, + ReceivedAt: ti, + BlockHash: ethBlock2bis.Hash(), + ParentHash: ethBlock2bis.ParentHash(), + } + blocks := []etherman.Block{ethermanBlock0, ethermanBlock1bis, ethermanBlock2bis} + order := map[common.Hash][]etherman.Order{} + + fromBlock := ethBlock0.NumberU64() + toBlock := fromBlock + cfg.SyncChunkSize + if toBlock > ethBlock2.NumberU64() { + toBlock = ethBlock2.NumberU64() + } + m.Etherman. + On("GetRollupInfoByBlockRange", ctx, fromBlock, &toBlock). + Return(blocks, order, nil). + Once() + + m.Storage. + On("BeginDBTransaction", ctx). + Return(m.DbTx, nil). + Once() + + stateBlock1bis := ðerman.Block{ + BlockNumber: ethermanBlock1bis.BlockNumber, + BlockHash: ethermanBlock1bis.BlockHash, + ParentHash: ethermanBlock1bis.ParentHash, + ReceivedAt: ethermanBlock1bis.ReceivedAt, + } + m.Storage. + On("AddBlock", ctx, stateBlock1bis, m.DbTx). + Return(uint64(1), nil). + Once() + + m.Storage. + On("Commit", ctx, m.DbTx). + Return(nil). + Once() + + m.Storage. + On("BeginDBTransaction", ctx). + Return(m.DbTx, nil). 
+ Once() + + stateBlock2bis := ðerman.Block{ + BlockNumber: ethermanBlock2bis.BlockNumber, + BlockHash: ethermanBlock2bis.BlockHash, + ParentHash: ethermanBlock2bis.ParentHash, + ReceivedAt: ethermanBlock2bis.ReceivedAt, + } + m.Storage. + On("AddBlock", ctx, stateBlock2bis, m.DbTx). + Return(uint64(2), nil). + Once() + + m.Storage. + On("Commit", ctx, m.DbTx). + Return(nil). + Once() + + ger := common.HexToHash("0x01") + m.ZkEVMClient. + On("GetLatestGlobalExitRoot", ctx). + Return(ger, nil). + Once() + + exitRoots := &rpcTypes.ExitRoots{ + MainnetExitRoot: common.Hash{}, + RollupExitRoot: common.Hash{}, + } + m.ZkEVMClient. + On("ExitRootsByGER", ctx, ger). + Return(exitRoots, nil). + Once() + + fullGer := ðerman.GlobalExitRoot{ + GlobalExitRoot: ger, + ExitRoots: []common.Hash{ + exitRoots.MainnetExitRoot, + exitRoots.RollupExitRoot, + }, + } + m.Storage. + On("AddTrustedGlobalExitRoot", ctx, fullGer, nil). + Return(true, nil). + Run(func(args mock.Arguments) { + sync.Stop() + }). + Once() + + return sync + } + m := mocks{ + Etherman: newEthermanMock(t), + BridgeCtrl: newBridgectrlMock(t), + Storage: newStorageMock(t), + DbTx: newDbTxMock(t), + ZkEVMClient: newZkEVMClientMock(t), + } + + // start synchronizing + t.Run("Sync Ger test", func(t *testing.T) { + sync := setupMocks(&m) + err := sync.Sync() + require.NoError(t, err) + }) +} + +func TestLatestSyncedBlockEmptyWithExtraReorg(t *testing.T) { + setupMocks := func(m *mocks) Synchronizer { + genBlockNumber := uint64(0) + cfg := Config{ + SyncInterval: cfgTypes.Duration{Duration: 1 * time.Second}, + SyncChunkSize: 10, + } + ctx := mock.MatchedBy(func(ctx context.Context) bool { return ctx != nil }) + parentContext := context.Background() + m.Etherman.On("GetNetworkID", ctx).Return(uint(0), nil) + m.Storage.On("GetLatestL1SyncedExitRoot", ctx, nil).Return(ðerman.GlobalExitRoot{}, gerror.ErrStorageNotFound).Once() + m.Storage.On("IsLxLyActivated", ctx, nil).Return(true, nil).Once() + chEvent := make(chan *etherman.GlobalExitRoot) + chSynced := make(chan uint) + sync, err := NewSynchronizer(parentContext, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, chSynced, cfg) + require.NoError(t, err) + + go func() { + for { + select { + case <-chEvent: + t.Log("New GER received") + case netID := <-chSynced: + t.Log("Synced networkID: ", netID) + case <-parentContext.Done(): + t.Log("Stopping parentCtx...") + return + } + } + }() + parentHash := common.HexToHash("0x111") + ethHeader0 := &types.Header{Number: big.NewInt(0), ParentHash: parentHash} + ethBlock0 := types.NewBlockWithHeader(ethHeader0) + ethHeader1bis := &types.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash(), Time: 10, GasUsed: 20, Root: common.HexToHash("0x234")} + ethBlock1bis := types.NewBlockWithHeader(ethHeader1bis) + ethHeader1 := &types.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash()} + ethBlock1 := types.NewBlockWithHeader(ethHeader1) + ethHeader2 := &types.Header{Number: big.NewInt(2), ParentHash: ethBlock1.Hash()} + ethBlock2 := types.NewBlockWithHeader(ethHeader2) + ethHeader3 := &types.Header{Number: big.NewInt(3), ParentHash: ethBlock2.Hash()} + ethBlock3 := types.NewBlockWithHeader(ethHeader3) + + lastBlock0 := ðerman.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()} + lastBlock1 := ðerman.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()} + lastBlock2 := ðerman.Block{BlockHash: ethBlock2.Hash(), 
BlockNumber: ethBlock2.Number().Uint64(), ParentHash: ethBlock2.ParentHash()} + var networkID uint = 0 + + m.Storage. + On("GetLastBlock", ctx, networkID, nil). + Return(lastBlock2, nil). + Once() + + var n *big.Int + m.Etherman. + On("HeaderByNumber", ctx, n). + Return(ethHeader3, nil). + Once() + + m.Etherman. + On("EthBlockByNumber", ctx, lastBlock2.BlockNumber). + Return(ethBlock2, nil). + Once() + + blocks := []etherman.Block{} + order := map[common.Hash][]etherman.Order{} + + fromBlock := ethBlock2.NumberU64() + toBlock := fromBlock + cfg.SyncChunkSize + if toBlock > ethBlock3.NumberU64() { + toBlock = ethBlock3.NumberU64() + } + m.Etherman. + On("GetRollupInfoByBlockRange", mock.Anything, fromBlock, &toBlock). + Return(blocks, order, nil). + Once() + + ti := time.Date(2024, 1, 1, 1, 0, 0, 0, time.UTC) + var depth uint64 = 1 + stateBlock1 := ðerman.Block{ + BlockNumber: ethBlock1.NumberU64(), + BlockHash: ethBlock1.Hash(), + ParentHash: ethBlock1.ParentHash(), + ReceivedAt: ti, + } + m.Storage. + On("GetPreviousBlock", ctx, networkID, depth, nil). + Return(stateBlock1, nil). + Once() + + m.Etherman. + On("EthBlockByNumber", ctx, lastBlock1.BlockNumber). + Return(ethBlock1bis, nil). + Once() + + stateBlock0 := ðerman.Block{ + BlockNumber: ethBlock0.NumberU64(), + BlockHash: ethBlock0.Hash(), + ParentHash: ethBlock0.ParentHash(), + ReceivedAt: ti, + } + m.Storage. + On("GetPreviousBlock", ctx, networkID, depth, nil). + Return(stateBlock0, nil). + Once() + + m.Etherman. + On("EthBlockByNumber", ctx, lastBlock0.BlockNumber). + Return(ethBlock0, nil). + Once() + + m.Storage. + On("BeginDBTransaction", ctx). + Return(m.DbTx, nil). + Once() + + m.Storage. + On("Reset", ctx, ethBlock0.NumberU64(), networkID, m.DbTx). + Return(nil). + Once() + + depositCnt := 1 + m.Storage. + On("GetNumberDeposits", ctx, networkID, ethBlock0.NumberU64(), m.DbTx). + Return(uint64(depositCnt), nil). + Once() + + m.BridgeCtrl. + On("ReorgMT", ctx, uint(depositCnt), networkID, m.DbTx). + Return(nil). + Once() + + m.Storage. + On("Commit", ctx, m.DbTx). + Return(nil). + Once() + + m.Etherman. + On("HeaderByNumber", ctx, n). + Return(ethHeader3, nil). + Twice() + + m.Etherman. + On("EthBlockByNumber", ctx, lastBlock0.BlockNumber). + Return(ethBlock0, nil). + Once() + + ethermanBlock0 := etherman.Block{ + BlockNumber: 0, + ReceivedAt: ti, + BlockHash: ethBlock0.Hash(), + ParentHash: ethBlock0.ParentHash(), + } + ethermanBlock1bis := etherman.Block{ + BlockNumber: 1, + ReceivedAt: ti, + BlockHash: ethBlock1.Hash(), + ParentHash: ethBlock1.ParentHash(), + } + blocks = []etherman.Block{ethermanBlock0, ethermanBlock1bis} + fromBlock = 0 + m.Etherman. + On("GetRollupInfoByBlockRange", ctx, fromBlock, &toBlock). + Return(blocks, order, nil). + Once() + + m.Storage. + On("BeginDBTransaction", ctx). + Return(m.DbTx, nil). + Once() + + stateBlock1bis := ðerman.Block{ + BlockNumber: ethermanBlock1bis.BlockNumber, + BlockHash: ethermanBlock1bis.BlockHash, + ParentHash: ethermanBlock1bis.ParentHash, + ReceivedAt: ethermanBlock1bis.ReceivedAt, + } + m.Storage. + On("AddBlock", ctx, stateBlock1bis, m.DbTx). + Return(uint64(1), nil). + Once() + + m.Storage. + On("Commit", ctx, m.DbTx). + Return(nil). + Once() + + ger := common.HexToHash("0x01") + m.ZkEVMClient. + On("GetLatestGlobalExitRoot", ctx). + Return(ger, nil). + Once() + + exitRoots := &rpcTypes.ExitRoots{ + MainnetExitRoot: common.Hash{}, + RollupExitRoot: common.Hash{}, + } + m.ZkEVMClient. + On("ExitRootsByGER", ctx, ger). + Return(exitRoots, nil). 
+		fullGer := &etherman.GlobalExitRoot{
+			GlobalExitRoot: ger,
+			ExitRoots: []common.Hash{
+				exitRoots.MainnetExitRoot,
+				exitRoots.RollupExitRoot,
+			},
+		}
+		m.Storage.
+			On("AddTrustedGlobalExitRoot", ctx, fullGer, nil).
+			Return(true, nil).
+			Run(func(args mock.Arguments) {
+				sync.Stop()
+			}).
+			Once()
+
+		return sync
+	}
+	m := mocks{
+		Etherman:    newEthermanMock(t),
+		BridgeCtrl:  newBridgectrlMock(t),
+		Storage:     newStorageMock(t),
+		DbTx:        newDbTxMock(t),
+		ZkEVMClient: newZkEVMClientMock(t),
+	}
+
+	// start synchronizing
+	t.Run("Sync Ger test", func(t *testing.T) {
+		sync := setupMocks(&m)
+		err := sync.Sync()
+		require.NoError(t, err)
+	})
+}
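+
+// TestCallFromEmptyBlockAndReorg covers a reorg surfaced by the fetched range
+// itself: L1 now reports block 2bis, which builds on block 1bis rather than
+// on the stored block 1, so the synchronizer rolls back to block 0 and
+// resyncs before storing block 2bis.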
+func TestCallFromEmptyBlockAndReorg(t *testing.T) {
+	setupMocks := func(m *mocks) Synchronizer {
+		genBlockNumber := uint64(0)
+		cfg := Config{
+			SyncInterval:  cfgTypes.Duration{Duration: 1 * time.Second},
+			SyncChunkSize: 10,
+		}
+		ctx := mock.MatchedBy(func(ctx context.Context) bool { return ctx != nil })
+		parentContext := context.Background()
+		m.Etherman.On("GetNetworkID", ctx).Return(uint(0), nil)
+		m.Storage.On("GetLatestL1SyncedExitRoot", ctx, nil).Return(&etherman.GlobalExitRoot{}, gerror.ErrStorageNotFound).Once()
+		m.Storage.On("IsLxLyActivated", ctx, nil).Return(true, nil).Once()
+		chEvent := make(chan *etherman.GlobalExitRoot)
+		chSynced := make(chan uint)
+		sync, err := NewSynchronizer(parentContext, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, chSynced, cfg)
+		require.NoError(t, err)
+
+		go func() {
+			for {
+				select {
+				case <-chEvent:
+					t.Log("New GER received")
+				case netID := <-chSynced:
+					t.Log("Synced networkID: ", netID)
+				case <-parentContext.Done():
+					t.Log("Stopping parentCtx...")
+					return
+				}
+			}
+		}()
+		parentHash := common.HexToHash("0x111")
+		ethHeader0 := &types.Header{Number: big.NewInt(0), ParentHash: parentHash}
+		ethBlock0 := types.NewBlockWithHeader(ethHeader0)
+		ethHeader1bis := &types.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash(), Time: 10, GasUsed: 20, Root: common.HexToHash("0x234")}
+		ethBlock1bis := types.NewBlockWithHeader(ethHeader1bis)
+		ethHeader2bis := &types.Header{Number: big.NewInt(2), ParentHash: ethBlock1bis.Hash()}
+		ethBlock2bis := types.NewBlockWithHeader(ethHeader2bis)
+		ethHeader1 := &types.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash()}
+		ethBlock1 := types.NewBlockWithHeader(ethHeader1)
+		ethHeader2 := &types.Header{Number: big.NewInt(2), ParentHash: ethBlock1.Hash()}
+		ethBlock2 := types.NewBlockWithHeader(ethHeader2)
+
+		lastBlock0 := &etherman.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()}
+		lastBlock1 := &etherman.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()}
+		var networkID uint = 0
+
+		m.Storage.
+			On("GetLastBlock", ctx, networkID, nil).
+			Return(lastBlock1, nil).
+			Once()
+
+		var n *big.Int
+		m.Etherman.
+			On("HeaderByNumber", ctx, n).
+			Return(ethHeader2bis, nil).
+			Once()
+
+		m.Etherman.
+			On("EthBlockByNumber", ctx, lastBlock1.BlockNumber).
+			Return(ethBlock1, nil).
+			Once()
+
+		ti := time.Date(2024, 1, 1, 1, 0, 0, 0, time.UTC)
+
+		ethermanBlock0 := etherman.Block{
+			BlockNumber: 0,
+			ReceivedAt:  ti,
+			BlockHash:   ethBlock0.Hash(),
+			ParentHash:  ethBlock0.ParentHash(),
+		}
+		ethermanBlock2bis := etherman.Block{
+			BlockNumber: 2,
+			ReceivedAt:  ti,
+			BlockHash:   ethBlock2bis.Hash(),
+			ParentHash:  ethBlock2bis.ParentHash(),
+		}
+		blocks := []etherman.Block{ethermanBlock2bis}
+		order := map[common.Hash][]etherman.Order{}
+
+		fromBlock := ethBlock1.NumberU64()
+		toBlock := fromBlock + cfg.SyncChunkSize
+		if toBlock > ethBlock2.NumberU64() {
+			toBlock = ethBlock2.NumberU64()
+		}
+		m.Etherman.
+			On("GetRollupInfoByBlockRange", ctx, fromBlock, &toBlock).
+			Return(blocks, order, nil).
+			Once()
+
+		var depth uint64 = 1
+		stateBlock0 := &etherman.Block{
+			BlockNumber: ethBlock0.NumberU64(),
+			BlockHash:   ethBlock0.Hash(),
+			ParentHash:  ethBlock0.ParentHash(),
+			ReceivedAt:  ti,
+		}
+		m.Storage.
+			On("GetPreviousBlock", ctx, networkID, depth, nil).
+			Return(stateBlock0, nil).
+			Once()
+
+		m.Etherman.
+			On("EthBlockByNumber", ctx, lastBlock0.BlockNumber).
+			Return(ethBlock0, nil).
+			Once()
+
+		m.Storage.
+			On("BeginDBTransaction", ctx).
+			Return(m.DbTx, nil).
+			Once()
+
+		m.Storage.
+			On("Reset", ctx, ethBlock0.NumberU64(), networkID, m.DbTx).
+			Return(nil).
+			Once()
+
+		depositCnt := 1
+		m.Storage.
+			On("GetNumberDeposits", ctx, networkID, ethBlock0.NumberU64(), m.DbTx).
+			Return(uint64(depositCnt), nil).
+			Once()
+
+		m.BridgeCtrl.
+			On("ReorgMT", ctx, uint(depositCnt), networkID, m.DbTx).
+			Return(nil).
+			Once()
+
+		m.Storage.
+			On("Commit", ctx, m.DbTx).
+			Return(nil).
+			Once()
+
+		m.Etherman.
+			On("HeaderByNumber", mock.Anything, n).
+			Return(ethHeader2bis, nil).
+			Twice()
+
+		m.Etherman.
+			On("EthBlockByNumber", ctx, lastBlock0.BlockNumber).
+			Return(ethBlock0, nil).
+			Once()
+
+		blocks = []etherman.Block{ethermanBlock0, ethermanBlock2bis}
+		fromBlock = ethBlock0.NumberU64()
+		toBlock = fromBlock + cfg.SyncChunkSize
+		if toBlock > ethBlock2.NumberU64() {
+			toBlock = ethBlock2.NumberU64()
+		}
+		m.Etherman.
+			On("GetRollupInfoByBlockRange", ctx, fromBlock, &toBlock).
+			Return(blocks, order, nil).
+			Once()
+
+		m.Storage.
+			On("BeginDBTransaction", ctx).
+			Return(m.DbTx, nil).
+			Once()
+
+		stateBlock2bis := &etherman.Block{
+			BlockNumber: ethermanBlock2bis.BlockNumber,
+			BlockHash:   ethermanBlock2bis.BlockHash,
+			ParentHash:  ethermanBlock2bis.ParentHash,
+			ReceivedAt:  ethermanBlock2bis.ReceivedAt,
+		}
+		m.Storage.
+			On("AddBlock", ctx, stateBlock2bis, m.DbTx).
+			Return(uint64(2), nil).
+			Once()
+
+		m.Storage.
+			On("Commit", ctx, m.DbTx).
+			Return(nil).
+			Once()
+
+		ger := common.HexToHash("0x01")
+		m.ZkEVMClient.
+			On("GetLatestGlobalExitRoot", ctx).
+			Return(ger, nil).
+			Once()
+
+		exitRoots := &rpcTypes.ExitRoots{
+			MainnetExitRoot: common.Hash{},
+			RollupExitRoot:  common.Hash{},
+		}
+		m.ZkEVMClient.
+			On("ExitRootsByGER", ctx, ger).
+			Return(exitRoots, nil).
+			Once()
+
+		fullGer := &etherman.GlobalExitRoot{
+			GlobalExitRoot: ger,
+			ExitRoots: []common.Hash{
+				exitRoots.MainnetExitRoot,
+				exitRoots.RollupExitRoot,
+			},
+		}
+		m.Storage.
+			On("AddTrustedGlobalExitRoot", ctx, fullGer, nil).
+			Return(true, nil).
+			Run(func(args mock.Arguments) {
+				sync.Stop()
+			}).
+			Once()
+
+		return sync
+	}
+	m := mocks{
+		Etherman:    newEthermanMock(t),
+		BridgeCtrl:  newBridgectrlMock(t),
+		Storage:     newStorageMock(t),
+		DbTx:        newDbTxMock(t),
+		ZkEVMClient: newZkEVMClientMock(t),
+	}
+
+	// start synchronizing
+	t.Run("Sync Ger test", func(t *testing.T) {
+		sync := setupMocks(&m)
+		err := sync.Sync()
+		require.NoError(t, err)
+	})
+}
diff --git a/test/scripts/isClaimed/main.go b/test/scripts/isClaimed/main.go
new file mode 100644
index 000000000..714324c1b
--- /dev/null
+++ b/test/scripts/isClaimed/main.go
@@ -0,0 +1,33 @@
+package main
+
+import (
+	"context"
+
+	"github.com/0xPolygonHermez/zkevm-bridge-service/log"
+	"github.com/0xPolygonHermez/zkevm-bridge-service/utils"
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/common"
+)
+
+const (
+	bridgeAddr = "0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E"
+
+	networkURL = "http://localhost:8123"
+
+	depositCnt      = 585
+	originalNetwork = 0
+)
+
+func main() {
+	ctx := context.Background()
+	client, err := utils.NewClient(ctx, networkURL, common.HexToAddress(bridgeAddr))
+	if err != nil {
+		log.Fatal("Error: ", err)
+	}
+
+	isClaimed, err := client.Bridge.IsClaimed(&bind.CallOpts{Pending: false}, depositCnt, originalNetwork)
+	if err != nil {
+		log.Fatal("error calling IsClaimed. Error: ", err)
+	}
+	log.Info("IsClaimed: ", isClaimed)
+}
diff --git a/utils/client.go b/utils/client.go
index da8b950aa..93a81ce98 100644
--- a/utils/client.go
+++ b/utils/client.go
@@ -168,7 +168,7 @@ func (c *Client) SendBridgeAsset(ctx context.Context, tokenAddr common.Address,
 	}
 	tx, err := c.Bridge.BridgeAsset(auth, destNetwork, *destAddr, amount, tokenAddr, true, metadata)
 	if err != nil {
-		log.Error("Error: ", err)
+		log.Error("error sending deposit. Error: ", err)
 		return err
 	}
 	// wait transfer to be included in a batch
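
All three synchronizer tests above terminate the same way: the final AddTrustedGlobalExitRoot expectation's Run hook calls sync.Stop(), which makes Sync() return so require.NoError can assert on it. Below is a minimal, self-contained sketch of that testify pattern, assuming nothing beyond stretchr/testify; the type and test names are illustrative, not from this repository:

package claimtest

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

// stopper stands in for the Synchronizer: anything the final mock callback can halt.
type stopper struct{ stopped bool }

func (s *stopper) Stop() { s.stopped = true }

// storeMock is a hand-written testify mock, analogous to the generated storageMock.
type storeMock struct{ mock.Mock }

func (m *storeMock) AddTrustedGlobalExitRoot(ger string) (bool, error) {
	args := m.Called(ger)
	return args.Bool(0), args.Error(1)
}

func TestStopOnFinalExpectation(t *testing.T) {
	s := &stopper{}
	store := &storeMock{}
	// The Run hook fires when the expectation matches, so the last mocked
	// call can shut the loop down -- the same trick the synchronizer tests
	// use with sync.Stop().
	store.On("AddTrustedGlobalExitRoot", "0x01").
		Return(true, nil).
		Run(func(args mock.Arguments) { s.Stop() }).
		Once()

	ok, err := store.AddTrustedGlobalExitRoot("0x01")
	require.NoError(t, err)
	require.True(t, ok)
	require.True(t, s.stopped)
	store.AssertExpectations(t)
}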