From 790e78a1d72880f0662e3ac0a25d3fa66993c759 Mon Sep 17 00:00:00 2001 From: Thiago Coimbra Lemos Date: Tue, 26 Mar 2024 07:49:22 -0300 Subject: [PATCH 01/11] fix debug trace to compute egp percentage based on tx execution values (#3503) * fix debug trace to compute egp percentage based on tx execution values * fix trace egp percentage for injected tx --- jsonrpc/endpoints_zkevm.go | 2 +- jsonrpc/mocks/mock_pool.go | 28 ---------------------- jsonrpc/types/interfaces.go | 1 - pool/effectivegasprice.go | 36 +--------------------------- pool/effectivegasprice_test.go | 9 ++++--- pool/pool.go | 2 +- sequencer/finalizer.go | 4 ++-- state/effectivegasprice.go | 44 ++++++++++++++++++++++++++++++++++ state/helper.go | 2 -- state/trace.go | 10 +++++++- 10 files changed, 62 insertions(+), 76 deletions(-) create mode 100644 state/effectivegasprice.go diff --git a/jsonrpc/endpoints_zkevm.go b/jsonrpc/endpoints_zkevm.go index f4c6020ba8..cb56dade8c 100644 --- a/jsonrpc/endpoints_zkevm.go +++ b/jsonrpc/endpoints_zkevm.go @@ -516,7 +516,7 @@ func (z *ZKEVMEndpoints) internalEstimateGasPriceAndFee(ctx context.Context, arg if txEGP.Cmp(txGasPrice) == -1 { // txEGP < txGasPrice // We need to "round" the final effectiveGasPrice to a 256 fraction of the txGasPrice - txEGPPct, err = z.pool.CalculateEffectiveGasPricePercentage(txGasPrice, txEGP) + txEGPPct, err = state.CalculateEffectiveGasPricePercentage(txGasPrice, txEGP) if err != nil { return nil, nil, types.NewRPCError(types.DefaultErrorCode, "failed to calculate effective gas price percentage", err, false) } diff --git a/jsonrpc/mocks/mock_pool.go b/jsonrpc/mocks/mock_pool.go index 7f07d1dc28..7f4e7c2452 100644 --- a/jsonrpc/mocks/mock_pool.go +++ b/jsonrpc/mocks/mock_pool.go @@ -70,34 +70,6 @@ func (_m *PoolMock) CalculateEffectiveGasPrice(rawTx []byte, txGasPrice *big.Int return r0, r1 } -// CalculateEffectiveGasPricePercentage provides a mock function with given fields: gasPrice, effectiveGasPrice -func (_m *PoolMock) CalculateEffectiveGasPricePercentage(gasPrice *big.Int, effectiveGasPrice *big.Int) (uint8, error) { - ret := _m.Called(gasPrice, effectiveGasPrice) - - if len(ret) == 0 { - panic("no return value specified for CalculateEffectiveGasPricePercentage") - } - - var r0 uint8 - var r1 error - if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) (uint8, error)); ok { - return rf(gasPrice, effectiveGasPrice) - } - if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) uint8); ok { - r0 = rf(gasPrice, effectiveGasPrice) - } else { - r0 = ret.Get(0).(uint8) - } - - if rf, ok := ret.Get(1).(func(*big.Int, *big.Int) error); ok { - r1 = rf(gasPrice, effectiveGasPrice) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // CountPendingTransactions provides a mock function with given fields: ctx func (_m *PoolMock) CountPendingTransactions(ctx context.Context) (uint64, error) { ret := _m.Called(ctx) diff --git a/jsonrpc/types/interfaces.go b/jsonrpc/types/interfaces.go index 526ab3c55c..c12040dea7 100644 --- a/jsonrpc/types/interfaces.go +++ b/jsonrpc/types/interfaces.go @@ -24,7 +24,6 @@ type PoolInterface interface { GetTransactionByHash(ctx context.Context, hash common.Hash) (*pool.Transaction, error) GetTransactionByL2Hash(ctx context.Context, hash common.Hash) (*pool.Transaction, error) CalculateEffectiveGasPrice(rawTx []byte, txGasPrice *big.Int, txGasUsed uint64, l1GasPrice uint64, l2GasPrice uint64) (*big.Int, error) - CalculateEffectiveGasPricePercentage(gasPrice *big.Int, effectiveGasPrice *big.Int) (uint8, error) EffectiveGasPriceEnabled() 
bool } diff --git a/pool/effectivegasprice.go b/pool/effectivegasprice.go index 98d488885d..c9ad433774 100644 --- a/pool/effectivegasprice.go +++ b/pool/effectivegasprice.go @@ -2,21 +2,12 @@ package pool import ( "bytes" - "errors" "math/big" "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/state" ) -var ( - // ErrEffectiveGasPriceEmpty happens when the effectiveGasPrice or gasPrice is nil or zero - ErrEffectiveGasPriceEmpty = errors.New("effectiveGasPrice or gasPrice cannot be nil or zero") - - // ErrEffectiveGasPriceIsZero happens when the calculated EffectiveGasPrice is zero - ErrEffectiveGasPriceIsZero = errors.New("effectiveGasPrice cannot be zero") -) - // EffectiveGasPrice implements the effective gas prices calculations and checks type EffectiveGasPrice struct { cfg EffectiveGasPriceCfg @@ -122,33 +113,8 @@ func (e *EffectiveGasPrice) CalculateEffectiveGasPrice(rawTx []byte, txGasPrice bfEffectiveGasPrice.Int(effectiveGasPrice) if effectiveGasPrice.Cmp(new(big.Int).SetUint64(0)) == 0 { - return nil, ErrEffectiveGasPriceIsZero + return nil, state.ErrEffectiveGasPriceIsZero } return effectiveGasPrice, nil } - -// CalculateEffectiveGasPricePercentage calculates the gas price's effective percentage -func (e *EffectiveGasPrice) CalculateEffectiveGasPricePercentage(gasPrice *big.Int, effectiveGasPrice *big.Int) (uint8, error) { - const bits = 256 - var bitsBigInt = big.NewInt(bits) - - if effectiveGasPrice == nil || gasPrice == nil || - gasPrice.Cmp(big.NewInt(0)) == 0 || effectiveGasPrice.Cmp(big.NewInt(0)) == 0 { - return 0, ErrEffectiveGasPriceEmpty - } - - if gasPrice.Cmp(effectiveGasPrice) <= 0 { - return state.MaxEffectivePercentage, nil - } - - // Simulate Ceil with integer division - b := new(big.Int).Mul(effectiveGasPrice, bitsBigInt) - b = b.Add(b, gasPrice) - b = b.Sub(b, big.NewInt(1)) //nolint:gomnd - b = b.Div(b, gasPrice) - // At this point we have a percentage between 1-256, we need to sub 1 to have it between 0-255 (byte) - b = b.Sub(b, big.NewInt(1)) //nolint:gomnd - - return uint8(b.Uint64()), nil -} diff --git a/pool/effectivegasprice_test.go b/pool/effectivegasprice_test.go index 96f5a17b9d..c353efdafb 100644 --- a/pool/effectivegasprice_test.go +++ b/pool/effectivegasprice_test.go @@ -4,6 +4,7 @@ import ( "math/big" "testing" + "github.com/0xPolygonHermez/zkevm-node/state" "github.com/stretchr/testify/assert" ) @@ -23,8 +24,6 @@ var ( ) func TestCalculateEffectiveGasPricePercentage(t *testing.T) { - egp := NewEffectiveGasPrice(egpCfg) - testCases := []struct { name string breakEven *big.Int @@ -37,14 +36,14 @@ func TestCalculateEffectiveGasPricePercentage(t *testing.T) { name: "Nil breakEven or gasPrice", gasPrice: big.NewInt(1), expectedValue: uint8(0), - err: ErrEffectiveGasPriceEmpty, + err: state.ErrEffectiveGasPriceEmpty, }, { name: "Zero breakEven or gasPrice", breakEven: big.NewInt(1), gasPrice: big.NewInt(0), expectedValue: uint8(0), - err: ErrEffectiveGasPriceEmpty, + err: state.ErrEffectiveGasPriceEmpty, }, { name: "Both positive, gasPrice less than breakEven", @@ -104,7 +103,7 @@ func TestCalculateEffectiveGasPricePercentage(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - actual, err := egp.CalculateEffectiveGasPricePercentage(tc.gasPrice, tc.breakEven) + actual, err := state.CalculateEffectiveGasPricePercentage(tc.gasPrice, tc.breakEven) assert.Equal(t, tc.err, err) if actual != 0 { assert.Equal(t, tc.expectedValue, actual) diff --git a/pool/pool.go b/pool/pool.go index 
b15df8ebd2..1677318a9f 100644 --- a/pool/pool.go +++ b/pool/pool.go @@ -686,7 +686,7 @@ func (p *Pool) CalculateEffectiveGasPrice(rawTx []byte, txGasPrice *big.Int, txG // CalculateEffectiveGasPricePercentage calculates the gas price's effective percentage func (p *Pool) CalculateEffectiveGasPricePercentage(gasPrice *big.Int, effectiveGasPrice *big.Int) (uint8, error) { - return p.effectiveGasPrice.CalculateEffectiveGasPricePercentage(gasPrice, effectiveGasPrice) + return state.CalculateEffectiveGasPricePercentage(gasPrice, effectiveGasPrice) } // EffectiveGasPriceEnabled returns if effective gas price calculation is enabled or not diff --git a/sequencer/finalizer.go b/sequencer/finalizer.go index 70be6e59d1..744fdbeb8f 100644 --- a/sequencer/finalizer.go +++ b/sequencer/finalizer.go @@ -436,7 +436,7 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, first } } - egpPercentage, err := f.effectiveGasPrice.CalculateEffectiveGasPricePercentage(txGasPrice, tx.EffectiveGasPrice) + egpPercentage, err := state.CalculateEffectiveGasPricePercentage(txGasPrice, tx.EffectiveGasPrice) if err != nil { if f.effectiveGasPrice.IsEnabled() { return nil, err @@ -549,7 +549,7 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx // If EffectiveGasPrice is disabled we will calculate the percentage and save it for later logging if !egpEnabled { - effectivePercentage, err := f.effectiveGasPrice.CalculateEffectiveGasPricePercentage(txGasPrice, tx.EffectiveGasPrice) + effectivePercentage, err := state.CalculateEffectiveGasPricePercentage(txGasPrice, tx.EffectiveGasPrice) if err != nil { log.Warnf("effectiveGasPrice is disabled, but failed to calculate effective gas price percentage (#2), error: %v", err) tx.EGPLog.Error = fmt.Sprintf("%s, CalculateEffectiveGasPricePercentage#2: %s", tx.EGPLog.Error, err) diff --git a/state/effectivegasprice.go b/state/effectivegasprice.go new file mode 100644 index 0000000000..69477a147f --- /dev/null +++ b/state/effectivegasprice.go @@ -0,0 +1,44 @@ +package state + +import ( + "errors" + "math/big" +) + +const ( + // MaxEffectivePercentage is the maximum value that can be used as effective percentage + MaxEffectivePercentage = uint8(255) +) + +var ( + // ErrEffectiveGasPriceEmpty happens when the effectiveGasPrice or gasPrice is nil or zero + ErrEffectiveGasPriceEmpty = errors.New("effectiveGasPrice or gasPrice cannot be nil or zero") + + // ErrEffectiveGasPriceIsZero happens when the calculated EffectiveGasPrice is zero + ErrEffectiveGasPriceIsZero = errors.New("effectiveGasPrice cannot be zero") +) + +// CalculateEffectiveGasPricePercentage calculates the gas price's effective percentage +func CalculateEffectiveGasPricePercentage(gasPrice *big.Int, effectiveGasPrice *big.Int) (uint8, error) { + const bits = 256 + var bitsBigInt = big.NewInt(bits) + + if effectiveGasPrice == nil || gasPrice == nil || + gasPrice.Cmp(big.NewInt(0)) == 0 || effectiveGasPrice.Cmp(big.NewInt(0)) == 0 { + return 0, ErrEffectiveGasPriceEmpty + } + + if gasPrice.Cmp(effectiveGasPrice) <= 0 { + return MaxEffectivePercentage, nil + } + + // Simulate Ceil with integer division + b := new(big.Int).Mul(effectiveGasPrice, bitsBigInt) + b = b.Add(b, gasPrice) + b = b.Sub(b, big.NewInt(1)) //nolint:gomnd + b = b.Div(b, gasPrice) + // At this point we have a percentage between 1-256, we need to sub 1 to have it between 0-255 (byte) + b = b.Sub(b, big.NewInt(1)) //nolint:gomnd + + return uint8(b.Uint64()), nil +} diff --git a/state/helper.go 
b/state/helper.go index 300ffcdc99..3b37d121ee 100644 --- a/state/helper.go +++ b/state/helper.go @@ -18,8 +18,6 @@ const ( double = 2 ether155V = 27 etherPre155V = 35 - // MaxEffectivePercentage is the maximum value that can be used as effective percentage - MaxEffectivePercentage = uint8(255) // Decoding constants headerByteLength uint64 = 1 sLength uint64 = 32 diff --git a/state/trace.go b/state/trace.go index 0d835aa2d3..a1c367f067 100644 --- a/state/trace.go +++ b/state/trace.go @@ -78,7 +78,15 @@ func (s *State) DebugTransaction(ctx context.Context, transactionHash common.Has var effectivePercentage []uint8 for i := 0; i <= count; i++ { txsToEncode = append(txsToEncode, *l2Block.Transactions()[i]) - effectivePercentage = append(effectivePercentage, MaxEffectivePercentage) + txGasPrice := tx.GasPrice() + effectiveGasPrice := receipt.EffectiveGasPrice + egpPercentage, err := CalculateEffectiveGasPricePercentage(txGasPrice, effectiveGasPrice) + if errors.Is(err, ErrEffectiveGasPriceEmpty) { + egpPercentage = MaxEffectivePercentage + } else if err != nil { + return nil, err + } + effectivePercentage = append(effectivePercentage, egpPercentage) log.Debugf("trace will reprocess tx: %v", l2Block.Transactions()[i].Hash().String()) } From ee3630e91f42e2ab81f8c64fa01f1297ff2e622c Mon Sep 17 00:00:00 2001 From: agnusmor <100322135+agnusmor@users.noreply.github.com> Date: Tue, 26 Mar 2024 11:51:02 +0100 Subject: [PATCH 02/11] fix tx index calculation on receipt (#3488) (#3500) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Toni Ramírez <58293609+ToniRamirezM@users.noreply.github.com> --- state/transaction.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/state/transaction.go b/state/transaction.go index f530b4148c..6b7907b8b5 100644 --- a/state/transaction.go +++ b/state/transaction.go @@ -243,6 +243,7 @@ func (s *State) StoreL2Block(ctx context.Context, batchNumber uint64, l2Block *P imStateRoots := make([]common.Hash, 0, numTxs) var receipt *types.Receipt + txIndex := 0 for i, txResponse := range l2Block.TransactionResponses { // if the transaction has an intrinsic invalid tx error it means // the transaction has not changed the state, so we don't store it @@ -262,9 +263,10 @@ func (s *State) StoreL2Block(ctx context.Context, batchNumber uint64, l2Block *P storeTxsEGPData = append(storeTxsEGPData, storeTxEGPData) - receipt = GenerateReceipt(header.Number, txResponse, uint(i), forkID) + receipt = GenerateReceipt(header.Number, txResponse, uint(txIndex), forkID) receipts = append(receipts, receipt) imStateRoots = append(imStateRoots, txResp.StateRoot) + txIndex++ } // Create block to be able to calculate its hash From e0d7266f2c0bca8d1b50d330a9695fc170aec542 Mon Sep 17 00:00:00 2001 From: Thiago Coimbra Lemos Date: Tue, 2 Apr 2024 09:48:23 -0300 Subject: [PATCH 03/11] sort logs by tx hash and then by log index (#3476) --- state/pgstatestorage/pgstatestorage.go | 6 +- state/pgstatestorage/pgstatestorage_test.go | 307 +++++++++++++++++--- 2 files changed, 276 insertions(+), 37 deletions(-) diff --git a/state/pgstatestorage/pgstatestorage.go b/state/pgstatestorage/pgstatestorage.go index 9d17756e29..08604dc6f4 100644 --- a/state/pgstatestorage/pgstatestorage.go +++ b/state/pgstatestorage/pgstatestorage.go @@ -119,7 +119,7 @@ func (p *PostgresStorage) GetStateRootByBatchNumber(ctx context.Context, batchNu return common.HexToHash(stateRootStr), nil } -// GetLogsByBlockNumber get all the logs from a specific block ordered 
by log index +// GetLogsByBlockNumber get all the logs from a specific block ordered by tx index and log index func (p *PostgresStorage) GetLogsByBlockNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) ([]*types.Log, error) { const query = ` SELECT t.l2_block_num, b.block_hash, l.tx_hash, r.tx_index, l.log_index, l.address, l.data, l.topic0, l.topic1, l.topic2, l.topic3 @@ -128,7 +128,7 @@ func (p *PostgresStorage) GetLogsByBlockNumber(ctx context.Context, blockNumber INNER JOIN state.l2block b ON b.block_num = t.l2_block_num INNER JOIN state.receipt r ON r.tx_hash = t.hash WHERE b.block_num = $1 - ORDER BY l.log_index ASC` + ORDER BY r.tx_index ASC, l.log_index ASC` q := p.getExecQuerier(dbTx) rows, err := q.Query(ctx, query, blockNumber) @@ -159,7 +159,7 @@ func (p *PostgresStorage) GetLogs(ctx context.Context, fromBlock uint64, toBlock const queryFilterByBlockHash = `AND b.block_hash = $7 ` const queryFilterByBlockNumbers = `AND b.block_num BETWEEN $7 AND $8 ` - const queryOrder = `ORDER BY b.block_num ASC, l.log_index ASC` + const queryOrder = `ORDER BY b.block_num ASC, r.tx_index ASC, l.log_index ASC` // count queries const queryToCountLogsByBlockHash = "" + diff --git a/state/pgstatestorage/pgstatestorage_test.go b/state/pgstatestorage/pgstatestorage_test.go index 29b7f67717..7d934f029e 100644 --- a/state/pgstatestorage/pgstatestorage_test.go +++ b/state/pgstatestorage/pgstatestorage_test.go @@ -872,7 +872,7 @@ func TestGetLogs(t *testing.T) { ctx := context.Background() cfg := state.Config{ - MaxLogsCount: 8, + MaxLogsCount: 40, MaxLogsBlockRange: 10, ForkIDIntervals: stateCfg.ForkIDIntervals, } @@ -895,39 +895,69 @@ func TestGetLogs(t *testing.T) { time := time.Now() blockNumber := big.NewInt(1) - for i := 0; i < 3; i++ { - tx := types.NewTx(&types.LegacyTx{ - Nonce: uint64(i), - To: nil, - Value: new(big.Int), - Gas: 0, - GasPrice: big.NewInt(0), - }) - - logs := []*types.Log{} - for j := 0; j < 4; j++ { - logs = append(logs, &types.Log{TxHash: tx.Hash(), Index: uint(j)}) - } - - receipt := &types.Receipt{ - Type: tx.Type(), - PostState: state.ZeroHash.Bytes(), - CumulativeGasUsed: 0, - EffectiveGasPrice: big.NewInt(0), - BlockNumber: blockNumber, - GasUsed: tx.Gas(), - TxHash: tx.Hash(), - TransactionIndex: 0, - Status: types.ReceiptStatusSuccessful, - Logs: logs, + maxBlocks := 3 + txsPerBlock := 4 + logsPerTx := 5 + + nonce := uint64(0) + + // number of blocks to be created + for b := 0; b < maxBlocks; b++ { + logIndex := uint(0) + transactions := make([]*types.Transaction, 0, txsPerBlock) + receipts := make([]*types.Receipt, 0, txsPerBlock) + stateRoots := make([]common.Hash, 0, txsPerBlock) + + // number of transactions in a block to be created + for t := 0; t < txsPerBlock; t++ { + nonce++ + txIndex := uint(t + 1) + + tx := types.NewTx(&types.LegacyTx{ + Nonce: nonce, + To: nil, + Value: new(big.Int), + Gas: 0, + GasPrice: big.NewInt(0), + }) + + logs := []*types.Log{} + + // if block is even logIndex follows a sequence related to the block + // for odd blocks logIndex follows a sequence related ot the tx + // this is needed to simulate a logIndex difference introduced on Etrog + // and we need to maintain to be able to synchronize these blocks + // number of logs in a transaction to be created + for l := 0; l < logsPerTx; l++ { + li := logIndex + if b%2 != 0 { // even block + li = uint(l) + } + + logs = append(logs, &types.Log{TxHash: tx.Hash(), TxIndex: txIndex, Index: li}) + logIndex++ + } + + receipt := &types.Receipt{ + Type: tx.Type(), + PostState: 
state.ZeroHash.Bytes(), + CumulativeGasUsed: 0, + EffectiveGasPrice: big.NewInt(0), + BlockNumber: blockNumber, + GasUsed: tx.Gas(), + TxHash: tx.Hash(), + TransactionIndex: txIndex, + Status: types.ReceiptStatusSuccessful, + Logs: logs, + } + + transactions = append(transactions, tx) + receipts = append(receipts, receipt) + stateRoots = append(stateRoots, state.ZeroHash) } - transactions := []*types.Transaction{tx} - receipts := []*types.Receipt{receipt} - stateRoots := []common.Hash{state.ZeroHash} - header := state.NewL2Header(&types.Header{ - Number: big.NewInt(int64(i) + 1), + Number: big.NewInt(int64(b) + 1), ParentHash: state.ZeroHash, Coinbase: state.ZeroAddress, Root: state.ZeroHash, @@ -954,6 +984,8 @@ func TestGetLogs(t *testing.T) { require.NoError(t, err) } + require.NoError(t, dbTx.Commit(ctx)) + type testCase struct { name string from uint64 @@ -988,20 +1020,227 @@ func TestGetLogs(t *testing.T) { name: "logs returned successfully", from: 1, to: 2, - logCount: 8, + logCount: 40, expectedError: nil, }, } for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { - logs, err := testState.GetLogs(ctx, testCase.from, testCase.to, []common.Address{}, [][]common.Hash{}, nil, nil, dbTx) - + logs, err := testState.GetLogs(ctx, testCase.from, testCase.to, []common.Address{}, [][]common.Hash{}, nil, nil, nil) assert.Equal(t, testCase.logCount, len(logs)) assert.Equal(t, testCase.expectedError, err) + + // check tx index and log index order + lastBlockNumber := uint64(0) + lastTxIndex := uint(0) + lastLogIndex := uint(0) + + for i, l := range logs { + // if block has changed and it's not the first log, reset lastTxIndex + if uint(l.BlockNumber) != uint(lastBlockNumber) && i != 0 { + lastTxIndex = 0 + } + + if l.TxIndex < lastTxIndex { + t.Errorf("invalid tx index, expected greater than or equal to %v, but found %v", lastTxIndex, l.TxIndex) + } + // add tolerance for log index Etrog issue that was starting log indexes from 0 for each tx within a block + // if tx index has changed and the log index starts on zero, than resets the lastLogIndex to zero + if l.TxIndex != lastTxIndex && l.Index == 0 { + lastLogIndex = 0 + } + + if l.Index < lastLogIndex { + t.Errorf("invalid log index, expected greater than %v, but found %v", lastLogIndex, l.Index) + } + + lastBlockNumber = l.BlockNumber + lastTxIndex = l.TxIndex + lastLogIndex = l.Index + } + }) + } +} + +func TestGetLogsByBlockNumber(t *testing.T) { + initOrResetDB() + + ctx := context.Background() + + cfg := state.Config{ + MaxLogsCount: 40, + MaxLogsBlockRange: 10, + ForkIDIntervals: stateCfg.ForkIDIntervals, + } + + mt, err := l1infotree.NewL1InfoTree(32, [][32]byte{}) + if err != nil { + panic(err) + } + testState = state.NewState(stateCfg, pgstatestorage.NewPostgresStorage(cfg, stateDb), executorClient, stateTree, nil, mt) + + dbTx, err := testState.BeginStateTransaction(ctx) + require.NoError(t, err) + err = testState.AddBlock(ctx, block, dbTx) + assert.NoError(t, err) + + batchNumber := uint64(1) + _, err = testState.Exec(ctx, "INSERT INTO state.batch (batch_num, wip) VALUES ($1, FALSE)", batchNumber) + assert.NoError(t, err) + + time := time.Now() + blockNumber := big.NewInt(1) + + maxBlocks := 3 + txsPerBlock := 4 + logsPerTx := 5 + + nonce := uint64(0) + + // number of blocks to be created + for b := 0; b < maxBlocks; b++ { + logIndex := uint(0) + transactions := make([]*types.Transaction, 0, txsPerBlock) + receipts := make([]*types.Receipt, 0, txsPerBlock) + stateRoots := make([]common.Hash, 0, txsPerBlock) + 
+ // number of transactions in a block to be created + for t := 0; t < txsPerBlock; t++ { + nonce++ + txIndex := uint(t + 1) + + tx := types.NewTx(&types.LegacyTx{ + Nonce: nonce, + To: nil, + Value: new(big.Int), + Gas: 0, + GasPrice: big.NewInt(0), + }) + + logs := []*types.Log{} + + // if block is even logIndex follows a sequence related to the block + // for odd blocks logIndex follows a sequence related ot the tx + // this is needed to simulate a logIndex difference introduced on Etrog + // and we need to maintain to be able to synchronize these blocks + // number of logs in a transaction to be created + for l := 0; l < logsPerTx; l++ { + li := logIndex + if b%2 != 0 { // even block + li = uint(l) + } + + logs = append(logs, &types.Log{TxHash: tx.Hash(), TxIndex: txIndex, Index: li}) + logIndex++ + } + + receipt := &types.Receipt{ + Type: tx.Type(), + PostState: state.ZeroHash.Bytes(), + CumulativeGasUsed: 0, + EffectiveGasPrice: big.NewInt(0), + BlockNumber: blockNumber, + GasUsed: tx.Gas(), + TxHash: tx.Hash(), + TransactionIndex: txIndex, + Status: types.ReceiptStatusSuccessful, + Logs: logs, + } + + transactions = append(transactions, tx) + receipts = append(receipts, receipt) + stateRoots = append(stateRoots, state.ZeroHash) + } + + header := state.NewL2Header(&types.Header{ + Number: big.NewInt(int64(b) + 1), + ParentHash: state.ZeroHash, + Coinbase: state.ZeroAddress, + Root: state.ZeroHash, + GasUsed: 1, + GasLimit: 10, + Time: uint64(time.Unix()), }) + + st := trie.NewStackTrie(nil) + l2Block := state.NewL2Block(header, transactions, []*state.L2Header{}, receipts, st) + for _, receipt := range receipts { + receipt.BlockHash = l2Block.Hash() + } + + numTxs := len(transactions) + storeTxsEGPData := make([]state.StoreTxEGPData, numTxs) + txsL2Hash := make([]common.Hash, numTxs) + for i := range transactions { + storeTxsEGPData[i] = state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: state.MaxEffectivePercentage} + txsL2Hash[i] = common.HexToHash(fmt.Sprintf("0x%d", i)) + } + + err = testState.AddL2Block(ctx, batchNumber, l2Block, receipts, txsL2Hash, storeTxsEGPData, stateRoots, dbTx) + require.NoError(t, err) } + require.NoError(t, dbTx.Commit(ctx)) + + type testCase struct { + name string + blockNumber uint64 + logCount int + expectedError error + } + + testCases := []testCase{ + { + name: "logs returned successfully", + blockNumber: 1, + logCount: 20, + expectedError: nil, + }, + { + name: "logs returned successfully", + blockNumber: 2, + logCount: 20, + expectedError: nil, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + logs, err := testState.GetLogsByBlockNumber(ctx, testCase.blockNumber, nil) + assert.Equal(t, testCase.logCount, len(logs)) + assert.Equal(t, testCase.expectedError, err) + + // check tx index and log index order + lastBlockNumber := uint64(0) + lastTxIndex := uint(0) + lastLogIndex := uint(0) + + for i, l := range logs { + // if block has changed and it's not the first log, reset lastTxIndex + if uint(l.BlockNumber) != uint(lastBlockNumber) && i != 0 { + lastTxIndex = 0 + } + + if l.TxIndex < lastTxIndex { + t.Errorf("invalid tx index, expected greater than or equal to %v, but found %v", lastTxIndex, l.TxIndex) + } + // add tolerance for log index Etrog issue that was starting log indexes from 0 for each tx within a block + // if tx index has changed and the log index starts on zero, than resets the lastLogIndex to zero + if l.TxIndex != lastTxIndex && l.Index == 0 { + lastLogIndex = 0 + } + + if l.Index < 
lastLogIndex { + t.Errorf("invalid log index, expected greater than %v, but found %v", lastLogIndex, l.Index) + } + + lastBlockNumber = l.BlockNumber + lastTxIndex = l.TxIndex + lastLogIndex = l.Index + } + }) + } } func TestGetNativeBlockHashesInRange(t *testing.T) { From 31aecea4d496919deef21eb87ef1aca07c3c630b Mon Sep 17 00:00:00 2001 From: Alonso Rodriguez Date: Thu, 4 Apr 2024 09:49:12 +0200 Subject: [PATCH 04/11] #3514 Fix l1 info root after reorg (#3515) * Fix ResetL1InfoRoot + typos * mocks * linter * refactor * more robust * Fix comments * Add extra check reorg after calling get information * Fix non e2e test --- aggregator/aggregator.go | 2 +- aggregator/aggregator_test.go | 10 +-- aggregator/interfaces.go | 2 +- aggregator/mocks/mock_state.go | 6 +- l1infotree/tree.go | 21 ++++++- state/interfaces.go | 2 +- state/l1infotree.go | 15 +++-- state/mocks/mock_storage.go | 22 +++---- state/pgstatestorage/l1infotree.go | 2 +- state/reset.go | 17 +++--- .../etrog/processor_l1_sequence_batches.go | 2 +- .../mocks/state_full_interface.go | 47 ++++++++++++++ .../executor_trusted_batch_sync.go | 8 +-- synchronizer/synchronizer.go | 61 +++++++++++++------ synchronizer/synchronizer_test.go | 10 +++ test/e2e/forced_batches_vector_shared.go | 2 +- test/e2e/state_test.go | 2 +- tools/genesis/genesisparser/genesisparser.go | 12 ++-- 18 files changed, 172 insertions(+), 71 deletions(-) diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index c135f301e9..751ade1ff7 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -1021,7 +1021,7 @@ func (a *Aggregator) buildInputProver(ctx context.Context, batchToVerify *state. if err != nil { return nil, err } - leaves, err := a.State.GetLeafsByL1InfoRoot(ctx, *l1InfoRoot, nil) + leaves, err := a.State.GetLeavesByL1InfoRoot(ctx, *l1InfoRoot, nil) if err != nil { return nil, err } diff --git a/aggregator/aggregator_test.go b/aggregator/aggregator_test.go index a071828a16..9a08afbab5 100644 --- a/aggregator/aggregator_test.go +++ b/aggregator/aggregator_test.go @@ -801,7 +801,7 @@ func TestTryGenerateBatchProof(t *testing.T) { } m.etherman.On("GetLatestBlockHeader", mock.Anything).Return(&types.Header{Number: new(big.Int).SetUint64(1)}, nil).Once() m.stateMock.On("GetVirtualBatch", mock.Anything, lastVerifiedBatchNum+1, nil).Return(&vb, nil).Twice() - m.stateMock.On("GetLeafsByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() + m.stateMock.On("GetLeavesByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() expectedInputProver, err := a.buildInputProver(context.Background(), &batchToProve) require.NoError(err) m.proverMock.On("BatchProof", expectedInputProver).Return(nil, errBanana).Once() @@ -844,7 +844,7 @@ func TestTryGenerateBatchProof(t *testing.T) { } m.etherman.On("GetLatestBlockHeader", mock.Anything).Return(&types.Header{Number: new(big.Int).SetUint64(1)}, nil).Once() m.stateMock.On("GetVirtualBatch", mock.Anything, lastVerifiedBatchNum+1, nil).Return(&vb, nil).Twice() - m.stateMock.On("GetLeafsByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() + m.stateMock.On("GetLeavesByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() expectedInputProver, err := a.buildInputProver(context.Background(), &batchToProve) require.NoError(err) m.proverMock.On("BatchProof", 
expectedInputProver).Return(&proofID, nil).Once() @@ -888,7 +888,7 @@ func TestTryGenerateBatchProof(t *testing.T) { } m.etherman.On("GetLatestBlockHeader", mock.Anything).Return(&types.Header{Number: new(big.Int).SetUint64(1)}, nil).Once() m.stateMock.On("GetVirtualBatch", mock.Anything, lastVerifiedBatchNum+1, nil).Return(&vb, nil).Twice() - m.stateMock.On("GetLeafsByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() + m.stateMock.On("GetLeavesByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() expectedInputProver, err := a.buildInputProver(context.Background(), &batchToProve) require.NoError(err) m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() @@ -932,7 +932,7 @@ func TestTryGenerateBatchProof(t *testing.T) { } m.etherman.On("GetLatestBlockHeader", mock.Anything).Return(&types.Header{Number: new(big.Int).SetUint64(1)}, nil).Once() m.stateMock.On("GetVirtualBatch", mock.Anything, lastVerifiedBatchNum+1, nil).Return(&vb, nil).Twice() - m.stateMock.On("GetLeafsByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() + m.stateMock.On("GetLeavesByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() expectedInputProver, err := a.buildInputProver(context.Background(), &batchToProve) require.NoError(err) m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() @@ -989,7 +989,7 @@ func TestTryGenerateBatchProof(t *testing.T) { TimestampBatchEtrog: &t, } m.stateMock.On("GetVirtualBatch", mock.Anything, lastVerifiedBatchNum+1, nil).Return(&vb, nil).Twice() - m.stateMock.On("GetLeafsByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() + m.stateMock.On("GetLeavesByL1InfoRoot", mock.Anything, *vb.L1InfoRoot, nil).Return([]state.L1InfoTreeExitRootStorageEntry{}, nil).Twice() expectedInputProver, err := a.buildInputProver(context.Background(), &batchToProve) require.NoError(err) m.proverMock.On("BatchProof", expectedInputProver).Return(&proofID, nil).Once() diff --git a/aggregator/interfaces.go b/aggregator/interfaces.go index 0d6b11b7ed..a6e464e3aa 100644 --- a/aggregator/interfaces.go +++ b/aggregator/interfaces.go @@ -65,7 +65,7 @@ type stateInterface interface { CleanupGeneratedProofs(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error CleanupLockedProofs(ctx context.Context, duration string, dbTx pgx.Tx) (int64, error) GetL1InfoRootLeafByIndex(ctx context.Context, l1InfoTreeIndex uint32, dbTx pgx.Tx) (state.L1InfoTreeExitRootStorageEntry, error) - GetLeafsByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error) + GetLeavesByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error) GetVirtualBatchParentHash(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (common.Hash, error) GetForcedBatchParentHash(ctx context.Context, forcedBatchNumber uint64, dbTx pgx.Tx) (common.Hash, error) GetVirtualBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.VirtualBatch, error) diff --git a/aggregator/mocks/mock_state.go b/aggregator/mocks/mock_state.go index cfc5b66e7d..24d5768523 100644 --- a/aggregator/mocks/mock_state.go +++ b/aggregator/mocks/mock_state.go @@ -295,12 +295,12 @@ func (_m *StateMock) 
GetLastVerifiedBatch(ctx context.Context, dbTx pgx.Tx) (*st return r0, r1 } -// GetLeafsByL1InfoRoot provides a mock function with given fields: ctx, l1InfoRoot, dbTx -func (_m *StateMock) GetLeafsByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error) { +// GetLeavesByL1InfoRoot provides a mock function with given fields: ctx, l1InfoRoot, dbTx +func (_m *StateMock) GetLeavesByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error) { ret := _m.Called(ctx, l1InfoRoot, dbTx) if len(ret) == 0 { - panic("no return value specified for GetLeafsByL1InfoRoot") + panic("no return value specified for GetLeavesByL1InfoRoot") } var r0 []state.L1InfoTreeExitRootStorageEntry diff --git a/l1infotree/tree.go b/l1infotree/tree.go index e0c19da6bf..d3fe48ed2f 100644 --- a/l1infotree/tree.go +++ b/l1infotree/tree.go @@ -26,7 +26,7 @@ func NewL1InfoTree(height uint8, initialLeaves [][32]byte) (*L1InfoTree, error) var err error mt.siblings, mt.currentRoot, err = mt.initSiblings(initialLeaves) if err != nil { - log.Error("error initializing si siblings. Error: ", err) + log.Error("error initializing siblings. Error: ", err) return nil, err } log.Debug("Initial count: ", mt.count) @@ -34,6 +34,25 @@ func NewL1InfoTree(height uint8, initialLeaves [][32]byte) (*L1InfoTree, error) return mt, nil } +// ResetL1InfoTree resets the L1InfoTree. +func (mt *L1InfoTree) ResetL1InfoTree(initialLeaves [][32]byte) (*L1InfoTree, error) { + log.Info("Resetting L1InfoTree...") + newMT := &L1InfoTree{ + zeroHashes: generateZeroHashes(32), // nolint:gomnd + height: 32, // nolint:gomnd + count: uint32(len(initialLeaves)), + } + var err error + newMT.siblings, newMT.currentRoot, err = newMT.initSiblings(initialLeaves) + if err != nil { + log.Error("error initializing siblings. 
Error: ", err) + return nil, err + } + log.Debug("Reset initial count: ", newMT.count) + log.Debug("Reset initial root: ", newMT.currentRoot) + return newMT, nil +} + func buildIntermediate(leaves [][32]byte) ([][][]byte, [][32]byte) { var ( nodes [][][]byte diff --git a/state/interfaces.go b/state/interfaces.go index bbd47d1ba5..ac9c2a0a67 100644 --- a/state/interfaces.go +++ b/state/interfaces.go @@ -146,7 +146,7 @@ type storage interface { GetRawBatchTimestamps(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*time.Time, *time.Time, error) GetL1InfoRootLeafByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) (L1InfoTreeExitRootStorageEntry, error) GetL1InfoRootLeafByIndex(ctx context.Context, l1InfoTreeIndex uint32, dbTx pgx.Tx) (L1InfoTreeExitRootStorageEntry, error) - GetLeafsByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) ([]L1InfoTreeExitRootStorageEntry, error) + GetLeavesByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) ([]L1InfoTreeExitRootStorageEntry, error) GetBlockByNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (*Block, error) GetVirtualBatchParentHash(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (common.Hash, error) GetForcedBatchParentHash(ctx context.Context, forcedBatchNumber uint64, dbTx pgx.Tx) (common.Hash, error) diff --git a/state/l1infotree.go b/state/l1infotree.go index ea89d0e206..8cac9ea5d7 100644 --- a/state/l1infotree.go +++ b/state/l1infotree.go @@ -3,7 +3,6 @@ package state import ( "context" "errors" - "fmt" "github.com/0xPolygonHermez/zkevm-node/l1infotree" "github.com/0xPolygonHermez/zkevm-node/log" @@ -34,20 +33,20 @@ func (s *State) buildL1InfoTreeCacheIfNeed(ctx context.Context, dbTx pgx.Tx) err if s.l1InfoTree != nil { return nil } - log.Debugf("Building L1InfoTree cache") - allLeaves, err := s.storage.GetAllL1InfoRootEntries(ctx, dbTx) + // Reset L1InfoTree siblings and leaves + allLeaves, err := s.GetAllL1InfoRootEntries(ctx, dbTx) if err != nil { - log.Error("error getting all leaves. Error: ", err) - return fmt.Errorf("error getting all leaves. Error: %w", err) + log.Error("error getting all leaves to reset l1InfoTree. Error: ", err) + return err } var leaves [][32]byte for _, leaf := range allLeaves { leaves = append(leaves, leaf.Hash()) } - mt, err := l1infotree.NewL1InfoTree(uint8(32), leaves) //nolint:gomnd + mt, err := s.l1InfoTree.ResetL1InfoTree(leaves) if err != nil { - log.Error("error creating L1InfoTree. Error: ", err) - return fmt.Errorf("error creating L1InfoTree. Error: %w", err) + log.Error("error resetting l1InfoTree. 
Error: ", err) + return err } s.l1InfoTree = mt return nil diff --git a/state/mocks/mock_storage.go b/state/mocks/mock_storage.go index 2574697028..d47b4524bd 100644 --- a/state/mocks/mock_storage.go +++ b/state/mocks/mock_storage.go @@ -4947,12 +4947,12 @@ func (_c *StorageMock_GetLatestVirtualBatchTimestamp_Call) RunAndReturn(run func return _c } -// GetLeafsByL1InfoRoot provides a mock function with given fields: ctx, l1InfoRoot, dbTx -func (_m *StorageMock) GetLeafsByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error) { +// GetLeavesByL1InfoRoot provides a mock function with given fields: ctx, l1InfoRoot, dbTx +func (_m *StorageMock) GetLeavesByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error) { ret := _m.Called(ctx, l1InfoRoot, dbTx) if len(ret) == 0 { - panic("no return value specified for GetLeafsByL1InfoRoot") + panic("no return value specified for GetLeavesByL1InfoRoot") } var r0 []state.L1InfoTreeExitRootStorageEntry @@ -4977,32 +4977,32 @@ func (_m *StorageMock) GetLeafsByL1InfoRoot(ctx context.Context, l1InfoRoot comm return r0, r1 } -// StorageMock_GetLeafsByL1InfoRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLeafsByL1InfoRoot' -type StorageMock_GetLeafsByL1InfoRoot_Call struct { +// StorageMock_GetLeavesByL1InfoRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLeavesByL1InfoRoot' +type StorageMock_GetLeavesByL1InfoRoot_Call struct { *mock.Call } -// GetLeafsByL1InfoRoot is a helper method to define mock.On call +// GetLeavesByL1InfoRoot is a helper method to define mock.On call // - ctx context.Context // - l1InfoRoot common.Hash // - dbTx pgx.Tx -func (_e *StorageMock_Expecter) GetLeafsByL1InfoRoot(ctx interface{}, l1InfoRoot interface{}, dbTx interface{}) *StorageMock_GetLeafsByL1InfoRoot_Call { - return &StorageMock_GetLeafsByL1InfoRoot_Call{Call: _e.mock.On("GetLeafsByL1InfoRoot", ctx, l1InfoRoot, dbTx)} +func (_e *StorageMock_Expecter) GetLeavesByL1InfoRoot(ctx interface{}, l1InfoRoot interface{}, dbTx interface{}) *StorageMock_GetLeavesByL1InfoRoot_Call { + return &StorageMock_GetLeavesByL1InfoRoot_Call{Call: _e.mock.On("GetLeavesByL1InfoRoot", ctx, l1InfoRoot, dbTx)} } -func (_c *StorageMock_GetLeafsByL1InfoRoot_Call) Run(run func(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx)) *StorageMock_GetLeafsByL1InfoRoot_Call { +func (_c *StorageMock_GetLeavesByL1InfoRoot_Call) Run(run func(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx)) *StorageMock_GetLeavesByL1InfoRoot_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].(common.Hash), args[2].(pgx.Tx)) }) return _c } -func (_c *StorageMock_GetLeafsByL1InfoRoot_Call) Return(_a0 []state.L1InfoTreeExitRootStorageEntry, _a1 error) *StorageMock_GetLeafsByL1InfoRoot_Call { +func (_c *StorageMock_GetLeavesByL1InfoRoot_Call) Return(_a0 []state.L1InfoTreeExitRootStorageEntry, _a1 error) *StorageMock_GetLeavesByL1InfoRoot_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *StorageMock_GetLeafsByL1InfoRoot_Call) RunAndReturn(run func(context.Context, common.Hash, pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error)) *StorageMock_GetLeafsByL1InfoRoot_Call { +func (_c *StorageMock_GetLeavesByL1InfoRoot_Call) RunAndReturn(run func(context.Context, common.Hash, pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error)) 
*StorageMock_GetLeavesByL1InfoRoot_Call { _c.Call.Return(run) return _c } diff --git a/state/pgstatestorage/l1infotree.go b/state/pgstatestorage/l1infotree.go index 450124dde2..ed3fe2dd38 100644 --- a/state/pgstatestorage/l1infotree.go +++ b/state/pgstatestorage/l1infotree.go @@ -112,7 +112,7 @@ func (p *PostgresStorage) GetL1InfoRootLeafByIndex(ctx context.Context, l1InfoTr return entry, nil } -func (p *PostgresStorage) GetLeafsByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error) { +func (p *PostgresStorage) GetLeavesByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash, dbTx pgx.Tx) ([]state.L1InfoTreeExitRootStorageEntry, error) { // TODO: Optimize this query const getLeafsByL1InfoRootSQL = `SELECT block_num, timestamp, mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, l1_info_tree_index FROM state.exit_root diff --git a/state/reset.go b/state/reset.go index 62571250e0..655f5f3dd1 100644 --- a/state/reset.go +++ b/state/reset.go @@ -3,6 +3,7 @@ package state import ( "context" + "github.com/0xPolygonHermez/zkevm-node/log" "github.com/jackc/pgx/v4" ) @@ -13,12 +14,14 @@ func (s *State) Reset(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) erro // - VerifiedBatches // - Entries in exit_root table err := s.ResetToL1BlockNumber(ctx, blockNumber, dbTx) - if err == nil { - // Discard L1InfoTree cache - // We can't rebuild cache, because we are inside a transaction, so we dont known - // is going to be a commit or a rollback. So is going to be rebuild on the next - // request that needs it. - s.l1InfoTree = nil + if err != nil { + log.Error("error resetting L1BlockNumber. Error: ", err) + return err } - return err + // Discard L1InfoTree cache + // We can't rebuild cache, because we are inside a transaction, so we dont known + // is going to be a commit or a rollback. So is going to be rebuild on the next + // request that needs it. + s.l1InfoTree = nil + return nil } diff --git a/synchronizer/actions/etrog/processor_l1_sequence_batches.go b/synchronizer/actions/etrog/processor_l1_sequence_batches.go index aa82c9c791..e1528594d9 100644 --- a/synchronizer/actions/etrog/processor_l1_sequence_batches.go +++ b/synchronizer/actions/etrog/processor_l1_sequence_batches.go @@ -391,7 +391,7 @@ func (p *ProcessorL1SequenceBatchesEtrog) checkTrustedState(ctx context.Context, reason := reorgReasons.String() if p.sync.IsTrustedSequencer() { - log.Errorf("TRUSTED REORG DETECTED! Batch: %d reson:%s", batch.BatchNumber, reason) + log.Errorf("TRUSTED REORG DETECTED! Batch: %d reason:%s", batch.BatchNumber, reason) // Halt function never have to return! it must blocks the process p.halt(ctx, fmt.Errorf("TRUSTED REORG DETECTED! Batch: %d", batch.BatchNumber)) log.Errorf("CRITICAL!!!: Never have to execute this code. Halt function never have to return! 
it must blocks the process") diff --git a/synchronizer/common/syncinterfaces/mocks/state_full_interface.go b/synchronizer/common/syncinterfaces/mocks/state_full_interface.go index f41e906728..1559654641 100644 --- a/synchronizer/common/syncinterfaces/mocks/state_full_interface.go +++ b/synchronizer/common/syncinterfaces/mocks/state_full_interface.go @@ -2343,6 +2343,53 @@ func (_c *StateFullInterface_ResetForkID_Call) RunAndReturn(run func(context.Con return _c } +// ResetL1InfoTree provides a mock function with given fields: ctx, dbTx +func (_m *StateFullInterface) ResetL1InfoTree(ctx context.Context, dbTx pgx.Tx) error { + ret := _m.Called(ctx, dbTx) + + if len(ret) == 0 { + panic("no return value specified for ResetL1InfoTree") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) error); ok { + r0 = rf(ctx, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// StateFullInterface_ResetL1InfoTree_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ResetL1InfoTree' +type StateFullInterface_ResetL1InfoTree_Call struct { + *mock.Call +} + +// ResetL1InfoTree is a helper method to define mock.On call +// - ctx context.Context +// - dbTx pgx.Tx +func (_e *StateFullInterface_Expecter) ResetL1InfoTree(ctx interface{}, dbTx interface{}) *StateFullInterface_ResetL1InfoTree_Call { + return &StateFullInterface_ResetL1InfoTree_Call{Call: _e.mock.On("ResetL1InfoTree", ctx, dbTx)} +} + +func (_c *StateFullInterface_ResetL1InfoTree_Call) Run(run func(ctx context.Context, dbTx pgx.Tx)) *StateFullInterface_ResetL1InfoTree_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(pgx.Tx)) + }) + return _c +} + +func (_c *StateFullInterface_ResetL1InfoTree_Call) Return(_a0 error) *StateFullInterface_ResetL1InfoTree_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *StateFullInterface_ResetL1InfoTree_Call) RunAndReturn(run func(context.Context, pgx.Tx) error) *StateFullInterface_ResetL1InfoTree_Call { + _c.Call.Return(run) + return _c +} + // ResetTrustedState provides a mock function with given fields: ctx, batchNumber, dbTx func (_m *StateFullInterface) ResetTrustedState(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error { ret := _m.Called(ctx, batchNumber, dbTx) diff --git a/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go b/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go index bb1a0798fa..7c89494441 100644 --- a/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go +++ b/synchronizer/l2_sync/l2_sync_etrog/executor_trusted_batch_sync.go @@ -136,13 +136,13 @@ func (b *SyncTrustedBatchExecutorForEtrog) FullProcess(ctx context.Context, data return nil, err } - leafs, l1InfoRoot, _, err := b.state.GetL1InfoTreeDataFromBatchL2Data(ctx, data.TrustedBatch.BatchL2Data, dbTx) + leaves, l1InfoRoot, _, err := b.state.GetL1InfoTreeDataFromBatchL2Data(ctx, data.TrustedBatch.BatchL2Data, dbTx) if err != nil { log.Errorf("%s error getting GetL1InfoTreeDataFromBatchL2Data: %v. Error:%w", data.DebugPrefix, l1InfoRoot, err) return nil, err } debugStr := data.DebugPrefix - processBatchResp, err := b.processAndStoreTxs(ctx, b.getProcessRequest(data, leafs, l1InfoRoot), dbTx, debugStr) + processBatchResp, err := b.processAndStoreTxs(ctx, b.getProcessRequest(data, leaves, l1InfoRoot), dbTx, debugStr) if err != nil { log.Error("%s error procesingAndStoringTxs. 
Error: ", debugStr, err) return nil, err @@ -197,7 +197,7 @@ func (b *SyncTrustedBatchExecutorForEtrog) IncrementalProcess(ctx context.Contex return nil, err } - leafs, l1InfoRoot, _, err := b.state.GetL1InfoTreeDataFromBatchL2Data(ctx, PartialBatchL2Data, dbTx) + leaves, l1InfoRoot, _, err := b.state.GetL1InfoTreeDataFromBatchL2Data(ctx, PartialBatchL2Data, dbTx) if err != nil { log.Errorf("%s error getting GetL1InfoTreeDataFromBatchL2Data: %v. Error:%w", data.DebugPrefix, l1InfoRoot, err) // TODO: Need to refine, depending of the response of GetL1InfoTreeDataFromBatchL2Data @@ -205,7 +205,7 @@ func (b *SyncTrustedBatchExecutorForEtrog) IncrementalProcess(ctx context.Contex return nil, syncinterfaces.ErrMissingSyncFromL1 } debugStr := fmt.Sprintf("%s: Batch %d:", data.Mode, uint64(data.TrustedBatch.Number)) - processReq := b.getProcessRequest(data, leafs, l1InfoRoot) + processReq := b.getProcessRequest(data, leaves, l1InfoRoot) processReq.Transactions = PartialBatchL2Data processBatchResp, err := b.processAndStoreTxs(ctx, processReq, dbTx, debugStr) if err != nil { diff --git a/synchronizer/synchronizer.go b/synchronizer/synchronizer.go index 280cfe201f..996bbc3907 100644 --- a/synchronizer/synchronizer.go +++ b/synchronizer/synchronizer.go @@ -487,6 +487,14 @@ func (s *ClientSynchronizer) syncBlocksParallel(lastEthBlockSynced *state.Block) // This function syncs the node from a specific block to the latest func (s *ClientSynchronizer) syncBlocksSequential(lastEthBlockSynced *state.Block) (*state.Block, error) { + // Call the blockchain to retrieve data + header, err := s.etherMan.HeaderByNumber(s.ctx, nil) + if err != nil { + log.Error("error getting header of the latest block in L1. Error: ", err) + return lastEthBlockSynced, err + } + lastKnownBlock := header.Number + // This function will read events fromBlockNum to latestEthBlock. Check reorg to be sure that everything is ok. block, err := s.checkReorg(lastEthBlockSynced) if err != nil { @@ -502,13 +510,6 @@ func (s *ClientSynchronizer) syncBlocksSequential(lastEthBlockSynced *state.Bloc return block, nil } - // Call the blockchain to retrieve data - header, err := s.etherMan.HeaderByNumber(s.ctx, nil) - if err != nil { - return lastEthBlockSynced, err - } - lastKnownBlock := header.Number - var fromBlock uint64 if lastEthBlockSynced.BlockNumber > 0 { fromBlock = lastEthBlockSynced.BlockNumber + 1 @@ -529,6 +530,22 @@ func (s *ClientSynchronizer) syncBlocksSequential(lastEthBlockSynced *state.Bloc if err != nil { return lastEthBlockSynced, err } + + // Check reorg again to be sure that the chain has not changed between the previous checkReorg and the call GetRollupInfoByBlockRange + block, err := s.checkReorg(lastEthBlockSynced) + if err != nil { + log.Errorf("error checking reorgs. Retrying... Err: %v", err) + return lastEthBlockSynced, fmt.Errorf("error checking reorgs") + } + if block != nil { + err = s.resetState(block.BlockNumber) + if err != nil { + log.Errorf("error resetting the state to a previous block. Retrying... Err: %v", err) + return lastEthBlockSynced, fmt.Errorf("error resetting the state to a previous block") + } + return block, nil + } + start = time.Now() err = s.ProcessBlockRange(blocks, order) metrics.ProcessL1DataTime(time.Since(start)) @@ -722,26 +739,27 @@ hash and has parent. This operation has to be done until a match is found. 
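// ---------------------------------------------------------------------------
// Editorial note (not part of the patch): the hunk below reworks checkReorg so
// the walk-back uses a local reorgedBlock copy instead of mutating the caller's
// latestBlock. As a reading aid, here is a self-contained sketch of the same
// walk-back idea under assumed, simplified interfaces; storedBlock, chainReader
// and blockStore are hypothetical stand-ins for the etherman client and the
// state storage, not project code.
// ---------------------------------------------------------------------------
package reorgsketch

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	ethtypes "github.com/ethereum/go-ethereum/core/types"
)

// storedBlock is a minimal view of an L1 block kept in the local database.
type storedBlock struct {
	Number     uint64
	Hash       common.Hash
	ParentHash common.Hash
}

// chainReader returns the canonical L1 block for a given number.
type chainReader interface {
	BlockByNumber(ctx context.Context, number uint64) (*ethtypes.Block, error)
}

// blockStore returns the stored block located depth positions before the
// latest synced one.
type blockStore interface {
	PreviousBlock(ctx context.Context, depth uint64) (*storedBlock, error)
}

// findLastCommonBlock walks back from the latest locally synced block until it
// finds one whose hash and parent hash still match the canonical chain. It
// returns nil when the latest block already matches (no reorg detected), or
// the last matching ancestor when a reorg is found.
func findLastCommonBlock(ctx context.Context, chain chainReader, store blockStore, latest storedBlock, genesisBlockNumber uint64) (*storedBlock, error) {
	candidate := latest
	var depth uint64
	for {
		onChain, err := chain.BlockByNumber(ctx, candidate.Number)
		if err != nil {
			return nil, err
		}
		if onChain.NumberU64() != candidate.Number {
			return nil, fmt.Errorf("block numbers don't match: stored %d, retrieved %d", candidate.Number, onChain.NumberU64())
		}
		stillCanonical := onChain.Hash() == candidate.Hash && onChain.ParentHash() == candidate.ParentHash
		if stillCanonical || candidate.Number <= genesisBlockNumber {
			break
		}
		// Hash or parent hash changed: this block was reorged, walk one stored block back.
		depth++
		prev, err := store.PreviousBlock(ctx, depth)
		if err != nil {
			return nil, err
		}
		candidate = *prev
	}
	if candidate.Hash != latest.Hash {
		return &candidate, nil // reorg detected: candidate is the last block still shared with L1
	}
	return nil, nil // no reorg
}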
func (s *ClientSynchronizer) checkReorg(latestBlock *state.Block) (*state.Block, error) { // This function only needs to worry about reorgs if some of the reorganized blocks contained rollup info. latestEthBlockSynced := *latestBlock + reorgedBlock := *latestBlock var depth uint64 for { - block, err := s.etherMan.EthBlockByNumber(s.ctx, latestBlock.BlockNumber) + block, err := s.etherMan.EthBlockByNumber(s.ctx, reorgedBlock.BlockNumber) if err != nil { - log.Errorf("error getting latest block synced from blockchain. Block: %d, error: %v", latestBlock.BlockNumber, err) + log.Errorf("error getting latest block synced from blockchain. Block: %d, error: %v", reorgedBlock.BlockNumber, err) return nil, err } - if block.NumberU64() != latestBlock.BlockNumber { + if block.NumberU64() != reorgedBlock.BlockNumber { err = fmt.Errorf("wrong ethereum block retrieved from blockchain. Block numbers don't match. BlockNumber stored: %d. BlockNumber retrieved: %d", - latestBlock.BlockNumber, block.NumberU64()) + reorgedBlock.BlockNumber, block.NumberU64()) log.Error("error: ", err) return nil, err } // Compare hashes - if (block.Hash() != latestBlock.BlockHash || block.ParentHash() != latestBlock.ParentHash) && latestBlock.BlockNumber > s.genesis.BlockNumber { - log.Infof("checkReorg: Bad block %d hashOk %t parentHashOk %t", latestBlock.BlockNumber, block.Hash() == latestBlock.BlockHash, block.ParentHash() == latestBlock.ParentHash) - log.Debug("[checkReorg function] => latestBlockNumber: ", latestBlock.BlockNumber) - log.Debug("[checkReorg function] => latestBlockHash: ", latestBlock.BlockHash) - log.Debug("[checkReorg function] => latestBlockHashParent: ", latestBlock.ParentHash) - log.Debug("[checkReorg function] => BlockNumber: ", latestBlock.BlockNumber, block.NumberU64()) + if (block.Hash() != reorgedBlock.BlockHash || block.ParentHash() != reorgedBlock.ParentHash) && reorgedBlock.BlockNumber > s.genesis.BlockNumber { + log.Infof("checkReorg: Bad block %d hashOk %t parentHashOk %t", reorgedBlock.BlockNumber, block.Hash() == reorgedBlock.BlockHash, block.ParentHash() == reorgedBlock.ParentHash) + log.Debug("[checkReorg function] => latestBlockNumber: ", reorgedBlock.BlockNumber) + log.Debug("[checkReorg function] => latestBlockHash: ", reorgedBlock.BlockHash) + log.Debug("[checkReorg function] => latestBlockHashParent: ", reorgedBlock.ParentHash) + log.Debug("[checkReorg function] => BlockNumber: ", reorgedBlock.BlockNumber, block.NumberU64()) log.Debug("[checkReorg function] => BlockHash: ", block.Hash()) log.Debug("[checkReorg function] => BlockHashParent: ", block.ParentHash()) depth++ @@ -752,7 +770,7 @@ func (s *ClientSynchronizer) checkReorg(latestBlock *state.Block) (*state.Block, log.Errorf("error creating db transaction to get prevoius blocks") return nil, err } - latestBlock, err = s.state.GetPreviousBlock(s.ctx, depth, dbTx) + lb, err := s.state.GetPreviousBlock(s.ctx, depth, dbTx) errC := dbTx.Commit(s.ctx) if errC != nil { log.Errorf("error committing dbTx, err: %v", errC) @@ -768,16 +786,21 @@ func (s *ClientSynchronizer) checkReorg(latestBlock *state.Block) (*state.Block, log.Warn("error checking reorg: previous block not found in db: ", err) return &state.Block{}, nil } else if err != nil { + log.Error("error getting previousBlock from db. 
Error: ", err) return nil, err } + reorgedBlock = *lb } else { + log.Debugf("checkReorg: Block %d hashOk %t parentHashOk %t", reorgedBlock.BlockNumber, block.Hash() == reorgedBlock.BlockHash, block.ParentHash() == reorgedBlock.ParentHash) break } } - if latestEthBlockSynced.BlockHash != latestBlock.BlockHash { + if latestEthBlockSynced.BlockHash != reorgedBlock.BlockHash { + latestBlock = &reorgedBlock log.Info("Reorg detected in block: ", latestEthBlockSynced.BlockNumber, " last block OK: ", latestBlock.BlockNumber) return latestBlock, nil } + log.Debugf("No reorg detected in block: %d. BlockHash: %s", latestEthBlockSynced.BlockNumber, latestEthBlockSynced.BlockHash.String()) return nil, nil } diff --git a/synchronizer/synchronizer_test.go b/synchronizer/synchronizer_test.go index bdf3b505b6..edd6927025 100644 --- a/synchronizer/synchronizer_test.go +++ b/synchronizer/synchronizer_test.go @@ -260,6 +260,11 @@ func TestForcedBatchEtrog(t *testing.T) { Return(blocks, order, nil). Once() + m.Etherman. + On("EthBlockByNumber", ctx, lastBlock.BlockNumber). + Return(ethBlock, nil). + Once() + m.ZKEVMClient. On("BatchNumber", ctx). Return(uint64(1), nil) @@ -509,6 +514,11 @@ func TestSequenceForcedBatchIncaberry(t *testing.T) { Return(blocks, order, nil). Once() + m.Etherman. + On("EthBlockByNumber", ctx, lastBlock.BlockNumber). + Return(ethBlock, nil). + Once() + m.State. On("BeginStateTransaction", ctx). Return(m.DbTx, nil). diff --git a/test/e2e/forced_batches_vector_shared.go b/test/e2e/forced_batches_vector_shared.go index e7680bfc74..b84f660fcd 100644 --- a/test/e2e/forced_batches_vector_shared.go +++ b/test/e2e/forced_batches_vector_shared.go @@ -93,7 +93,7 @@ func LaunchTestForcedBatchesVectorFilesGroup(t *testing.T, vectorFilesDir string } log.Info("#######################") - log.Info("# Verifying new leafs #") + log.Info("# Verifying new leaves #") log.Info("#######################") merkleTree := opsman.State().GetTree() for _, expectedNewLeaf := range testCase.ExpectedNewLeafs { diff --git a/test/e2e/state_test.go b/test/e2e/state_test.go index e921597077..20a652547a 100644 --- a/test/e2e/state_test.go +++ b/test/e2e/state_test.go @@ -82,7 +82,7 @@ func TestStateTransition(t *testing.T) { st := opsman.State() - // Check leafs + // Check leaves l2Block, err := st.GetLastL2Block(ctx, nil) require.NoError(t, err) for addrStr, leaf := range testCase.ExpectedNewLeafs { diff --git a/tools/genesis/genesisparser/genesisparser.go b/tools/genesis/genesisparser/genesisparser.go index 27a037ebe0..d6109ff969 100644 --- a/tools/genesis/genesisparser/genesisparser.go +++ b/tools/genesis/genesisparser/genesisparser.go @@ -16,32 +16,32 @@ type GenesisAccountTest struct { // GenesisTest2Actions change format from testvector to the used internaly func GenesisTest2Actions(accounts []GenesisAccountTest) []*state.GenesisAction { - leafs := make([]*state.GenesisAction, 0) + leaves := make([]*state.GenesisAction, 0) for _, acc := range accounts { if len(acc.Balance) != 0 && acc.Balance != "0" { - leafs = append(leafs, &state.GenesisAction{ + leaves = append(leaves, &state.GenesisAction{ Address: acc.Address, Type: int(merkletree.LeafTypeBalance), Value: acc.Balance, }) } if len(acc.Nonce) != 0 && acc.Nonce != "0" { - leafs = append(leafs, &state.GenesisAction{ + leaves = append(leaves, &state.GenesisAction{ Address: acc.Address, Type: int(merkletree.LeafTypeNonce), Value: acc.Nonce, }) } if len(acc.Bytecode) != 0 { - leafs = append(leafs, &state.GenesisAction{ + leaves = append(leaves, 
&state.GenesisAction{ Address: acc.Address, Type: int(merkletree.LeafTypeCode), Bytecode: acc.Bytecode, }) } for key, value := range acc.Storage { - leafs = append(leafs, &state.GenesisAction{ + leaves = append(leaves, &state.GenesisAction{ Address: acc.Address, Type: int(merkletree.LeafTypeStorage), StoragePosition: key, @@ -49,5 +49,5 @@ func GenesisTest2Actions(accounts []GenesisAccountTest) []*state.GenesisAction { }) } } - return leafs + return leaves } From 26ec1b555371a57e52dcd9349f72108368e379ac Mon Sep 17 00:00:00 2001 From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> Date: Thu, 4 Apr 2024 10:17:52 +0200 Subject: [PATCH 05/11] Feature/3518 use generic eth client for l2 (#3519) * #3518 compatibility with ethereum-API L2 node * migrate docker-compose to v2 because ubuntu:latest have deprecated it * fix case trusted URL is not set --- cmd/run.go | 23 ++++- synchronizer/actions/check_l2block.go | 13 ++- synchronizer/actions/check_l2block_test.go | 37 ++++--- ...vm_client_ethereum_compatible_interface.go | 98 +++++++++++++++++++ .../zkevm_ethereum_compatible_client.go | 21 ++++ synchronizer/synchronizer.go | 61 ++++++------ synchronizer/synchronizer_test.go | 34 ++++--- test/Makefile | 2 +- 8 files changed, 221 insertions(+), 68 deletions(-) create mode 100644 synchronizer/common/syncinterfaces/mocks/zkevm_client_ethereum_compatible_interface.go create mode 100644 synchronizer/common/syncinterfaces/zkevm_ethereum_compatible_client.go diff --git a/cmd/run.go b/cmd/run.go index 233b4453a6..cbc3f835e2 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -37,6 +37,7 @@ import ( "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" "github.com/0xPolygonHermez/zkevm-node/synchronizer" "github.com/0xPolygonHermez/zkevm-node/synchronizer/common/syncinterfaces" + "github.com/ethereum/go-ethereum/ethclient" "github.com/jackc/pgx/v4/pgxpool" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/urfave/cli/v2" @@ -280,6 +281,15 @@ func newEtherman(c config.Config) (*etherman.Client, error) { return etherman.NewClient(c.Etherman, c.NetworkConfig.L1Config) } +func newL2EthClient(url string) (*ethclient.Client, error) { + ethClient, err := ethclient.Dial(url) + if err != nil { + log.Errorf("error connecting L1 to %s: %+v", url, err) + return nil, err + } + return ethClient, nil +} + func runSynchronizer(cfg config.Config, etherman *etherman.Client, ethTxManagerStorage *ethtxmanager.PostgresStorage, st *state.State, pool *pool.Pool, eventLog *event.EventLog) { var trustedSequencerURL string var err error @@ -295,6 +305,17 @@ func runSynchronizer(cfg config.Config, etherman *etherman.Client, ethTxManagerS } log.Info("trustedSequencerURL ", trustedSequencerURL) } + var ethClientForL2 *ethclient.Client + if trustedSequencerURL != "" { + log.Infof("Creating L2 ethereum client %s", trustedSequencerURL) + ethClientForL2, err = newL2EthClient(trustedSequencerURL) + if err != nil { + log.Fatalf("Can't create L2 ethereum client. 
Err:%w", err) + } + } else { + ethClientForL2 = nil + log.Infof("skipping creating L2 ethereum client because URL is empty") + } zkEVMClient := client.NewClient(trustedSequencerURL) etherManForL1 := []syncinterfaces.EthermanFullInterface{} // If synchronizer are using sequential mode, we only need one etherman client @@ -310,7 +331,7 @@ func runSynchronizer(cfg config.Config, etherman *etherman.Client, ethTxManagerS etm := ethtxmanager.New(cfg.EthTxManager, etherman, ethTxManagerStorage, st) sy, err := synchronizer.NewSynchronizer( cfg.IsTrustedSequencer, etherman, etherManForL1, st, pool, etm, - zkEVMClient, eventLog, cfg.NetworkConfig.Genesis, cfg.Synchronizer, cfg.Log.Environment == "development", + zkEVMClient, ethClientForL2, eventLog, cfg.NetworkConfig.Genesis, cfg.Synchronizer, cfg.Log.Environment == "development", ) if err != nil { log.Fatal(err) diff --git a/synchronizer/actions/check_l2block.go b/synchronizer/actions/check_l2block.go index d2d546d6a4..14c9e5cb19 100644 --- a/synchronizer/actions/check_l2block.go +++ b/synchronizer/actions/check_l2block.go @@ -6,9 +6,9 @@ import ( "fmt" "math/big" - "github.com/0xPolygonHermez/zkevm-node/jsonrpc/types" "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/state" + "github.com/ethereum/go-ethereum/core/types" "github.com/jackc/pgx/v4" ) @@ -129,11 +129,14 @@ func (p *CheckL2BlockHash) iterationCheckL2Block(ctx context.Context, l2BlockNum } func compareL2Blocks(prefixLogs string, localL2Block *state.L2Block, trustedL2Block *types.Block) error { - if localL2Block == nil || trustedL2Block == nil || trustedL2Block.Hash == nil { - return fmt.Errorf("%s localL2Block or trustedL2Block or trustedHash are nil", prefixLogs) + if localL2Block == nil || trustedL2Block == nil { + return fmt.Errorf("%s localL2Block or trustedL2Block are nil", prefixLogs) + } + if localL2Block.Hash() != trustedL2Block.Hash() { + return fmt.Errorf("%s localL2Block.Hash %s and trustedL2Block.Hash %s are different", prefixLogs, localL2Block.Hash().String(), trustedL2Block.Hash().String()) } - if localL2Block.Hash() != *trustedL2Block.Hash { - return fmt.Errorf("%s localL2Block.Hash %s and trustedL2Block.Hash %s are different", prefixLogs, localL2Block.Hash().String(), (*trustedL2Block.Hash).String()) + if localL2Block.ParentHash() != trustedL2Block.ParentHash() { + return fmt.Errorf("%s localL2Block.ParentHash %s and trustedL2Block.ParentHash %s are different", prefixLogs, localL2Block.ParentHash().String(), trustedL2Block.ParentHash().String()) } return nil } diff --git a/synchronizer/actions/check_l2block_test.go b/synchronizer/actions/check_l2block_test.go index da4510fd66..28a8a503b7 100644 --- a/synchronizer/actions/check_l2block_test.go +++ b/synchronizer/actions/check_l2block_test.go @@ -5,7 +5,6 @@ import ( "math/big" "testing" - rpctypes "github.com/0xPolygonHermez/zkevm-node/jsonrpc/types" "github.com/0xPolygonHermez/zkevm-node/state" "github.com/0xPolygonHermez/zkevm-node/synchronizer/actions" mock_syncinterfaces "github.com/0xPolygonHermez/zkevm-node/synchronizer/common/syncinterfaces/mocks" @@ -19,7 +18,7 @@ import ( type CheckL2BlocksTestData struct { sut *actions.CheckL2BlockHash mockState *mock_syncinterfaces.StateFullInterface - zKEVMClient *mock_syncinterfaces.ZKEVMClientInterface + zKEVMClient *mock_syncinterfaces.ZKEVMClientEthereumCompatibleInterface } func TestCheckL2BlockHash_GetMinimumL2BlockToCheck(t *testing.T) { @@ -57,7 +56,7 @@ func TestCheckL2BlockHashNotEnoughBlocksToCheck(t *testing.T) { func 
newCheckL2BlocksTestData(t *testing.T, initialL2Block, modulus uint64) CheckL2BlocksTestData { res := CheckL2BlocksTestData{ mockState: mock_syncinterfaces.NewStateFullInterface(t), - zKEVMClient: mock_syncinterfaces.NewZKEVMClientInterface(t), + zKEVMClient: mock_syncinterfaces.NewZKEVMClientEthereumCompatibleInterface(t), } res.sut = actions.NewCheckL2BlockHash(res.mockState, res.zKEVMClient, initialL2Block, modulus) return res @@ -97,18 +96,23 @@ func TestCheckL2BlockHashMatch(t *testing.T) { data.mockState.EXPECT().GetLastL2BlockNumber(mock.Anything, mock.Anything).Return(lastL2Block, nil) data.mockState.EXPECT().GetL2BlockByNumber(mock.Anything, lastL2Block, mock.Anything).Return(stateBlock, nil) - l2blockHash := stateBlock.Hash() - rpcL2Block := rpctypes.Block{ - Hash: &l2blockHash, - Number: rpctypes.ArgUint64(lastL2Block), - } + //l2blockHash := stateBlock.Hash() + // rpcL2Block := rpctypes.Block{ + // Hash: &l2blockHash, + // Number: rpctypes.ArgUint64(lastL2Block), + // } + // create a types.Block object + + rpcL2Block := types.NewBlock(&types.Header{ + Number: big.NewInt(int64(lastL2Block)), + }, nil, nil, nil, nil) - data.zKEVMClient.EXPECT().BlockByNumber(mock.Anything, lastL2BlockBigInt).Return(&rpcL2Block, nil) + data.zKEVMClient.EXPECT().BlockByNumber(mock.Anything, lastL2BlockBigInt).Return(rpcL2Block, nil) err := data.sut.CheckL2Block(context.Background(), nil) require.NoError(t, err) } -func TestCheckL2BlockHashMissmatch(t *testing.T) { +func TestCheckL2BlockHashMismatch(t *testing.T) { data := newCheckL2BlocksTestData(t, 1, 10) lastL2Block := uint64(14) lastL2BlockBigInt := big.NewInt(int64(lastL2Block)) @@ -119,13 +123,14 @@ func TestCheckL2BlockHashMissmatch(t *testing.T) { data.mockState.EXPECT().GetLastL2BlockNumber(mock.Anything, mock.Anything).Return(lastL2Block, nil) data.mockState.EXPECT().GetL2BlockByNumber(mock.Anything, lastL2Block, mock.Anything).Return(stateBlock, nil) - l2blockHash := common.HexToHash("0x1234") - rpcL2Block := rpctypes.Block{ - Hash: &l2blockHash, - Number: rpctypes.ArgUint64(lastL2Block), - } + //l2blockHash := common.HexToHash("0x1234") + + rpcL2Block := types.NewBlock(&types.Header{ + Number: big.NewInt(int64(lastL2Block)), + ParentHash: common.HexToHash("0x1234"), + }, nil, nil, nil, nil) - data.zKEVMClient.EXPECT().BlockByNumber(mock.Anything, lastL2BlockBigInt).Return(&rpcL2Block, nil) + data.zKEVMClient.EXPECT().BlockByNumber(mock.Anything, lastL2BlockBigInt).Return(rpcL2Block, nil) err := data.sut.CheckL2Block(context.Background(), nil) require.Error(t, err) } diff --git a/synchronizer/common/syncinterfaces/mocks/zkevm_client_ethereum_compatible_interface.go b/synchronizer/common/syncinterfaces/mocks/zkevm_client_ethereum_compatible_interface.go new file mode 100644 index 0000000000..09c0b0f235 --- /dev/null +++ b/synchronizer/common/syncinterfaces/mocks/zkevm_client_ethereum_compatible_interface.go @@ -0,0 +1,98 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mock_syncinterfaces + +import ( + context "context" + big "math/big" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// ZKEVMClientEthereumCompatibleInterface is an autogenerated mock type for the ZKEVMClientEthereumCompatibleInterface type +type ZKEVMClientEthereumCompatibleInterface struct { + mock.Mock +} + +type ZKEVMClientEthereumCompatibleInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *ZKEVMClientEthereumCompatibleInterface) EXPECT() *ZKEVMClientEthereumCompatibleInterface_Expecter { + return &ZKEVMClientEthereumCompatibleInterface_Expecter{mock: &_m.Mock} +} + +// BlockByNumber provides a mock function with given fields: ctx, number +func (_m *ZKEVMClientEthereumCompatibleInterface) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for BlockByNumber") + } + + var r0 *types.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Block, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Block); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ZKEVMClientEthereumCompatibleInterface_BlockByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockByNumber' +type ZKEVMClientEthereumCompatibleInterface_BlockByNumber_Call struct { + *mock.Call +} + +// BlockByNumber is a helper method to define mock.On call +// - ctx context.Context +// - number *big.Int +func (_e *ZKEVMClientEthereumCompatibleInterface_Expecter) BlockByNumber(ctx interface{}, number interface{}) *ZKEVMClientEthereumCompatibleInterface_BlockByNumber_Call { + return &ZKEVMClientEthereumCompatibleInterface_BlockByNumber_Call{Call: _e.mock.On("BlockByNumber", ctx, number)} +} + +func (_c *ZKEVMClientEthereumCompatibleInterface_BlockByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *ZKEVMClientEthereumCompatibleInterface_BlockByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*big.Int)) + }) + return _c +} + +func (_c *ZKEVMClientEthereumCompatibleInterface_BlockByNumber_Call) Return(_a0 *types.Block, _a1 error) *ZKEVMClientEthereumCompatibleInterface_BlockByNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ZKEVMClientEthereumCompatibleInterface_BlockByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*types.Block, error)) *ZKEVMClientEthereumCompatibleInterface_BlockByNumber_Call { + _c.Call.Return(run) + return _c +} + +// NewZKEVMClientEthereumCompatibleInterface creates a new instance of ZKEVMClientEthereumCompatibleInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewZKEVMClientEthereumCompatibleInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *ZKEVMClientEthereumCompatibleInterface { + mock := &ZKEVMClientEthereumCompatibleInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/synchronizer/common/syncinterfaces/zkevm_ethereum_compatible_client.go b/synchronizer/common/syncinterfaces/zkevm_ethereum_compatible_client.go new file mode 100644 index 0000000000..416371dfce --- /dev/null +++ b/synchronizer/common/syncinterfaces/zkevm_ethereum_compatible_client.go @@ -0,0 +1,21 @@ +package syncinterfaces + +import ( + "context" + "math/big" + + "github.com/ethereum/go-ethereum/core/types" +) + +// ZKEVMClientEthereumCompatibleInterface contains the methods required to interact with zkEVM-RPC as a ethereum-API compatible +// +// Reason behind: the zkEVMClient have some extensions to ethereum-API that are not compatible with all nodes. So if you need to maximize +// the compatibility the idea is to use a regular ethereum-API compatible client +type ZKEVMClientEthereumCompatibleInterface interface { + ZKEVMClientEthereumCompatibleL2BlockGetter +} + +// ZKEVMClientEthereumCompatibleL2BlockGetter contains the methods required to interact with zkEVM-RPC as a ethereum-API compatible for obtain Block information +type ZKEVMClientEthereumCompatibleL2BlockGetter interface { + BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) +} diff --git a/synchronizer/synchronizer.go b/synchronizer/synchronizer.go index 996bbc3907..4cb93bab77 100644 --- a/synchronizer/synchronizer.go +++ b/synchronizer/synchronizer.go @@ -52,17 +52,18 @@ type ClientSynchronizer struct { etherMan syncinterfaces.EthermanFullInterface latestFlushID uint64 // If true the lastFlushID is stored in DB and we don't need to check again - latestFlushIDIsFulfilled bool - etherManForL1 []syncinterfaces.EthermanFullInterface - state syncinterfaces.StateFullInterface - pool syncinterfaces.PoolInterface - ethTxManager syncinterfaces.EthTxManager - zkEVMClient syncinterfaces.ZKEVMClientInterface - eventLog syncinterfaces.EventLogInterface - ctx context.Context - cancelCtx context.CancelFunc - genesis state.Genesis - cfg Config + latestFlushIDIsFulfilled bool + etherManForL1 []syncinterfaces.EthermanFullInterface + state syncinterfaces.StateFullInterface + pool syncinterfaces.PoolInterface + ethTxManager syncinterfaces.EthTxManager + zkEVMClient syncinterfaces.ZKEVMClientInterface + zkEVMClientEthereumCompatible syncinterfaces.ZKEVMClientEthereumCompatibleInterface + eventLog syncinterfaces.EventLogInterface + ctx context.Context + cancelCtx context.CancelFunc + genesis state.Genesis + cfg Config // Id of the 'process' of the executor. 
Each time that it starts this value changes // This value is obtained from the call state.GetStoredFlushID // It starts as an empty string and it is filled in the first call @@ -85,6 +86,7 @@ func NewSynchronizer( pool syncinterfaces.PoolInterface, ethTxManager syncinterfaces.EthTxManager, zkEVMClient syncinterfaces.ZKEVMClientInterface, + zkEVMClientEthereumCompatible syncinterfaces.ZKEVMClientEthereumCompatibleInterface, eventLog syncinterfaces.EventLogInterface, genesis state.Genesis, cfg Config, @@ -92,23 +94,24 @@ func NewSynchronizer( ctx, cancel := context.WithCancel(context.Background()) metrics.Register() res := &ClientSynchronizer{ - isTrustedSequencer: isTrustedSequencer, - state: st, - etherMan: ethMan, - etherManForL1: etherManForL1, - pool: pool, - ctx: ctx, - cancelCtx: cancel, - ethTxManager: ethTxManager, - zkEVMClient: zkEVMClient, - eventLog: eventLog, - genesis: genesis, - cfg: cfg, - proverID: "", - previousExecutorFlushID: 0, - l1SyncOrchestration: nil, - l1EventProcessors: nil, - halter: syncCommon.NewCriticalErrorHalt(eventLog, 5*time.Second), //nolint:gomnd + isTrustedSequencer: isTrustedSequencer, + state: st, + etherMan: ethMan, + etherManForL1: etherManForL1, + pool: pool, + ctx: ctx, + cancelCtx: cancel, + ethTxManager: ethTxManager, + zkEVMClient: zkEVMClient, + zkEVMClientEthereumCompatible: zkEVMClientEthereumCompatible, + eventLog: eventLog, + genesis: genesis, + cfg: cfg, + proverID: "", + previousExecutorFlushID: 0, + l1SyncOrchestration: nil, + l1EventProcessors: nil, + halter: syncCommon.NewCriticalErrorHalt(eventLog, 5*time.Second), //nolint:gomnd } if !isTrustedSequencer { @@ -143,7 +146,7 @@ func NewSynchronizer( log.Errorf("error getting last L2Block number from state. Error: %v", err) return nil, err } - l1checkerL2Blocks = actions.NewCheckL2BlockHash(res.state, res.zkEVMClient, initialL2Block, cfg.L1SyncCheckL2BlockNumberhModulus) + l1checkerL2Blocks = actions.NewCheckL2BlockHash(res.state, res.zkEVMClientEthereumCompatible, initialL2Block, cfg.L1SyncCheckL2BlockNumberhModulus) } else { log.Infof("Trusted Node can't check L2Block hash, ignoring parameter") } diff --git a/synchronizer/synchronizer_test.go b/synchronizer/synchronizer_test.go index edd6927025..a0be1b3c63 100644 --- a/synchronizer/synchronizer_test.go +++ b/synchronizer/synchronizer_test.go @@ -32,12 +32,13 @@ const ( ) type mocks struct { - Etherman *mock_syncinterfaces.EthermanFullInterface - State *mock_syncinterfaces.StateFullInterface - Pool *mock_syncinterfaces.PoolInterface - EthTxManager *mock_syncinterfaces.EthTxManager - DbTx *syncMocks.DbTxMock - ZKEVMClient *mock_syncinterfaces.ZKEVMClientInterface + Etherman *mock_syncinterfaces.EthermanFullInterface + State *mock_syncinterfaces.StateFullInterface + Pool *mock_syncinterfaces.PoolInterface + EthTxManager *mock_syncinterfaces.EthTxManager + DbTx *syncMocks.DbTxMock + ZKEVMClient *mock_syncinterfaces.ZKEVMClientInterface + zkEVMClientEthereumCompatible *mock_syncinterfaces.ZKEVMClientEthereumCompatibleInterface //EventLog *eventLogMock } @@ -47,7 +48,7 @@ type mocks struct { func TestGivenPermissionlessNodeWhenSyncronizeAgainSameBatchThenUseTheOneInMemoryInstaeadOfGettingFromDb(t *testing.T) { genesis, cfg, m := setupGenericTest(t) ethermanForL1 := []syncinterfaces.EthermanFullInterface{m.Etherman} - syncInterface, err := NewSynchronizer(false, m.Etherman, ethermanForL1, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, *genesis, *cfg, false) + syncInterface, err := NewSynchronizer(false, m.Etherman, ethermanForL1, 
m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, m.zkEVMClientEthereumCompatible, nil, *genesis, *cfg, false) require.NoError(t, err) sync, ok := syncInterface.(*ClientSynchronizer) require.EqualValues(t, true, ok, "Can't convert to underlaying struct the interface of syncronizer") @@ -87,7 +88,7 @@ func TestGivenPermissionlessNodeWhenSyncronizeAgainSameBatchThenUseTheOneInMemor func TestGivenPermissionlessNodeWhenSyncronizeFirstTimeABatchThenStoreItInALocalVar(t *testing.T) { genesis, cfg, m := setupGenericTest(t) ethermanForL1 := []syncinterfaces.EthermanFullInterface{m.Etherman} - syncInterface, err := NewSynchronizer(false, m.Etherman, ethermanForL1, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, *genesis, *cfg, false) + syncInterface, err := NewSynchronizer(false, m.Etherman, ethermanForL1, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, m.zkEVMClientEthereumCompatible, nil, *genesis, *cfg, false) require.NoError(t, err) sync, ok := syncInterface.(*ClientSynchronizer) require.EqualValues(t, true, ok, "Can't convert to underlaying struct the interface of syncronizer") @@ -135,7 +136,7 @@ func TestForcedBatchEtrog(t *testing.T) { ZKEVMClient: mock_syncinterfaces.NewZKEVMClientInterface(t), } ethermanForL1 := []syncinterfaces.EthermanFullInterface{m.Etherman} - sync, err := NewSynchronizer(false, m.Etherman, ethermanForL1, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, genesis, cfg, false) + sync, err := NewSynchronizer(false, m.Etherman, ethermanForL1, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, m.zkEVMClientEthereumCompatible, nil, genesis, cfg, false) require.NoError(t, err) // state preparation @@ -393,7 +394,7 @@ func TestSequenceForcedBatchIncaberry(t *testing.T) { ZKEVMClient: mock_syncinterfaces.NewZKEVMClientInterface(t), } ethermanForL1 := []syncinterfaces.EthermanFullInterface{m.Etherman} - sync, err := NewSynchronizer(true, m.Etherman, ethermanForL1, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, genesis, cfg, false) + sync, err := NewSynchronizer(true, m.Etherman, ethermanForL1, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, m.zkEVMClientEthereumCompatible, nil, genesis, cfg, false) require.NoError(t, err) // state preparation @@ -641,12 +642,13 @@ func setupGenericTest(t *testing.T) (*state.Genesis, *Config, *mocks) { } m := mocks{ - Etherman: mock_syncinterfaces.NewEthermanFullInterface(t), - State: mock_syncinterfaces.NewStateFullInterface(t), - Pool: mock_syncinterfaces.NewPoolInterface(t), - DbTx: syncMocks.NewDbTxMock(t), - ZKEVMClient: mock_syncinterfaces.NewZKEVMClientInterface(t), - EthTxManager: mock_syncinterfaces.NewEthTxManager(t), + Etherman: mock_syncinterfaces.NewEthermanFullInterface(t), + State: mock_syncinterfaces.NewStateFullInterface(t), + Pool: mock_syncinterfaces.NewPoolInterface(t), + DbTx: syncMocks.NewDbTxMock(t), + ZKEVMClient: mock_syncinterfaces.NewZKEVMClientInterface(t), + zkEVMClientEthereumCompatible: mock_syncinterfaces.NewZKEVMClientEthereumCompatibleInterface(t), + EthTxManager: mock_syncinterfaces.NewEthTxManager(t), //EventLog: newEventLogMock(t), } return &genesis, &cfg, &m diff --git a/test/Makefile b/test/Makefile index 7b6df67f6d..306cb71c98 100644 --- a/test/Makefile +++ b/test/Makefile @@ -1,4 +1,4 @@ -DOCKERCOMPOSE := docker-compose -f docker-compose.yml +DOCKERCOMPOSE := docker compose -f docker-compose.yml DOCKERCOMPOSEAPPSEQ := zkevm-sequencer DOCKERCOMPOSEAPPSEQV1TOV2 := zkevm-sequencer-v1tov2 DOCKERCOMPOSEAPPSEQSENDER := zkevm-sequence-sender From c04b2c2848205d17f5ff3d6875396be9c139ab80 Mon Sep 17 
00:00:00 2001 From: Thiago Coimbra Lemos Date: Thu, 4 Apr 2024 17:51:06 -0300 Subject: [PATCH 06/11] fix deltaTimeStamp and TimestampLimit for eth_call (#3529) --- state/transaction.go | 12 +- test/contracts/auto/CounterAndBlock.sol | 15 + .../bin/CounterAndBlock/CounterAndBlock.go | 287 ++++++++++++++++++ test/e2e/sc_test.go | 104 +++++++ 4 files changed, 412 insertions(+), 6 deletions(-) create mode 100644 test/contracts/auto/CounterAndBlock.sol create mode 100644 test/contracts/bin/CounterAndBlock/CounterAndBlock.go diff --git a/state/transaction.go b/state/transaction.go index 6b7907b8b5..355fdad9ad 100644 --- a/state/transaction.go +++ b/state/transaction.go @@ -509,8 +509,7 @@ func (s *State) internalProcessUnsignedTransactionV2(ctx context.Context, tx *ty } nonce := loadedNonce.Uint64() - deltaTimestamp := uint32(uint64(time.Now().Unix()) - l2Block.Time()) - transactions := s.BuildChangeL2Block(deltaTimestamp, uint32(0)) + transactions := s.BuildChangeL2Block(uint32(0), uint32(0)) batchL2Data, err := EncodeUnsignedTransaction(*tx, s.cfg.ChainID, &nonce, forkID) if err != nil { @@ -535,7 +534,7 @@ func (s *State) internalProcessUnsignedTransactionV2(ctx context.Context, tx *ty // v2 fields L1InfoRoot: l2Block.BlockInfoRoot().Bytes(), - TimestampLimit: uint64(time.Now().Unix()), + TimestampLimit: l2Block.Time(), SkipFirstChangeL2Block: cFalse, SkipWriteBlockInfoRoot: cTrue, } @@ -543,14 +542,15 @@ func (s *State) internalProcessUnsignedTransactionV2(ctx context.Context, tx *ty processBatchRequestV2.NoCounters = cTrue } - log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.From]: %v", processBatchRequestV2.From) log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.OldBatchNum]: %v", processBatchRequestV2.OldBatchNum) log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.OldStateRoot]: %v", hex.EncodeToHex(processBatchRequestV2.OldStateRoot)) log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.OldAccInputHash]: %v", hex.EncodeToHex(processBatchRequestV2.OldAccInputHash)) + log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.Coinbase]: %v", processBatchRequestV2.Coinbase) - log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.ForkId]: %v", processBatchRequestV2.ForkId) - log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.ChainId]: %v", processBatchRequestV2.ChainId) log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.UpdateMerkleTree]: %v", processBatchRequestV2.UpdateMerkleTree) + log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.ChainId]: %v", processBatchRequestV2.ChainId) + log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.ForkId]: %v", processBatchRequestV2.ForkId) + log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.From]: %v", processBatchRequestV2.From) log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.ContextId]: %v", processBatchRequestV2.ContextId) log.Debugf("internalProcessUnsignedTransactionV2[processBatchRequestV2.L1InfoRoot]: %v", hex.EncodeToHex(processBatchRequestV2.L1InfoRoot)) diff --git a/test/contracts/auto/CounterAndBlock.sol b/test/contracts/auto/CounterAndBlock.sol new file mode 100644 index 0000000000..53035f0634 --- /dev/null +++ b/test/contracts/auto/CounterAndBlock.sol @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity >=0.7.0 <0.9.0; + +contract CounterAndBlock { + uint public count; + + function increment() 
external { + count += 1; + } + + function getCount() public view returns (uint, uint) { + return (count, block.timestamp); + } +} diff --git a/test/contracts/bin/CounterAndBlock/CounterAndBlock.go b/test/contracts/bin/CounterAndBlock/CounterAndBlock.go new file mode 100644 index 0000000000..c066117f4d --- /dev/null +++ b/test/contracts/bin/CounterAndBlock/CounterAndBlock.go @@ -0,0 +1,287 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package CounterAndBlock + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +// CounterAndBlockMetaData contains all meta data concerning the CounterAndBlock contract. +var CounterAndBlockMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[],\"name\":\"count\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getCount\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"increment\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + Bin: "0x608060405234801561001057600080fd5b5060eb8061001f6000396000f3fe6080604052348015600f57600080fd5b5060043610603c5760003560e01c806306661abd146041578063a87d942c14605c578063d09de08a146071575b600080fd5b604960005481565b6040519081526020015b60405180910390f35b60005460408051918252426020830152016053565b60776079565b005b6001600080828254608991906090565b9091555050565b6000821982111560b057634e487b7160e01b600052601160045260246000fd5b50019056fea26469706673582212205aa9aebefdfb857d27d7bdc8475c08138617cc37e78c2e6bd98acb9a1484994964736f6c634300080c0033", +} + +// CounterAndBlockABI is the input ABI used to generate the binding from. +// Deprecated: Use CounterAndBlockMetaData.ABI instead. +var CounterAndBlockABI = CounterAndBlockMetaData.ABI + +// CounterAndBlockBin is the compiled bytecode used for deploying new contracts. +// Deprecated: Use CounterAndBlockMetaData.Bin instead. +var CounterAndBlockBin = CounterAndBlockMetaData.Bin + +// DeployCounterAndBlock deploys a new Ethereum contract, binding an instance of CounterAndBlock to it. 
+func DeployCounterAndBlock(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *CounterAndBlock, error) { + parsed, err := CounterAndBlockMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(CounterAndBlockBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &CounterAndBlock{CounterAndBlockCaller: CounterAndBlockCaller{contract: contract}, CounterAndBlockTransactor: CounterAndBlockTransactor{contract: contract}, CounterAndBlockFilterer: CounterAndBlockFilterer{contract: contract}}, nil +} + +// CounterAndBlock is an auto generated Go binding around an Ethereum contract. +type CounterAndBlock struct { + CounterAndBlockCaller // Read-only binding to the contract + CounterAndBlockTransactor // Write-only binding to the contract + CounterAndBlockFilterer // Log filterer for contract events +} + +// CounterAndBlockCaller is an auto generated read-only Go binding around an Ethereum contract. +type CounterAndBlockCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// CounterAndBlockTransactor is an auto generated write-only Go binding around an Ethereum contract. +type CounterAndBlockTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// CounterAndBlockFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type CounterAndBlockFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// CounterAndBlockSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type CounterAndBlockSession struct { + Contract *CounterAndBlock // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// CounterAndBlockCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type CounterAndBlockCallerSession struct { + Contract *CounterAndBlockCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// CounterAndBlockTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type CounterAndBlockTransactorSession struct { + Contract *CounterAndBlockTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// CounterAndBlockRaw is an auto generated low-level Go binding around an Ethereum contract. +type CounterAndBlockRaw struct { + Contract *CounterAndBlock // Generic contract binding to access the raw methods on +} + +// CounterAndBlockCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type CounterAndBlockCallerRaw struct { + Contract *CounterAndBlockCaller // Generic read-only contract binding to access the raw methods on +} + +// CounterAndBlockTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. 
+type CounterAndBlockTransactorRaw struct { + Contract *CounterAndBlockTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewCounterAndBlock creates a new instance of CounterAndBlock, bound to a specific deployed contract. +func NewCounterAndBlock(address common.Address, backend bind.ContractBackend) (*CounterAndBlock, error) { + contract, err := bindCounterAndBlock(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &CounterAndBlock{CounterAndBlockCaller: CounterAndBlockCaller{contract: contract}, CounterAndBlockTransactor: CounterAndBlockTransactor{contract: contract}, CounterAndBlockFilterer: CounterAndBlockFilterer{contract: contract}}, nil +} + +// NewCounterAndBlockCaller creates a new read-only instance of CounterAndBlock, bound to a specific deployed contract. +func NewCounterAndBlockCaller(address common.Address, caller bind.ContractCaller) (*CounterAndBlockCaller, error) { + contract, err := bindCounterAndBlock(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &CounterAndBlockCaller{contract: contract}, nil +} + +// NewCounterAndBlockTransactor creates a new write-only instance of CounterAndBlock, bound to a specific deployed contract. +func NewCounterAndBlockTransactor(address common.Address, transactor bind.ContractTransactor) (*CounterAndBlockTransactor, error) { + contract, err := bindCounterAndBlock(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &CounterAndBlockTransactor{contract: contract}, nil +} + +// NewCounterAndBlockFilterer creates a new log filterer instance of CounterAndBlock, bound to a specific deployed contract. +func NewCounterAndBlockFilterer(address common.Address, filterer bind.ContractFilterer) (*CounterAndBlockFilterer, error) { + contract, err := bindCounterAndBlock(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &CounterAndBlockFilterer{contract: contract}, nil +} + +// bindCounterAndBlock binds a generic wrapper to an already deployed contract. +func bindCounterAndBlock(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := CounterAndBlockMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_CounterAndBlock *CounterAndBlockRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _CounterAndBlock.Contract.CounterAndBlockCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_CounterAndBlock *CounterAndBlockRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _CounterAndBlock.Contract.CounterAndBlockTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. 
+func (_CounterAndBlock *CounterAndBlockRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _CounterAndBlock.Contract.CounterAndBlockTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_CounterAndBlock *CounterAndBlockCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _CounterAndBlock.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_CounterAndBlock *CounterAndBlockTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _CounterAndBlock.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_CounterAndBlock *CounterAndBlockTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _CounterAndBlock.Contract.contract.Transact(opts, method, params...) +} + +// Count is a free data retrieval call binding the contract method 0x06661abd. +// +// Solidity: function count() view returns(uint256) +func (_CounterAndBlock *CounterAndBlockCaller) Count(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _CounterAndBlock.contract.Call(opts, &out, "count") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// Count is a free data retrieval call binding the contract method 0x06661abd. +// +// Solidity: function count() view returns(uint256) +func (_CounterAndBlock *CounterAndBlockSession) Count() (*big.Int, error) { + return _CounterAndBlock.Contract.Count(&_CounterAndBlock.CallOpts) +} + +// Count is a free data retrieval call binding the contract method 0x06661abd. +// +// Solidity: function count() view returns(uint256) +func (_CounterAndBlock *CounterAndBlockCallerSession) Count() (*big.Int, error) { + return _CounterAndBlock.Contract.Count(&_CounterAndBlock.CallOpts) +} + +// GetCount is a free data retrieval call binding the contract method 0xa87d942c. +// +// Solidity: function getCount() view returns(uint256, uint256) +func (_CounterAndBlock *CounterAndBlockCaller) GetCount(opts *bind.CallOpts) (*big.Int, *big.Int, error) { + var out []interface{} + err := _CounterAndBlock.contract.Call(opts, &out, "getCount") + + if err != nil { + return *new(*big.Int), *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + out1 := *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) + + return out0, out1, err + +} + +// GetCount is a free data retrieval call binding the contract method 0xa87d942c. +// +// Solidity: function getCount() view returns(uint256, uint256) +func (_CounterAndBlock *CounterAndBlockSession) GetCount() (*big.Int, *big.Int, error) { + return _CounterAndBlock.Contract.GetCount(&_CounterAndBlock.CallOpts) +} + +// GetCount is a free data retrieval call binding the contract method 0xa87d942c. 
+// +// Solidity: function getCount() view returns(uint256, uint256) +func (_CounterAndBlock *CounterAndBlockCallerSession) GetCount() (*big.Int, *big.Int, error) { + return _CounterAndBlock.Contract.GetCount(&_CounterAndBlock.CallOpts) +} + +// Increment is a paid mutator transaction binding the contract method 0xd09de08a. +// +// Solidity: function increment() returns() +func (_CounterAndBlock *CounterAndBlockTransactor) Increment(opts *bind.TransactOpts) (*types.Transaction, error) { + return _CounterAndBlock.contract.Transact(opts, "increment") +} + +// Increment is a paid mutator transaction binding the contract method 0xd09de08a. +// +// Solidity: function increment() returns() +func (_CounterAndBlock *CounterAndBlockSession) Increment() (*types.Transaction, error) { + return _CounterAndBlock.Contract.Increment(&_CounterAndBlock.TransactOpts) +} + +// Increment is a paid mutator transaction binding the contract method 0xd09de08a. +// +// Solidity: function increment() returns() +func (_CounterAndBlock *CounterAndBlockTransactorSession) Increment() (*types.Transaction, error) { + return _CounterAndBlock.Contract.Increment(&_CounterAndBlock.TransactOpts) +} diff --git a/test/e2e/sc_test.go b/test/e2e/sc_test.go index 736e47ded4..46311b4eb9 100644 --- a/test/e2e/sc_test.go +++ b/test/e2e/sc_test.go @@ -9,6 +9,7 @@ import ( "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/state" "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/Counter" + "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/CounterAndBlock" "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/EmitLog2" "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/FailureTest" "github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/Read" @@ -646,3 +647,106 @@ func TestRead(t *testing.T) { require.Equal(t, 0, big.NewInt(2).Cmp(value)) } } + +func TestCounterAndBlock(t *testing.T) { + if testing.Short() { + t.Skip() + } + + var err error + err = operations.Teardown() + require.NoError(t, err) + + defer func() { require.NoError(t, operations.Teardown()) }() + + ctx := context.Background() + opsCfg := operations.GetDefaultOperationsConfig() + opsMan, err := operations.NewManager(ctx, opsCfg) + require.NoError(t, err) + err = opsMan.Setup() + require.NoError(t, err) + + for _, network := range networks { + log.Debugf(network.Name) + client := operations.MustGetClient(network.URL) + auth := operations.MustGetAuth(network.PrivateKey, network.ChainID) + + _, scTx, sc, err := CounterAndBlock.DeployCounterAndBlock(auth, client) + require.NoError(t, err) + + logTx(scTx) + err = operations.WaitTxToBeMined(ctx, client, scTx, operations.DefaultTimeoutTxToBeMined) + require.NoError(t, err) + + scReceipt, err := client.TransactionReceipt(ctx, scTx.Hash()) + require.NoError(t, err) + + scBlock, err := client.BlockByNumber(ctx, scReceipt.BlockNumber) + require.NoError(t, err) + + count, ts, err := sc.GetCount(&bind.CallOpts{Pending: false, BlockNumber: scBlock.Number()}) + require.NoError(t, err) + + assert.Equal(t, 0, count.Cmp(big.NewInt(0))) + assert.Equal(t, ts.Uint64(), scBlock.Time()) + + const numberOfIncrements = 5 + type result struct { + tx *types.Transaction + receipt *types.Receipt + block *types.Block + expectedCount *big.Int + } + + results := make([]result, 0, numberOfIncrements) + for i := 0; i < numberOfIncrements; i++ { + tx, err := sc.Increment(auth) + require.NoError(t, err) + + logTx(tx) + err = operations.WaitTxToBeMined(ctx, client, tx, 
operations.DefaultTimeoutTxToBeMined) + require.NoError(t, err) + + receipt, err := client.TransactionReceipt(ctx, tx.Hash()) + require.NoError(t, err) + + block, err := client.BlockByNumber(ctx, receipt.BlockNumber) + require.NoError(t, err) + + results = append(results, result{ + tx: tx, + expectedCount: big.NewInt(int64(i) + 1), + receipt: receipt, + block: block, + }) + } + + const numberOfChecks = 2 + + // checks against first increment + for _, r := range results { + for i := 0; i < numberOfChecks; i++ { + count, ts, err = sc.GetCount(&bind.CallOpts{Pending: false, BlockNumber: r.block.Number()}) + require.NoError(t, err) + assert.Equal(t, r.expectedCount.Uint64(), count.Uint64()) + assert.Equal(t, r.block.Time(), ts.Uint64()) + + time.Sleep(time.Second) + } + } + + latestIncrement := results[len(results)-1] + // checks against second increment with latest block + for i := 0; i < numberOfChecks; i++ { + latestBlock, err := client.BlockByNumber(ctx, nil) + require.NoError(t, err) + + count, ts, err = sc.GetCount(&bind.CallOpts{Pending: false}) + require.NoError(t, err) + assert.Equal(t, latestIncrement.expectedCount.Uint64(), count.Uint64()) + assert.Equal(t, latestBlock.Time(), ts.Uint64()) + + time.Sleep(time.Second) + } + } +} From 208a4eeece3ceab9553bacc827e55cf867fb351a Mon Sep 17 00:00:00 2001 From: agnusmor <100322135+agnusmor@users.noreply.github.com> Date: Mon, 8 Apr 2024 10:39:43 +0200 Subject: [PATCH 07/11] Stop syncing l1inforoot when invalid l1inforoot is detected (#3542) * stop syncing l1inforoot when invalid l1inforoot is detected * fix linter --- event/event.go | 2 ++ sequencer/finalizer.go | 17 +++++++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/event/event.go b/event/event.go index f43502df67..e6a72799ce 100644 --- a/event/event.go +++ b/event/event.go @@ -48,6 +48,8 @@ const ( EventID_UsedZKCountersOverflow EventID = "USED ZKCOUNTERS OVERFLOW" // EventID_ReservedZKCountersOverflow is triggered when reserved ZK counters exceeds remaining batch ZK counters EventID_ReservedZKCountersOverflow EventID = "RESERVED ZKCOUNTERS OVERFLOW" + // EventID_InvalidInfoRoot is triggered when an invalid l1InfoRoot was synced + EventID_InvalidInfoRoot EventID = "INVALID INFOROOT" // Source_Node is the source of the event Source_Node Source = "node" diff --git a/sequencer/finalizer.go b/sequencer/finalizer.go index 744fdbeb8f..7a77f330e1 100644 --- a/sequencer/finalizer.go +++ b/sequencer/finalizer.go @@ -224,6 +224,17 @@ func (f *finalizer) checkL1InfoTreeUpdate(ctx context.Context) { firstL1InfoRootUpdate := true skipFirstSleep := true + if f.cfg.L1InfoTreeCheckInterval.Duration.Seconds() == 999999 { //nolint:gomnd + if !f.lastL1InfoTreeValid { + f.lastL1InfoTreeCond.L.Lock() + f.lastL1InfoTreeValid = true + f.lastL1InfoTreeCond.Broadcast() + f.lastL1InfoTreeCond.L.Unlock() + } + + return + } + for { if skipFirstSleep { skipFirstSleep = false @@ -271,9 +282,11 @@ func (f *finalizer) checkL1InfoTreeUpdate(ctx context.Context) { continue } if l1BlockState.BlockHash != l1BlockEth.Hash() { - log.Warnf("skipping use of l1InfoTreeIndex %d, L1 block %d blockhash %s doesn't match blockhash on ethereum %s (L1 reorg?)", + warnmsg := fmt.Sprintf("invalid l1InfoTreeIndex %d, L1 block %d blockhash %s doesn't match blockhash on ethereum %s (L1 reorg?). 
Stopping syncing l1IntroTreeIndex", l1InfoRoot.L1InfoTreeIndex, l1InfoRoot.BlockNumber, l1BlockState.BlockHash, l1BlockEth.Hash()) - continue + log.Warn(warnmsg) + f.LogEvent(ctx, event.Level_Critical, event.EventID_InvalidInfoRoot, warnmsg, nil) + return } } From 6ca046478ceaeedf3041c83b68f0668942fb1ab6 Mon Sep 17 00:00:00 2001 From: Alonso Rodriguez Date: Mon, 8 Apr 2024 13:14:37 +0200 Subject: [PATCH 08/11] Feature/#3544 sync block protection (#3545) * SyncBlockProtection parameter * linter * config * Fix unit test --- config/default.go | 1 + .../environments/local/local.node.config.toml | 1 + docs/config-file/node-config-doc.html | 2 +- docs/config-file/node-config-doc.md | 55 ++++++++++++------- docs/config-file/node-config-schema.json | 5 ++ etherman/etherman.go | 1 - synchronizer/config.go | 2 + synchronizer/synchronizer.go | 30 +++++++++- synchronizer/synchronizer_test.go | 16 ++++-- test/config/test.node.config.toml | 1 + 10 files changed, 87 insertions(+), 27 deletions(-) diff --git a/config/default.go b/config/default.go index 56b95cfd28..d5703c0a43 100644 --- a/config/default.go +++ b/config/default.go @@ -102,6 +102,7 @@ EnableHttpLog = true SyncInterval = "1s" SyncChunkSize = 100 TrustedSequencerURL = "" # If it is empty or not specified, then the value is read from the smc +SyncBlockProtection = "latest" # latest, finalized, safe L1SynchronizationMode = "sequential" L1SyncCheckL2BlockHash = true L1SyncCheckL2BlockNumberhModulus = 30 diff --git a/config/environments/local/local.node.config.toml b/config/environments/local/local.node.config.toml index c637c12997..7fdad2a456 100644 --- a/config/environments/local/local.node.config.toml +++ b/config/environments/local/local.node.config.toml @@ -82,6 +82,7 @@ EnableL2SuggestedGasPricePolling = true SyncInterval = "1s" SyncChunkSize = 100 TrustedSequencerURL = "" # If it is empty or not specified, then the value is read from the smc +SyncBlockProtection = "latest" # latest, finalized, safe [Sequencer] DeletePoolTxsL1BlockConfirmations = 100 diff --git a/docs/config-file/node-config-doc.html b/docs/config-file/node-config-doc.html index 04b0f12159..528a0708eb 100644 --- a/docs/config-file/node-config-doc.html +++ b/docs/config-file/node-config-doc.html @@ -16,7 +16,7 @@
"300ms"
 

Default: 500Type: number

MaxRequestsPerIPAndSecond defines how many requests a single IP can
send within a single second


Default: ""Type: string

SequencerNodeURI is used to allow Non-Sequencer nodes
to relay transactions to the Sequencer node


Default: 0Type: integer

MaxCumulativeGasUsed is the max gas allowed per batch


WebSockets configuration
Default: trueType: boolean

Enabled defines if the WebSocket requests are enabled or disabled


Default: "0.0.0.0"Type: string

Host defines the network adapter that will be used to serve the WS requests


Default: 8546Type: integer

Port defines the port to serve the endpoints via WS


Default: 104857600Type: integer

ReadLimit defines the maximum size of a message read from the client (in bytes)


Default: trueType: boolean

EnableL2SuggestedGasPricePolling enables polling of the L2 suggested gas price so the RPC can block txs with a lower gas price.


Default: falseType: boolean

BatchRequestsEnabled defines if the Batch requests are enabled or disabled


Default: 20Type: integer

BatchRequestsLimit defines the limit of requests that can be incorporated into each batch request


Type: array of integer

L2Coinbase defines which address is going to receive the fees

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Default: 10000Type: integer

MaxLogsCount is a configuration to set the max number of logs that can be returned
in a single call to the state, if zero it means no limit


Default: 10000Type: integer

MaxLogsBlockRange is a configuration to set the max range for block number when querying TXs
logs in a single call to the state, if zero it means no limit


Default: 60000Type: integer

MaxNativeBlockHashBlockRange is a configuration to set the max range for block number when querying
native block hashes in a single call to the state, if zero it means no limit


Default: trueType: boolean

EnableHttpLog allows the user to enable or disable the logs related to the HTTP
requests to be captured by the server.


ZKCountersLimits defines the ZK Counter limits
Default: 0Type: integer

Default: 0Type: integer

Default: 0Type: integer

Default: 0Type: integer

Default: 0Type: integer

Default: 0Type: integer

Default: 0Type: integer

Default: 0Type: integer

Configuration of the `Synchronizer` service. For this service the value of `IsTrustedSequencer` is also really important, because depending on it the node will or will not ask a trusted node for trusted transactions
Default: "1s"Type: string

SyncInterval is the delay interval between reading new rollup information


Examples:

"1m"
 
"300ms"
-

Default: 100Type: integer

SyncChunkSize is the number of blocks to sync on each chunk


Default: ""Type: string

TrustedSequencerURL is the rpc url to connect and sync the trusted state


Default: trueType: boolean

L1SyncCheckL2BlockHash: if true, when a batch is closed the L2Block hash is checked against the trusted node (only applies to permissionless nodes)


Default: 30Type: integer

L1SyncCheckL2BlockNumberhModulus is the modulus used to choose which l2block to check:
a modulus of 5, for instance, means checking every l2block that is a multiple of 5 (10, 15, 20, ...)


Default: "sequential"Type: enum (of string)

L1SynchronizationMode define how to synchronize with L1:
- parallel: Request data to L1 in parallel, and process sequentially. The advantage is that executor is not blocked waiting for L1 data
- sequential: Request data to L1 and execute

Must be one of:

  • "sequential"
  • "parallel"

L1ParallelSynchronization Configuration for parallel mode (if L1SynchronizationMode equal to 'parallel')
Default: 10Type: integer

MaxClients Number of clients used to synchronize with L1


Default: 25Type: integer

MaxPendingNoProcessedBlocks is the size of the buffer used to store rollup information from L1; it must be >= NumberOfEthereumClientsToSync,
and the suggested value is twice NumberOfParallelOfEthereumClients


Default: "5s"Type: string

RequestLastBlockPeriod is the time to wait before requesting the
last block from L1 to know whether more data needs to be retrieved.
This value only applies when the system is synchronized


Examples:

"1m"
+

Default: 100Type: integer

SyncChunkSize is the number of blocks to sync on each chunk


Default: ""Type: string

TrustedSequencerURL is the rpc url to connect and sync the trusted state


Default: "latest"Type: string

SyncBlockProtection specifies the state to sync to (latest, finalized or safe)
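For illustration only, a minimal Go sketch (function name, input normalization and error handling are assumptions, not necessarily the node's actual decodeSyncBlockProtection) of how this string option can be mapped to a go-ethereum rpc.BlockNumber tag:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/rpc"
)

// decodeSyncBlockProtection maps the configured SyncBlockProtection string to the
// block tag used when querying L1. Illustrative sketch only.
func decodeSyncBlockProtection(s string) (rpc.BlockNumber, error) {
	switch strings.ToLower(strings.TrimSpace(s)) {
	case "latest":
		return rpc.LatestBlockNumber, nil
	case "finalized":
		return rpc.FinalizedBlockNumber, nil
	case "safe":
		return rpc.SafeBlockNumber, nil
	default:
		return 0, fmt.Errorf("invalid SyncBlockProtection value: %q", s)
	}
}

func main() {
	tag, err := decodeSyncBlockProtection("finalized")
	if err != nil {
		panic(err)
	}
	// go-ethereum encodes the named tags as negative sentinel values
	fmt.Printf("syncing against block tag %d\n", int64(tag))
}
```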


Default: trueType: boolean

L1SyncCheckL2BlockHash: if true, when a batch is closed the L2Block hash is checked against the trusted node (only applies to permissionless nodes)
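As a rough illustration of this check (hypothetical helper, not the synchronizer's actual code), a permissionless node can fetch the same L2 block from the trusted node through the standard ethereum API and compare hashes:

```go
package l2check

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

// VerifyAgainstTrustedNode fetches block `number` from an ethereum-API compatible
// trusted RPC endpoint and compares its hash with the locally computed one.
// Sketch only; the real check in the patch also compares parent hashes.
func VerifyAgainstTrustedNode(ctx context.Context, trustedURL string, number uint64, localHash common.Hash) error {
	client, err := ethclient.Dial(trustedURL)
	if err != nil {
		return fmt.Errorf("connecting to trusted node: %w", err)
	}
	defer client.Close()

	trustedBlock, err := client.BlockByNumber(ctx, new(big.Int).SetUint64(number))
	if err != nil {
		return fmt.Errorf("fetching trusted block %d: %w", number, err)
	}
	if trustedBlock.Hash() != localHash {
		return fmt.Errorf("l2 block %d hash mismatch: local %s, trusted %s", number, localHash, trustedBlock.Hash())
	}
	return nil
}
```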


Default: 30Type: integer

L1SyncCheckL2BlockNumberhModulus is the modulus used to choose which l2block to check:
a modulus of 5, for instance, means checking every l2block that is a multiple of 5 (10, 15, 20, ...)
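A tiny sketch of this selection rule (hypothetical helper, not the node's actual implementation):

```go
package l2check

// shouldCheckL2Block reports whether an L2 block is selected for hash checking
// under the configured modulus; with a modulus of 5 it selects 5, 10, 15, ...
// Blocks below the configured initial block are skipped in this sketch.
func shouldCheckL2Block(blockNumber, initialBlock, modulus uint64) bool {
	if modulus == 0 || blockNumber < initialBlock {
		return false
	}
	return blockNumber%modulus == 0
}
```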


Default: "sequential"Type: enum (of string)

L1SynchronizationMode define how to synchronize with L1:
- parallel: Request data to L1 in parallel, and process sequentially. The advantage is that executor is not blocked waiting for L1 data
- sequential: Request data to L1 and execute

Must be one of:

  • "sequential"
  • "parallel"

L1ParallelSynchronization Configuration for parallel mode (if L1SynchronizationMode equal to 'parallel')
Default: 10Type: integer

MaxClients Number of clients used to synchronize with L1


Default: 25Type: integer

MaxPendingNoProcessedBlocks is the size of the buffer used to store rollup information from L1; it must be >= NumberOfEthereumClientsToSync,
and the suggested value is twice NumberOfParallelOfEthereumClients


Default: "5s"Type: string

RequestLastBlockPeriod is the time to wait before requesting the
last block from L1 to know whether more data needs to be retrieved.
This value only applies when the system is synchronized


Examples:

"1m"
 
"300ms"
 

Consumer Configuration for the consumer of rollup information from L1
Default: "5s"Type: string

AceptableInacctivityTime is the expected maximum time that the consumer
could wait until new data is produced. If the wait is longer, a warning log is emitted.
The idea is to keep the consumer working as much as possible, so if the producer is not
fast enough you could increase the number of parallel clients used to sync with L1


Examples:

"1m"
 
"300ms"
diff --git a/docs/config-file/node-config-doc.md b/docs/config-file/node-config-doc.md
index 94e0ac56d5..75fd74c99a 100644
--- a/docs/config-file/node-config-doc.md
+++ b/docs/config-file/node-config-doc.md
@@ -1339,6 +1339,7 @@ because depending of this values is going to ask to a trusted node for trusted t
 | - [SyncInterval](#Synchronizer_SyncInterval )                                         | No      | string           | No         | -          | Duration                                                                                                                                                                                                                                                |
 | - [SyncChunkSize](#Synchronizer_SyncChunkSize )                                       | No      | integer          | No         | -          | SyncChunkSize is the number of blocks to sync on each chunk                                                                                                                                                                                             |
 | - [TrustedSequencerURL](#Synchronizer_TrustedSequencerURL )                           | No      | string           | No         | -          | TrustedSequencerURL is the rpc url to connect and sync the trusted state                                                                                                                                                                                |
+| - [SyncBlockProtection](#Synchronizer_SyncBlockProtection )                           | No      | string           | No         | -          | SyncBlockProtection specifies the state to sync (latest, finalized or safe)                                                                                                                                                                             |
 | - [L1SyncCheckL2BlockHash](#Synchronizer_L1SyncCheckL2BlockHash )                     | No      | boolean          | No         | -          | L1SyncCheckL2BlockHash if is true when a batch is closed is force to check  L2Block hash against trustedNode (only apply for permissionless)                                                                                                            |
 | - [L1SyncCheckL2BlockNumberhModulus](#Synchronizer_L1SyncCheckL2BlockNumberhModulus ) | No      | integer          | No         | -          | L1SyncCheckL2BlockNumberhModulus is the modulus used to choose the l2block to check
a modules 5, for instance, means check all l2block multiples of 5 (10,15,20,...) | | - [L1SynchronizationMode](#Synchronizer_L1SynchronizationMode ) | No | enum (of string) | No | - | L1SynchronizationMode define how to synchronize with L1:
- parallel: Request data to L1 in parallel, and process sequentially. The advantage is that executor is not blocked waiting for L1 data
- sequential: Request data to L1 and execute | @@ -1399,7 +1400,21 @@ SyncChunkSize=100 TrustedSequencerURL="" ``` -### 9.4. `Synchronizer.L1SyncCheckL2BlockHash` +### 9.4. `Synchronizer.SyncBlockProtection` + +**Type:** : `string` + +**Default:** `"latest"` + +**Description:** SyncBlockProtection specify the state to sync (lastest, finalized or safe) + +**Example setting the default value** ("latest"): +``` +[Synchronizer] +SyncBlockProtection="latest" +``` + +### 9.5. `Synchronizer.L1SyncCheckL2BlockHash` **Type:** : `boolean` @@ -1413,7 +1428,7 @@ TrustedSequencerURL="" L1SyncCheckL2BlockHash=true ``` -### 9.5. `Synchronizer.L1SyncCheckL2BlockNumberhModulus` +### 9.6. `Synchronizer.L1SyncCheckL2BlockNumberhModulus` **Type:** : `integer` @@ -1428,7 +1443,7 @@ a modules 5, for instance, means check all l2block multiples of 5 (10,15,20,...) L1SyncCheckL2BlockNumberhModulus=30 ``` -### 9.6. `Synchronizer.L1SynchronizationMode` +### 9.7. `Synchronizer.L1SynchronizationMode` **Type:** : `enum (of string)` @@ -1448,7 +1463,7 @@ Must be one of: * "sequential" * "parallel" -### 9.7. `[Synchronizer.L1ParallelSynchronization]` +### 9.8. `[Synchronizer.L1ParallelSynchronization]` **Type:** : `object` **Description:** L1ParallelSynchronization Configuration for parallel mode (if L1SynchronizationMode equal to 'parallel') @@ -1466,7 +1481,7 @@ Must be one of: | - [RollupInfoRetriesSpacing](#Synchronizer_L1ParallelSynchronization_RollupInfoRetriesSpacing ) | No | string | No | - | Duration | | - [FallbackToSequentialModeOnSynchronized](#Synchronizer_L1ParallelSynchronization_FallbackToSequentialModeOnSynchronized ) | No | boolean | No | - | FallbackToSequentialModeOnSynchronized if true switch to sequential mode if the system is synchronized | -#### 9.7.1. `Synchronizer.L1ParallelSynchronization.MaxClients` +#### 9.8.1. `Synchronizer.L1ParallelSynchronization.MaxClients` **Type:** : `integer` @@ -1480,7 +1495,7 @@ Must be one of: MaxClients=10 ``` -#### 9.7.2. `Synchronizer.L1ParallelSynchronization.MaxPendingNoProcessedBlocks` +#### 9.8.2. `Synchronizer.L1ParallelSynchronization.MaxPendingNoProcessedBlocks` **Type:** : `integer` @@ -1495,7 +1510,7 @@ sugested twice of NumberOfParallelOfEthereumClients MaxPendingNoProcessedBlocks=25 ``` -#### 9.7.3. `Synchronizer.L1ParallelSynchronization.RequestLastBlockPeriod` +#### 9.8.3. `Synchronizer.L1ParallelSynchronization.RequestLastBlockPeriod` **Title:** Duration @@ -1523,7 +1538,7 @@ This value only apply when the system is synchronized RequestLastBlockPeriod="5s" ``` -#### 9.7.4. `[Synchronizer.L1ParallelSynchronization.PerformanceWarning]` +#### 9.8.4. `[Synchronizer.L1ParallelSynchronization.PerformanceWarning]` **Type:** : `object` **Description:** Consumer Configuration for the consumer of rollup information from L1 @@ -1533,7 +1548,7 @@ RequestLastBlockPeriod="5s" | - [AceptableInacctivityTime](#Synchronizer_L1ParallelSynchronization_PerformanceWarning_AceptableInacctivityTime ) | No | string | No | - | Duration | | - [ApplyAfterNumRollupReceived](#Synchronizer_L1ParallelSynchronization_PerformanceWarning_ApplyAfterNumRollupReceived ) | No | integer | No | - | ApplyAfterNumRollupReceived is the number of iterations to
start checking the time waiting for new rollup info data | -##### 9.7.4.1. `Synchronizer.L1ParallelSynchronization.PerformanceWarning.AceptableInacctivityTime` +##### 9.8.4.1. `Synchronizer.L1ParallelSynchronization.PerformanceWarning.AceptableInacctivityTime` **Title:** Duration @@ -1562,7 +1577,7 @@ fast enought then you could increse the number of parallel clients to sync with AceptableInacctivityTime="5s" ``` -##### 9.7.4.2. `Synchronizer.L1ParallelSynchronization.PerformanceWarning.ApplyAfterNumRollupReceived` +##### 9.8.4.2. `Synchronizer.L1ParallelSynchronization.PerformanceWarning.ApplyAfterNumRollupReceived` **Type:** : `integer` @@ -1577,7 +1592,7 @@ start checking the time waiting for new rollup info data ApplyAfterNumRollupReceived=10 ``` -#### 9.7.5. `Synchronizer.L1ParallelSynchronization.RequestLastBlockTimeout` +#### 9.8.5. `Synchronizer.L1ParallelSynchronization.RequestLastBlockTimeout` **Title:** Duration @@ -1603,7 +1618,7 @@ ApplyAfterNumRollupReceived=10 RequestLastBlockTimeout="5s" ``` -#### 9.7.6. `Synchronizer.L1ParallelSynchronization.RequestLastBlockMaxRetries` +#### 9.8.6. `Synchronizer.L1ParallelSynchronization.RequestLastBlockMaxRetries` **Type:** : `integer` @@ -1617,7 +1632,7 @@ RequestLastBlockTimeout="5s" RequestLastBlockMaxRetries=3 ``` -#### 9.7.7. `Synchronizer.L1ParallelSynchronization.StatisticsPeriod` +#### 9.8.7. `Synchronizer.L1ParallelSynchronization.StatisticsPeriod` **Title:** Duration @@ -1643,7 +1658,7 @@ RequestLastBlockMaxRetries=3 StatisticsPeriod="5m0s" ``` -#### 9.7.8. `Synchronizer.L1ParallelSynchronization.TimeOutMainLoop` +#### 9.8.8. `Synchronizer.L1ParallelSynchronization.TimeOutMainLoop` **Title:** Duration @@ -1669,7 +1684,7 @@ StatisticsPeriod="5m0s" TimeOutMainLoop="5m0s" ``` -#### 9.7.9. `Synchronizer.L1ParallelSynchronization.RollupInfoRetriesSpacing` +#### 9.8.9. `Synchronizer.L1ParallelSynchronization.RollupInfoRetriesSpacing` **Title:** Duration @@ -1695,7 +1710,7 @@ TimeOutMainLoop="5m0s" RollupInfoRetriesSpacing="5s" ``` -#### 9.7.10. `Synchronizer.L1ParallelSynchronization.FallbackToSequentialModeOnSynchronized` +#### 9.8.10. `Synchronizer.L1ParallelSynchronization.FallbackToSequentialModeOnSynchronized` **Type:** : `boolean` @@ -1709,7 +1724,7 @@ RollupInfoRetriesSpacing="5s" FallbackToSequentialModeOnSynchronized=false ``` -### 9.8. `[Synchronizer.L2Synchronization]` +### 9.9. `[Synchronizer.L2Synchronization]` **Type:** : `object` **Description:** L2Synchronization Configuration for L2 synchronization @@ -1720,7 +1735,7 @@ FallbackToSequentialModeOnSynchronized=false | - [ReprocessFullBatchOnClose](#Synchronizer_L2Synchronization_ReprocessFullBatchOnClose ) | No | boolean | No | - | ReprocessFullBatchOnClose if is true when a batch is closed is force to reprocess again | | - [CheckLastL2BlockHashOnCloseBatch](#Synchronizer_L2Synchronization_CheckLastL2BlockHashOnCloseBatch ) | No | boolean | No | - | CheckLastL2BlockHashOnCloseBatch if is true when a batch is closed is force to check the last L2Block hash | -#### 9.8.1. `Synchronizer.L2Synchronization.AcceptEmptyClosedBatches` +#### 9.9.1. `Synchronizer.L2Synchronization.AcceptEmptyClosedBatches` **Type:** : `boolean` @@ -1735,7 +1750,7 @@ if true, the synchronizer will accept empty batches and process them. AcceptEmptyClosedBatches=false ``` -#### 9.8.2. `Synchronizer.L2Synchronization.ReprocessFullBatchOnClose` +#### 9.9.2. 
`Synchronizer.L2Synchronization.ReprocessFullBatchOnClose` **Type:** : `boolean` @@ -1749,7 +1764,7 @@ AcceptEmptyClosedBatches=false ReprocessFullBatchOnClose=false ``` -#### 9.8.3. `Synchronizer.L2Synchronization.CheckLastL2BlockHashOnCloseBatch` +#### 9.9.3. `Synchronizer.L2Synchronization.CheckLastL2BlockHashOnCloseBatch` **Type:** : `boolean` diff --git a/docs/config-file/node-config-schema.json b/docs/config-file/node-config-schema.json index 4ec94535a7..ae4e36147a 100644 --- a/docs/config-file/node-config-schema.json +++ b/docs/config-file/node-config-schema.json @@ -517,6 +517,11 @@ "description": "TrustedSequencerURL is the rpc url to connect and sync the trusted state", "default": "" }, + "SyncBlockProtection": { + "type": "string", + "description": "SyncBlockProtection specify the state to sync (lastest, finalized or safe)", + "default": "latest" + }, "L1SyncCheckL2BlockHash": { "type": "boolean", "description": "L1SyncCheckL2BlockHash if is true when a batch is closed is force to check L2Block hash against trustedNode (only apply for permissionless)", diff --git a/etherman/etherman.go b/etherman/etherman.go index 4b8a78a59d..c7e4ec77b0 100644 --- a/etherman/etherman.go +++ b/etherman/etherman.go @@ -1185,7 +1185,6 @@ func (etherMan *Client) forcedBatchEvent(ctx context.Context, vLog types.Log, bl func (etherMan *Client) sequencedBatchesEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { log.Debugf("SequenceBatches event detected: txHash: %s", common.Bytes2Hex(vLog.TxHash[:])) - //tx,isPending, err:=etherMan.EthClient.TransactionByHash(ctx, vLog.TxHash) sb, err := etherMan.ZkEVM.ParseSequenceBatches(vLog) if err != nil { diff --git a/synchronizer/config.go b/synchronizer/config.go index 55bc29d3e7..0f7d822a60 100644 --- a/synchronizer/config.go +++ b/synchronizer/config.go @@ -13,6 +13,8 @@ type Config struct { SyncChunkSize uint64 `mapstructure:"SyncChunkSize"` // TrustedSequencerURL is the rpc url to connect and sync the trusted state TrustedSequencerURL string `mapstructure:"TrustedSequencerURL"` + // SyncBlockProtection specify the state to sync (lastest, finalized or safe) + SyncBlockProtection string `mapstructure:"SyncBlockProtection"` // L1SyncCheckL2BlockHash if is true when a batch is closed is force to check L2Block hash against trustedNode (only apply for permissionless) L1SyncCheckL2BlockHash bool `mapstructure:"L1SyncCheckL2BlockHash"` diff --git a/synchronizer/synchronizer.go b/synchronizer/synchronizer.go index 4cb93bab77..5e468813a5 100644 --- a/synchronizer/synchronizer.go +++ b/synchronizer/synchronizer.go @@ -22,6 +22,7 @@ import ( "github.com/0xPolygonHermez/zkevm-node/synchronizer/l2_sync/l2_sync_etrog" "github.com/0xPolygonHermez/zkevm-node/synchronizer/metrics" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rpc" "github.com/jackc/pgx/v4" ) @@ -53,6 +54,7 @@ type ClientSynchronizer struct { latestFlushID uint64 // If true the lastFlushID is stored in DB and we don't need to check again latestFlushIDIsFulfilled bool + syncBlockProtection rpc.BlockNumber etherManForL1 []syncinterfaces.EthermanFullInterface state syncinterfaces.StateFullInterface pool syncinterfaces.PoolInterface @@ -93,6 +95,13 @@ func NewSynchronizer( runInDevelopmentMode bool) (Synchronizer, error) { ctx, cancel := context.WithCancel(context.Background()) metrics.Register() + syncBlockProtection, err := decodeSyncBlockProtection(cfg.SyncBlockProtection) + if err != nil { + log.Errorf("error decoding 
syncBlockProtection. Error: %v", err) + cancel() + return nil, err + } + log.Info("syncBlockProtection: ", syncBlockProtection) res := &ClientSynchronizer{ isTrustedSequencer: isTrustedSequencer, state: st, @@ -111,6 +120,7 @@ func NewSynchronizer( previousExecutorFlushID: 0, l1SyncOrchestration: nil, l1EventProcessors: nil, + syncBlockProtection: syncBlockProtection, halter: syncCommon.NewCriticalErrorHalt(eventLog, 5*time.Second), //nolint:gomnd } @@ -166,6 +176,19 @@ func NewSynchronizer( return res, nil } +func decodeSyncBlockProtection(sBP string) (rpc.BlockNumber, error) { + switch sBP { + case "latest": + return rpc.LatestBlockNumber, nil + case "finalized": + return rpc.FinalizedBlockNumber, nil + case "safe": + return rpc.SafeBlockNumber, nil + default: + return 0, fmt.Errorf("error decoding SyncBlockProtection. Unknown value") + } +} + var waitDuration = time.Duration(0) func newL1SyncParallel(ctx context.Context, cfg Config, etherManForL1 []syncinterfaces.EthermanFullInterface, sync *ClientSynchronizer, runExternalControl bool) *l1_parallel_sync.L1SyncOrchestration { @@ -491,7 +514,7 @@ func (s *ClientSynchronizer) syncBlocksParallel(lastEthBlockSynced *state.Block) // This function syncs the node from a specific block to the latest func (s *ClientSynchronizer) syncBlocksSequential(lastEthBlockSynced *state.Block) (*state.Block, error) { // Call the blockchain to retrieve data - header, err := s.etherMan.HeaderByNumber(s.ctx, nil) + header, err := s.etherMan.HeaderByNumber(s.ctx, big.NewInt(s.syncBlockProtection.Int64())) if err != nil { log.Error("error getting header of the latest block in L1. Error: ", err) return lastEthBlockSynced, err @@ -520,6 +543,9 @@ func (s *ClientSynchronizer) syncBlocksSequential(lastEthBlockSynced *state.Bloc for { toBlock := fromBlock + s.cfg.SyncChunkSize + if toBlock > lastKnownBlock.Uint64() { + toBlock = lastKnownBlock.Uint64() + } log.Infof("Syncing block %d of %d", fromBlock, lastKnownBlock.Uint64()) log.Infof("Getting rollup info from block %d to block %d", fromBlock, toBlock) // This function returns the rollup information contained in the ethereum blocks and an extra param called order. @@ -750,6 +776,8 @@ func (s *ClientSynchronizer) checkReorg(latestBlock *state.Block) (*state.Block, log.Errorf("error getting latest block synced from blockchain. Block: %d, error: %v", reorgedBlock.BlockNumber, err) return nil, err } + log.Infof("[checkReorg function] BlockNumber: %d BlockHash got from L1 provider: %s", block.Number().Uint64(), block.Hash().String()) + log.Infof("[checkReorg function] latestBlockNumber: %d latestBlockHash already synced: %s", latestBlock.BlockNumber, latestBlock.BlockHash.String()) if block.NumberU64() != reorgedBlock.BlockNumber { err = fmt.Errorf("wrong ethereum block retrieved from blockchain. Block numbers don't match. BlockNumber stored: %d. 
BlockNumber retrieved: %d", reorgedBlock.BlockNumber, block.NumberU64()) diff --git a/synchronizer/synchronizer_test.go b/synchronizer/synchronizer_test.go index a0be1b3c63..38cc155780 100644 --- a/synchronizer/synchronizer_test.go +++ b/synchronizer/synchronizer_test.go @@ -18,6 +18,7 @@ import ( syncMocks "github.com/0xPolygonHermez/zkevm-node/synchronizer/mocks" "github.com/ethereum/go-ethereum/common" ethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" "github.com/jackc/pgx/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -126,6 +127,7 @@ func TestForcedBatchEtrog(t *testing.T) { SyncInterval: cfgTypes.Duration{Duration: 1 * time.Second}, SyncChunkSize: 10, L1SynchronizationMode: SequentialMode, + SyncBlockProtection: "latest", } m := mocks{ @@ -202,7 +204,7 @@ func TestForcedBatchEtrog(t *testing.T) { Return(ethBlock, nil). Once() - var n *big.Int + n := big.NewInt(rpc.LatestBlockNumber.Int64()) m.Etherman. On("HeaderByNumber", mock.Anything, n). Return(ethHeader, nil). @@ -255,7 +257,9 @@ func TestForcedBatchEtrog(t *testing.T) { fromBlock := ethBlock.NumberU64() + 1 toBlock := fromBlock + cfg.SyncChunkSize - + if toBlock > ethHeader.Number.Uint64() { + toBlock = ethHeader.Number.Uint64() + } m.Etherman. On("GetRollupInfoByBlockRange", mock.Anything, fromBlock, &toBlock). Return(blocks, order, nil). @@ -384,6 +388,7 @@ func TestSequenceForcedBatchIncaberry(t *testing.T) { SyncInterval: cfgTypes.Duration{Duration: 1 * time.Second}, SyncChunkSize: 10, L1SynchronizationMode: SequentialMode, + SyncBlockProtection: "latest", } m := mocks{ @@ -461,7 +466,7 @@ func TestSequenceForcedBatchIncaberry(t *testing.T) { Return(ethBlock, nil). Once() - var n *big.Int + n := big.NewInt(rpc.LatestBlockNumber.Int64()) m.Etherman. On("HeaderByNumber", ctx, n). Return(ethHeader, nil). @@ -509,7 +514,9 @@ func TestSequenceForcedBatchIncaberry(t *testing.T) { fromBlock := ethBlock.NumberU64() + 1 toBlock := fromBlock + cfg.SyncChunkSize - + if toBlock > ethHeader.Number.Uint64() { + toBlock = ethHeader.Number.Uint64() + } m.Etherman. On("GetRollupInfoByBlockRange", ctx, fromBlock, &toBlock). Return(blocks, order, nil). @@ -628,6 +635,7 @@ func setupGenericTest(t *testing.T) (*state.Genesis, *Config, *mocks) { SyncInterval: cfgTypes.Duration{Duration: 1 * time.Second}, SyncChunkSize: 10, L1SynchronizationMode: SequentialMode, + SyncBlockProtection: "latest", L1ParallelSynchronization: L1ParallelSynchronizationConfig{ MaxClients: 2, MaxPendingNoProcessedBlocks: 2, diff --git a/test/config/test.node.config.toml b/test/config/test.node.config.toml index aa3ca72a64..6a7a7efd0b 100644 --- a/test/config/test.node.config.toml +++ b/test/config/test.node.config.toml @@ -82,6 +82,7 @@ EnableL2SuggestedGasPricePolling = true SyncInterval = "1s" SyncChunkSize = 100 TrustedSequencerURL = "" # If it is empty or not specified, then the value is read from the smc. 
+SyncBlockProtection = "latest" # latest, finalized, safe L1SynchronizationMode = "sequential" [Synchronizer.L1ParallelSynchronization] MaxClients = 10 From fed89caa2be0fabbdad81e61aef01a8428725e9a Mon Sep 17 00:00:00 2001 From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> Date: Mon, 8 Apr 2024 14:19:33 +0200 Subject: [PATCH 09/11] add column checked on state.block (#3543) * add column checked on state.block * if no unchecked blocks return ErrNotFound * migration set to checked all but the block with number below max-1000 --- db/migrations/state/0018.sql | 11 ++ db/migrations/state/0018_test.go | 69 ++++++++ state/block.go | 1 + state/interfaces.go | 2 + state/mocks/mock_storage.go | 109 ++++++++++++ state/pgstatestorage/block.go | 47 +++++- state/pgstatestorage/pgstatestorage_test.go | 37 +++++ .../mocks/state_full_interface.go | 156 ++++++++++++------ synchronizer/common/syncinterfaces/state.go | 2 + 9 files changed, 379 insertions(+), 55 deletions(-) create mode 100644 db/migrations/state/0018.sql create mode 100644 db/migrations/state/0018_test.go diff --git a/db/migrations/state/0018.sql b/db/migrations/state/0018.sql new file mode 100644 index 0000000000..3d9db107c1 --- /dev/null +++ b/db/migrations/state/0018.sql @@ -0,0 +1,11 @@ +-- +migrate Up +ALTER TABLE state.block + ADD COLUMN IF NOT EXISTS checked BOOL NOT NULL DEFAULT FALSE; + +-- set block.checked to true for all blocks below max - 100 +UPDATE state.block SET checked = true WHERE block_num <= (SELECT MAX(block_num) - 1000 FROM state.block); + +-- +migrate Down +ALTER TABLE state.block + DROP COLUMN IF EXISTS checked; + diff --git a/db/migrations/state/0018_test.go b/db/migrations/state/0018_test.go new file mode 100644 index 0000000000..b8a51dbb49 --- /dev/null +++ b/db/migrations/state/0018_test.go @@ -0,0 +1,69 @@ +package migrations_test + +import ( + "database/sql" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +type migrationTest0018 struct{} + +func (m migrationTest0018) InsertData(db *sql.DB) error { + const addBlock = "INSERT INTO state.block (block_num, received_at, block_hash) VALUES ($1, $2, $3)" + if _, err := db.Exec(addBlock, 1, time.Now(), "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1"); err != nil { + return err + } + if _, err := db.Exec(addBlock, 50, time.Now(), "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1"); err != nil { + return err + } + if _, err := db.Exec(addBlock, 1050, time.Now(), "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1"); err != nil { + return err + } + return nil +} + +func (m migrationTest0018) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { + var checked bool + row := db.QueryRow("SELECT checked FROM state.block WHERE block_num = $1", 1) + assert.NoError(t, row.Scan(&checked)) + assert.Equal(t, true, checked) + row = db.QueryRow("SELECT checked FROM state.block WHERE block_num = $1", 50) + assert.NoError(t, row.Scan(&checked)) + assert.Equal(t, true, checked) + row = db.QueryRow("SELECT checked FROM state.block WHERE block_num = $1", 1050) + assert.NoError(t, row.Scan(&checked)) + assert.Equal(t, false, checked) + + const addBlock = "INSERT INTO state.block (block_num, received_at, block_hash, checked) VALUES ($1, $2, $3, $4)" + _, err := db.Exec(addBlock, 2, time.Now(), "0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1", true) + assert.NoError(t, err) + _, err = db.Exec(addBlock, 3, time.Now(), 
"0x29e885edaf8e4b51e1d2e05f9da28161d2fb4f6b1d53827d9b80a23cf2d7d9f1", false) + assert.NoError(t, err) + const sql = `SELECT count(*) FROM state.block WHERE checked = true` + row = db.QueryRow(sql) + var result int + assert.NoError(t, row.Scan(&result)) + assert.Equal(t, 3, result, "must be 1,50 per migration and 2 by insert") + + const sqlCheckedFalse = `SELECT count(*) FROM state.block WHERE checked = false` + row = db.QueryRow(sqlCheckedFalse) + + assert.NoError(t, row.Scan(&result)) + assert.Equal(t, 2, result, "must be 150 by migration, and 3 by insert") +} + +func (m migrationTest0018) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { + var result int + + // Check column wip doesn't exists in state.batch table + const sql = `SELECT count(*) FROM state.block` + row := db.QueryRow(sql) + assert.NoError(t, row.Scan(&result)) + assert.Equal(t, 5, result) +} + +func TestMigration0018(t *testing.T) { + runMigrationTest(t, 18, migrationTest0018{}) +} diff --git a/state/block.go b/state/block.go index c5c9fbb1a2..7883770249 100644 --- a/state/block.go +++ b/state/block.go @@ -12,6 +12,7 @@ type Block struct { BlockHash common.Hash ParentHash common.Hash ReceivedAt time.Time + Checked bool } // NewBlock creates a block with the given data. diff --git a/state/interfaces.go b/state/interfaces.go index ac9c2a0a67..dfde07d8ce 100644 --- a/state/interfaces.go +++ b/state/interfaces.go @@ -24,6 +24,8 @@ type storage interface { GetTxsOlderThanNL1BlocksUntilTxHash(ctx context.Context, nL1Blocks uint64, earliestTxHash common.Hash, dbTx pgx.Tx) ([]common.Hash, error) GetLastBlock(ctx context.Context, dbTx pgx.Tx) (*Block, error) GetPreviousBlock(ctx context.Context, offset uint64, dbTx pgx.Tx) (*Block, error) + GetFirstUncheckedBlock(ctx context.Context, fromBlockNumber uint64, dbTx pgx.Tx) (*Block, error) + UpdateCheckedBlockByNumber(ctx context.Context, blockNumber uint64, newCheckedStatus bool, dbTx pgx.Tx) error AddGlobalExitRoot(ctx context.Context, exitRoot *GlobalExitRoot, dbTx pgx.Tx) error GetLatestGlobalExitRoot(ctx context.Context, maxBlockNumber uint64, dbTx pgx.Tx) (GlobalExitRoot, time.Time, error) GetNumberOfBlocksSinceLastGERUpdate(ctx context.Context, dbTx pgx.Tx) (uint64, error) diff --git a/state/mocks/mock_storage.go b/state/mocks/mock_storage.go index d47b4524bd..2b03479dee 100644 --- a/state/mocks/mock_storage.go +++ b/state/mocks/mock_storage.go @@ -2380,6 +2380,66 @@ func (_c *StorageMock_GetFirstL2BlockNumberForBatchNumber_Call) RunAndReturn(run return _c } +// GetFirstUncheckedBlock provides a mock function with given fields: ctx, fromBlockNumber, dbTx +func (_m *StorageMock) GetFirstUncheckedBlock(ctx context.Context, fromBlockNumber uint64, dbTx pgx.Tx) (*state.Block, error) { + ret := _m.Called(ctx, fromBlockNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for GetFirstUncheckedBlock") + } + + var r0 *state.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (*state.Block, error)); ok { + return rf(ctx, fromBlockNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) *state.Block); ok { + r0 = rf(ctx, fromBlockNumber, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*state.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, fromBlockNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StorageMock_GetFirstUncheckedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit 
version for method 'GetFirstUncheckedBlock' +type StorageMock_GetFirstUncheckedBlock_Call struct { + *mock.Call +} + +// GetFirstUncheckedBlock is a helper method to define mock.On call +// - ctx context.Context +// - fromBlockNumber uint64 +// - dbTx pgx.Tx +func (_e *StorageMock_Expecter) GetFirstUncheckedBlock(ctx interface{}, fromBlockNumber interface{}, dbTx interface{}) *StorageMock_GetFirstUncheckedBlock_Call { + return &StorageMock_GetFirstUncheckedBlock_Call{Call: _e.mock.On("GetFirstUncheckedBlock", ctx, fromBlockNumber, dbTx)} +} + +func (_c *StorageMock_GetFirstUncheckedBlock_Call) Run(run func(ctx context.Context, fromBlockNumber uint64, dbTx pgx.Tx)) *StorageMock_GetFirstUncheckedBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(pgx.Tx)) + }) + return _c +} + +func (_c *StorageMock_GetFirstUncheckedBlock_Call) Return(_a0 *state.Block, _a1 error) *StorageMock_GetFirstUncheckedBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StorageMock_GetFirstUncheckedBlock_Call) RunAndReturn(run func(context.Context, uint64, pgx.Tx) (*state.Block, error)) *StorageMock_GetFirstUncheckedBlock_Call { + _c.Call.Return(run) + return _c +} + // GetForcedBatch provides a mock function with given fields: ctx, forcedBatchNumber, dbTx func (_m *StorageMock) GetForcedBatch(ctx context.Context, forcedBatchNumber uint64, dbTx pgx.Tx) (*state.ForcedBatch, error) { ret := _m.Called(ctx, forcedBatchNumber, dbTx) @@ -8152,6 +8212,55 @@ func (_c *StorageMock_UpdateBatchL2Data_Call) RunAndReturn(run func(context.Cont return _c } +// UpdateCheckedBlockByNumber provides a mock function with given fields: ctx, blockNumber, newCheckedStatus, dbTx +func (_m *StorageMock) UpdateCheckedBlockByNumber(ctx context.Context, blockNumber uint64, newCheckedStatus bool, dbTx pgx.Tx) error { + ret := _m.Called(ctx, blockNumber, newCheckedStatus, dbTx) + + if len(ret) == 0 { + panic("no return value specified for UpdateCheckedBlockByNumber") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, bool, pgx.Tx) error); ok { + r0 = rf(ctx, blockNumber, newCheckedStatus, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// StorageMock_UpdateCheckedBlockByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateCheckedBlockByNumber' +type StorageMock_UpdateCheckedBlockByNumber_Call struct { + *mock.Call +} + +// UpdateCheckedBlockByNumber is a helper method to define mock.On call +// - ctx context.Context +// - blockNumber uint64 +// - newCheckedStatus bool +// - dbTx pgx.Tx +func (_e *StorageMock_Expecter) UpdateCheckedBlockByNumber(ctx interface{}, blockNumber interface{}, newCheckedStatus interface{}, dbTx interface{}) *StorageMock_UpdateCheckedBlockByNumber_Call { + return &StorageMock_UpdateCheckedBlockByNumber_Call{Call: _e.mock.On("UpdateCheckedBlockByNumber", ctx, blockNumber, newCheckedStatus, dbTx)} +} + +func (_c *StorageMock_UpdateCheckedBlockByNumber_Call) Run(run func(ctx context.Context, blockNumber uint64, newCheckedStatus bool, dbTx pgx.Tx)) *StorageMock_UpdateCheckedBlockByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(bool), args[3].(pgx.Tx)) + }) + return _c +} + +func (_c *StorageMock_UpdateCheckedBlockByNumber_Call) Return(_a0 error) *StorageMock_UpdateCheckedBlockByNumber_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *StorageMock_UpdateCheckedBlockByNumber_Call) 
RunAndReturn(run func(context.Context, uint64, bool, pgx.Tx) error) *StorageMock_UpdateCheckedBlockByNumber_Call { + _c.Call.Return(run) + return _c +} + // UpdateForkIDBlockNumber provides a mock function with given fields: ctx, forkdID, newBlockNumber, updateMemCache, dbTx func (_m *StorageMock) UpdateForkIDBlockNumber(ctx context.Context, forkdID uint64, newBlockNumber uint64, updateMemCache bool, dbTx pgx.Tx) error { ret := _m.Called(ctx, forkdID, newBlockNumber, updateMemCache, dbTx) diff --git a/state/pgstatestorage/block.go b/state/pgstatestorage/block.go index f2ae7abd17..768b384df1 100644 --- a/state/pgstatestorage/block.go +++ b/state/pgstatestorage/block.go @@ -16,10 +16,10 @@ const ( // AddBlock adds a new block to the State Store func (p *PostgresStorage) AddBlock(ctx context.Context, block *state.Block, dbTx pgx.Tx) error { - const addBlockSQL = "INSERT INTO state.block (block_num, block_hash, parent_hash, received_at) VALUES ($1, $2, $3, $4)" + const addBlockSQL = "INSERT INTO state.block (block_num, block_hash, parent_hash, received_at, checked) VALUES ($1, $2, $3, $4, $5)" e := p.getExecQuerier(dbTx) - _, err := e.Exec(ctx, addBlockSQL, block.BlockNumber, block.BlockHash.String(), block.ParentHash.String(), block.ReceivedAt) + _, err := e.Exec(ctx, addBlockSQL, block.BlockNumber, block.BlockHash.String(), block.ParentHash.String(), block.ReceivedAt, block.Checked) return err } @@ -30,11 +30,11 @@ func (p *PostgresStorage) GetLastBlock(ctx context.Context, dbTx pgx.Tx) (*state parentHash string block state.Block ) - const getLastBlockSQL = "SELECT block_num, block_hash, parent_hash, received_at FROM state.block ORDER BY block_num DESC LIMIT 1" + const getLastBlockSQL = "SELECT block_num, block_hash, parent_hash, received_at, checked FROM state.block ORDER BY block_num DESC LIMIT 1" q := p.getExecQuerier(dbTx) - err := q.QueryRow(ctx, getLastBlockSQL).Scan(&block.BlockNumber, &blockHash, &parentHash, &block.ReceivedAt) + err := q.QueryRow(ctx, getLastBlockSQL).Scan(&block.BlockNumber, &blockHash, &parentHash, &block.ReceivedAt, &block.Checked) if errors.Is(err, pgx.ErrNoRows) { return nil, state.ErrStateNotSynchronized } @@ -43,6 +43,26 @@ func (p *PostgresStorage) GetLastBlock(ctx context.Context, dbTx pgx.Tx) (*state return &block, err } +// GetFirstUncheckedBlock returns the first L1 block that has not been checked from a given block number. +func (p *PostgresStorage) GetFirstUncheckedBlock(ctx context.Context, fromBlockNumber uint64, dbTx pgx.Tx) (*state.Block, error) { + var ( + blockHash string + parentHash string + block state.Block + ) + const getLastBlockSQL = "SELECT block_num, block_hash, parent_hash, received_at, checked FROM state.block WHERE block_num>=$1 AND checked=false ORDER BY block_num LIMIT 1" + + q := p.getExecQuerier(dbTx) + + err := q.QueryRow(ctx, getLastBlockSQL, fromBlockNumber).Scan(&block.BlockNumber, &blockHash, &parentHash, &block.ReceivedAt, &block.Checked) + if errors.Is(err, pgx.ErrNoRows) { + return nil, state.ErrNotFound + } + block.BlockHash = common.HexToHash(blockHash) + block.ParentHash = common.HexToHash(parentHash) + return &block, err +} + // GetPreviousBlock gets the offset previous L1 block respect to latest. 
func (p *PostgresStorage) GetPreviousBlock(ctx context.Context, offset uint64, dbTx pgx.Tx) (*state.Block, error) { var ( @@ -50,11 +70,11 @@ func (p *PostgresStorage) GetPreviousBlock(ctx context.Context, offset uint64, d parentHash string block state.Block ) - const getPreviousBlockSQL = "SELECT block_num, block_hash, parent_hash, received_at FROM state.block ORDER BY block_num DESC LIMIT 1 OFFSET $1" + const getPreviousBlockSQL = "SELECT block_num, block_hash, parent_hash, received_at,checked FROM state.block ORDER BY block_num DESC LIMIT 1 OFFSET $1" q := p.getExecQuerier(dbTx) - err := q.QueryRow(ctx, getPreviousBlockSQL, offset).Scan(&block.BlockNumber, &blockHash, &parentHash, &block.ReceivedAt) + err := q.QueryRow(ctx, getPreviousBlockSQL, offset).Scan(&block.BlockNumber, &blockHash, &parentHash, &block.ReceivedAt, &block.Checked) if errors.Is(err, pgx.ErrNoRows) { return nil, state.ErrNotFound } @@ -70,11 +90,11 @@ func (p *PostgresStorage) GetBlockByNumber(ctx context.Context, blockNumber uint parentHash string block state.Block ) - const getBlockByNumberSQL = "SELECT block_num, block_hash, parent_hash, received_at FROM state.block WHERE block_num = $1" + const getBlockByNumberSQL = "SELECT block_num, block_hash, parent_hash, received_at,checked FROM state.block WHERE block_num = $1" q := p.getExecQuerier(dbTx) - err := q.QueryRow(ctx, getBlockByNumberSQL, blockNumber).Scan(&block.BlockNumber, &blockHash, &parentHash, &block.ReceivedAt) + err := q.QueryRow(ctx, getBlockByNumberSQL, blockNumber).Scan(&block.BlockNumber, &blockHash, &parentHash, &block.ReceivedAt, &block.Checked) if errors.Is(err, pgx.ErrNoRows) { return nil, state.ErrNotFound } @@ -82,3 +102,14 @@ func (p *PostgresStorage) GetBlockByNumber(ctx context.Context, blockNumber uint block.ParentHash = common.HexToHash(parentHash) return &block, err } + +// UpdateCheckedBlockByNumber update checked flag for a block +func (p *PostgresStorage) UpdateCheckedBlockByNumber(ctx context.Context, blockNumber uint64, newCheckedStatus bool, dbTx pgx.Tx) error { + const query = ` + UPDATE state.block + SET checked = $1 WHERE block_num = $2` + + e := p.getExecQuerier(dbTx) + _, err := e.Exec(ctx, query, newCheckedStatus, blockNumber) + return err +} diff --git a/state/pgstatestorage/pgstatestorage_test.go b/state/pgstatestorage/pgstatestorage_test.go index 7d934f029e..416b21b47b 100644 --- a/state/pgstatestorage/pgstatestorage_test.go +++ b/state/pgstatestorage/pgstatestorage_test.go @@ -1650,3 +1650,40 @@ func TestGetLastGER(t *testing.T) { require.Equal(t, common.HexToHash("0x2").String(), ger.String()) } + +func TestGetFirstUncheckedBlock(t *testing.T) { + var err error + blockNumber := uint64(51001) + err = testState.AddBlock(context.Background(), &state.Block{BlockNumber: blockNumber, Checked: true}, nil) + require.NoError(t, err) + err = testState.AddBlock(context.Background(), &state.Block{BlockNumber: blockNumber + 1, Checked: false}, nil) + require.NoError(t, err) + err = testState.AddBlock(context.Background(), &state.Block{BlockNumber: blockNumber + 2, Checked: true}, nil) + require.NoError(t, err) + + block, err := testState.GetFirstUncheckedBlock(context.Background(), blockNumber, nil) + require.NoError(t, err) + require.Equal(t, uint64(blockNumber+1), block.BlockNumber) +} + +func TestUpdateCheckedBlockByNumber(t *testing.T) { + var err error + blockNumber := uint64(54001) + err = testState.AddBlock(context.Background(), &state.Block{BlockNumber: blockNumber, Checked: true}, nil) + require.NoError(t, err) + err = 
testState.AddBlock(context.Background(), &state.Block{BlockNumber: blockNumber + 1, Checked: false}, nil) + require.NoError(t, err) + err = testState.AddBlock(context.Background(), &state.Block{BlockNumber: blockNumber + 2, Checked: true}, nil) + require.NoError(t, err) + + b1, err := testState.GetBlockByNumber(context.Background(), uint64(blockNumber), nil) + require.NoError(t, err) + require.True(t, b1.Checked) + + err = testState.UpdateCheckedBlockByNumber(context.Background(), uint64(blockNumber), false, nil) + require.NoError(t, err) + + b1, err = testState.GetBlockByNumber(context.Background(), uint64(blockNumber), nil) + require.NoError(t, err) + require.False(t, b1.Checked) +} diff --git a/synchronizer/common/syncinterfaces/mocks/state_full_interface.go b/synchronizer/common/syncinterfaces/mocks/state_full_interface.go index 1559654641..f4790bc695 100644 --- a/synchronizer/common/syncinterfaces/mocks/state_full_interface.go +++ b/synchronizer/common/syncinterfaces/mocks/state_full_interface.go @@ -881,6 +881,66 @@ func (_c *StateFullInterface_GetExitRootByGlobalExitRoot_Call) RunAndReturn(run return _c } +// GetFirstUncheckedBlock provides a mock function with given fields: ctx, fromBlockNumber, dbTx +func (_m *StateFullInterface) GetFirstUncheckedBlock(ctx context.Context, fromBlockNumber uint64, dbTx pgx.Tx) (*state.Block, error) { + ret := _m.Called(ctx, fromBlockNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for GetFirstUncheckedBlock") + } + + var r0 *state.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (*state.Block, error)); ok { + return rf(ctx, fromBlockNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) *state.Block); ok { + r0 = rf(ctx, fromBlockNumber, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*state.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, fromBlockNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StateFullInterface_GetFirstUncheckedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstUncheckedBlock' +type StateFullInterface_GetFirstUncheckedBlock_Call struct { + *mock.Call +} + +// GetFirstUncheckedBlock is a helper method to define mock.On call +// - ctx context.Context +// - fromBlockNumber uint64 +// - dbTx pgx.Tx +func (_e *StateFullInterface_Expecter) GetFirstUncheckedBlock(ctx interface{}, fromBlockNumber interface{}, dbTx interface{}) *StateFullInterface_GetFirstUncheckedBlock_Call { + return &StateFullInterface_GetFirstUncheckedBlock_Call{Call: _e.mock.On("GetFirstUncheckedBlock", ctx, fromBlockNumber, dbTx)} +} + +func (_c *StateFullInterface_GetFirstUncheckedBlock_Call) Run(run func(ctx context.Context, fromBlockNumber uint64, dbTx pgx.Tx)) *StateFullInterface_GetFirstUncheckedBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(pgx.Tx)) + }) + return _c +} + +func (_c *StateFullInterface_GetFirstUncheckedBlock_Call) Return(_a0 *state.Block, _a1 error) *StateFullInterface_GetFirstUncheckedBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StateFullInterface_GetFirstUncheckedBlock_Call) RunAndReturn(run func(context.Context, uint64, pgx.Tx) (*state.Block, error)) *StateFullInterface_GetFirstUncheckedBlock_Call { + _c.Call.Return(run) + return _c +} + // GetForkIDByBatchNumber provides a mock function with given fields: 
batchNumber func (_m *StateFullInterface) GetForkIDByBatchNumber(batchNumber uint64) uint64 { ret := _m.Called(batchNumber) @@ -2343,53 +2403,6 @@ func (_c *StateFullInterface_ResetForkID_Call) RunAndReturn(run func(context.Con return _c } -// ResetL1InfoTree provides a mock function with given fields: ctx, dbTx -func (_m *StateFullInterface) ResetL1InfoTree(ctx context.Context, dbTx pgx.Tx) error { - ret := _m.Called(ctx, dbTx) - - if len(ret) == 0 { - panic("no return value specified for ResetL1InfoTree") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) error); ok { - r0 = rf(ctx, dbTx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StateFullInterface_ResetL1InfoTree_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ResetL1InfoTree' -type StateFullInterface_ResetL1InfoTree_Call struct { - *mock.Call -} - -// ResetL1InfoTree is a helper method to define mock.On call -// - ctx context.Context -// - dbTx pgx.Tx -func (_e *StateFullInterface_Expecter) ResetL1InfoTree(ctx interface{}, dbTx interface{}) *StateFullInterface_ResetL1InfoTree_Call { - return &StateFullInterface_ResetL1InfoTree_Call{Call: _e.mock.On("ResetL1InfoTree", ctx, dbTx)} -} - -func (_c *StateFullInterface_ResetL1InfoTree_Call) Run(run func(ctx context.Context, dbTx pgx.Tx)) *StateFullInterface_ResetL1InfoTree_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(pgx.Tx)) - }) - return _c -} - -func (_c *StateFullInterface_ResetL1InfoTree_Call) Return(_a0 error) *StateFullInterface_ResetL1InfoTree_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *StateFullInterface_ResetL1InfoTree_Call) RunAndReturn(run func(context.Context, pgx.Tx) error) *StateFullInterface_ResetL1InfoTree_Call { - _c.Call.Return(run) - return _c -} - // ResetTrustedState provides a mock function with given fields: ctx, batchNumber, dbTx func (_m *StateFullInterface) ResetTrustedState(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error { ret := _m.Called(ctx, batchNumber, dbTx) @@ -2762,6 +2775,55 @@ func (_c *StateFullInterface_UpdateBatchL2Data_Call) RunAndReturn(run func(conte return _c } +// UpdateCheckedBlockByNumber provides a mock function with given fields: ctx, blockNumber, newCheckedStatus, dbTx +func (_m *StateFullInterface) UpdateCheckedBlockByNumber(ctx context.Context, blockNumber uint64, newCheckedStatus bool, dbTx pgx.Tx) error { + ret := _m.Called(ctx, blockNumber, newCheckedStatus, dbTx) + + if len(ret) == 0 { + panic("no return value specified for UpdateCheckedBlockByNumber") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, bool, pgx.Tx) error); ok { + r0 = rf(ctx, blockNumber, newCheckedStatus, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// StateFullInterface_UpdateCheckedBlockByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateCheckedBlockByNumber' +type StateFullInterface_UpdateCheckedBlockByNumber_Call struct { + *mock.Call +} + +// UpdateCheckedBlockByNumber is a helper method to define mock.On call +// - ctx context.Context +// - blockNumber uint64 +// - newCheckedStatus bool +// - dbTx pgx.Tx +func (_e *StateFullInterface_Expecter) UpdateCheckedBlockByNumber(ctx interface{}, blockNumber interface{}, newCheckedStatus interface{}, dbTx interface{}) *StateFullInterface_UpdateCheckedBlockByNumber_Call { + return &StateFullInterface_UpdateCheckedBlockByNumber_Call{Call: 
_e.mock.On("UpdateCheckedBlockByNumber", ctx, blockNumber, newCheckedStatus, dbTx)} +} + +func (_c *StateFullInterface_UpdateCheckedBlockByNumber_Call) Run(run func(ctx context.Context, blockNumber uint64, newCheckedStatus bool, dbTx pgx.Tx)) *StateFullInterface_UpdateCheckedBlockByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(bool), args[3].(pgx.Tx)) + }) + return _c +} + +func (_c *StateFullInterface_UpdateCheckedBlockByNumber_Call) Return(_a0 error) *StateFullInterface_UpdateCheckedBlockByNumber_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *StateFullInterface_UpdateCheckedBlockByNumber_Call) RunAndReturn(run func(context.Context, uint64, bool, pgx.Tx) error) *StateFullInterface_UpdateCheckedBlockByNumber_Call { + _c.Call.Return(run) + return _c +} + // UpdateForkIDBlockNumber provides a mock function with given fields: ctx, forkdID, newBlockNumber, updateMemCache, dbTx func (_m *StateFullInterface) UpdateForkIDBlockNumber(ctx context.Context, forkdID uint64, newBlockNumber uint64, updateMemCache bool, dbTx pgx.Tx) error { ret := _m.Called(ctx, forkdID, newBlockNumber, updateMemCache, dbTx) diff --git a/synchronizer/common/syncinterfaces/state.go b/synchronizer/common/syncinterfaces/state.go index 2895eb0903..0aff583319 100644 --- a/synchronizer/common/syncinterfaces/state.go +++ b/synchronizer/common/syncinterfaces/state.go @@ -29,6 +29,8 @@ type StateFullInterface interface { AddBlock(ctx context.Context, block *state.Block, dbTx pgx.Tx) error Reset(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) error GetPreviousBlock(ctx context.Context, offset uint64, dbTx pgx.Tx) (*state.Block, error) + GetFirstUncheckedBlock(ctx context.Context, fromBlockNumber uint64, dbTx pgx.Tx) (*state.Block, error) + UpdateCheckedBlockByNumber(ctx context.Context, blockNumber uint64, newCheckedStatus bool, dbTx pgx.Tx) error GetLastBatchNumber(ctx context.Context, dbTx pgx.Tx) (uint64, error) GetBatchByNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.Batch, error) ResetTrustedState(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error From 64dcff0d571236be0294f663c42d0e2e1872823f Mon Sep 17 00:00:00 2001 From: Alonso Rodriguez Date: Mon, 8 Apr 2024 17:22:45 +0200 Subject: [PATCH 10/11] safe mode by default (#3547) * safe mode by default * doc --- config/default.go | 2 +- docs/config-file/node-config-doc.html | 2 +- docs/config-file/node-config-doc.md | 6 +++--- docs/config-file/node-config-schema.json | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/config/default.go b/config/default.go index d5703c0a43..a8971263ca 100644 --- a/config/default.go +++ b/config/default.go @@ -102,7 +102,7 @@ EnableHttpLog = true SyncInterval = "1s" SyncChunkSize = 100 TrustedSequencerURL = "" # If it is empty or not specified, then the value is read from the smc -SyncBlockProtection = "latest" # latest, finalized, safe +SyncBlockProtection = "safe" # latest, finalized, safe L1SynchronizationMode = "sequential" L1SyncCheckL2BlockHash = true L1SyncCheckL2BlockNumberhModulus = 30 diff --git a/docs/config-file/node-config-doc.html b/docs/config-file/node-config-doc.html index 528a0708eb..9122c48735 100644 --- a/docs/config-file/node-config-doc.html +++ b/docs/config-file/node-config-doc.html @@ -16,7 +16,7 @@
"300ms"
 

Default: 500Type: number

MaxRequestsPerIPAndSecond defines how much requests a single IP can
send within a single second


Default: ""Type: string

SequencerNodeURI is used allow Non-Sequencer nodes
to relay transactions to the Sequencer node


Default: 0Type: integer

MaxCumulativeGasUsed is the max gas allowed per batch


WebSockets configuration
Default: trueType: boolean

Enabled defines if the WebSocket requests are enabled or disabled


Default: "0.0.0.0"Type: string

Host defines the network adapter that will be used to serve the WS requests


Default: 8546Type: integer

Port defines the port to serve the endpoints via WS


Default: 104857600Type: integer

ReadLimit defines the maximum size of a message read from the client (in bytes)


Default: trueType: boolean

EnableL2SuggestedGasPricePolling enables polling of the L2 gas price to block tx in the RPC with lower gas price.


Default: falseType: boolean

BatchRequestsEnabled defines if the Batch requests are enabled or disabled


Default: 20Type: integer

BatchRequestsLimit defines the limit of requests that can be incorporated into each batch request


Type: array of integer

L2Coinbase defines which address is going to receive the fees

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Default: 10000Type: integer

MaxLogsCount is a configuration to set the max number of logs that can be returned
in a single call to the state, if zero it means no limit


Default: 10000Type: integer

MaxLogsBlockRange is a configuration to set the max range for block number when querying TXs
logs in a single call to the state, if zero it means no limit


Default: 60000Type: integer

MaxNativeBlockHashBlockRange is a configuration to set the max range for block number when querying
native block hashes in a single call to the state, if zero it means no limit


Default: trueType: boolean

EnableHttpLog allows the user to enable or disable the logs related to the HTTP
requests to be captured by the server.


ZKCountersLimits defines the ZK Counter limits
Default: 0Type: integer

Default: 0Type: integer

Default: 0Type: integer

Default: 0Type: integer

Default: 0Type: integer

Default: 0Type: integer

Default: 0Type: integer

Default: 0Type: integer

Configuration of service `Syncrhonizer`. For this service is also really important the value of `IsTrustedSequencer` because depending of this values is going to ask to a trusted node for trusted transactions or not
Default: "1s"Type: string

SyncInterval is the delay interval between reading new rollup information


Examples:

"1m"
 
"300ms"
-

Default: 100Type: integer

SyncChunkSize is the number of blocks to sync on each chunk


Default: ""Type: string

TrustedSequencerURL is the rpc url to connect and sync the trusted state


Default: "latest"Type: string

SyncBlockProtection specify the state to sync (lastest, finalized or safe)


Default: trueType: boolean

L1SyncCheckL2BlockHash if is true when a batch is closed is force to check L2Block hash against trustedNode (only apply for permissionless)


Default: 30Type: integer

L1SyncCheckL2BlockNumberhModulus is the modulus used to choose the l2block to check
a modules 5, for instance, means check all l2block multiples of 5 (10,15,20,...)


Default: "sequential"Type: enum (of string)

L1SynchronizationMode define how to synchronize with L1:
- parallel: Request data to L1 in parallel, and process sequentially. The advantage is that executor is not blocked waiting for L1 data
- sequential: Request data to L1 and execute

Must be one of:

  • "sequential"
  • "parallel"

L1ParallelSynchronization Configuration for parallel mode (if L1SynchronizationMode equal to 'parallel')
Default: 10Type: integer

MaxClients Number of clients used to synchronize with L1


Default: 25Type: integer

MaxPendingNoProcessedBlocks Size of the buffer used to store rollup information from L1, must be >= to NumberOfEthereumClientsToSync
sugested twice of NumberOfParallelOfEthereumClients


Default: "5s"Type: string

RequestLastBlockPeriod is the time to wait to request the
last block to L1 to known if we need to retrieve more data.
This value only apply when the system is synchronized


Examples:

"1m"
+

Default: 100Type: integer

SyncChunkSize is the number of blocks to sync on each chunk


Default: ""Type: string

TrustedSequencerURL is the rpc url to connect and sync the trusted state


Default: "safe"Type: string

SyncBlockProtection specify the state to sync (lastest, finalized or safe)


Default: trueType: boolean

L1SyncCheckL2BlockHash if is true when a batch is closed is force to check L2Block hash against trustedNode (only apply for permissionless)


Default: 30Type: integer

L1SyncCheckL2BlockNumberhModulus is the modulus used to choose the l2block to check
a modules 5, for instance, means check all l2block multiples of 5 (10,15,20,...)


Default: "sequential"Type: enum (of string)

L1SynchronizationMode define how to synchronize with L1:
- parallel: Request data to L1 in parallel, and process sequentially. The advantage is that executor is not blocked waiting for L1 data
- sequential: Request data to L1 and execute

Must be one of:

  • "sequential"
  • "parallel"

L1ParallelSynchronization Configuration for parallel mode (if L1SynchronizationMode equal to 'parallel')
Default: 10Type: integer

MaxClients Number of clients used to synchronize with L1


Default: 25Type: integer

MaxPendingNoProcessedBlocks Size of the buffer used to store rollup information from L1, must be >= to NumberOfEthereumClientsToSync
sugested twice of NumberOfParallelOfEthereumClients


Default: "5s"Type: string

RequestLastBlockPeriod is the time to wait to request the
last block to L1 to known if we need to retrieve more data.
This value only apply when the system is synchronized


Examples:

"1m"
 
"300ms"
 

Consumer Configuration for the consumer of rollup information from L1
Default: "5s"Type: string

AceptableInacctivityTime is the expected maximum time that the consumer
could wait until new data is produced. If the time is greater it emmit a log to warn about
that. The idea is keep working the consumer as much as possible, so if the producer is not
fast enought then you could increse the number of parallel clients to sync with L1


Examples:

"1m"
 
"300ms"
diff --git a/docs/config-file/node-config-doc.md b/docs/config-file/node-config-doc.md
index 75fd74c99a..23619596c8 100644
--- a/docs/config-file/node-config-doc.md
+++ b/docs/config-file/node-config-doc.md
@@ -1404,14 +1404,14 @@ TrustedSequencerURL=""
 
 **Type:** : `string`
 
-**Default:** `"latest"`
+**Default:** `"safe"`
 
 **Description:** SyncBlockProtection specify the state to sync (lastest, finalized or safe)
 
-**Example setting the default value** ("latest"):
+**Example setting the default value** ("safe"):
 ```
 [Synchronizer]
-SyncBlockProtection="latest"
+SyncBlockProtection="safe"
 ```
 
 ### 9.5. `Synchronizer.L1SyncCheckL2BlockHash`
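
For context on what the three accepted values actually do: earlier in this series a `decodeSyncBlockProtection` helper is added in `synchronizer/synchronizer.go` that maps the configured string to a go-ethereum `rpc.BlockNumber` tag, which is then passed to `HeaderByNumber` when deciding how far to sync. The standalone sketch below only exercises that mapping; the `main` function and its printed output are illustrative and not part of the repository.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/rpc"
)

// decodeSyncBlockProtection mirrors the helper added in synchronizer/synchronizer.go:
// it turns the configured SyncBlockProtection string into a go-ethereum block tag.
func decodeSyncBlockProtection(sBP string) (rpc.BlockNumber, error) {
	switch sBP {
	case "latest":
		return rpc.LatestBlockNumber, nil
	case "finalized":
		return rpc.FinalizedBlockNumber, nil
	case "safe":
		return rpc.SafeBlockNumber, nil
	default:
		return 0, fmt.Errorf("error decoding SyncBlockProtection. Unknown value")
	}
}

func main() {
	// With the new default "safe", the sequential synchronizer asks L1 for the
	// header of the safe block rather than the latest one.
	tag, err := decodeSyncBlockProtection("safe")
	if err != nil {
		panic(err)
	}
	// This is the value that ends up in HeaderByNumber(ctx, big.NewInt(tag.Int64())):
	// block tags are encoded as negative sentinel integers by the rpc package.
	fmt.Println("block tag passed to HeaderByNumber:", big.NewInt(tag.Int64()))
}
```
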
diff --git a/docs/config-file/node-config-schema.json b/docs/config-file/node-config-schema.json
index ae4e36147a..1c93ea4b3c 100644
--- a/docs/config-file/node-config-schema.json
+++ b/docs/config-file/node-config-schema.json
@@ -520,7 +520,7 @@
 				"SyncBlockProtection": {
 					"type": "string",
 					"description": "SyncBlockProtection specify the state to sync (lastest, finalized or safe)",
-					"default": "latest"
+					"default": "safe"
 				},
 				"L1SyncCheckL2BlockHash": {
 					"type": "boolean",

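The practical effect of defaulting to "safe" shows up in `syncBlocksSequential`: the header fetched for the configured tag becomes `lastKnownBlock`, and the same series adds a clamp so a sync chunk never reaches past it. A minimal, self-contained sketch of that bound follows; `clampToProtectedHead` and the sample numbers are illustrative names and values, not code from the repository.

```go
package main

import "fmt"

// clampToProtectedHead reproduces the bound added to syncBlocksSequential:
// a chunk of SyncChunkSize blocks never extends past the head returned for
// the configured SyncBlockProtection tag ("safe" by default after this patch).
func clampToProtectedHead(fromBlock, chunkSize, protectedHead uint64) uint64 {
	toBlock := fromBlock + chunkSize
	if toBlock > protectedHead {
		toBlock = protectedHead
	}
	return toBlock
}

func main() {
	// Safe head at 1_000_050 with SyncChunkSize=100:
	fmt.Println(clampToProtectedHead(1_000_000, 100, 1_000_050)) // 1000050: clamped to the safe head
	fmt.Println(clampToProtectedHead(999_000, 100, 1_000_050))   // 999100: full chunk fits below the head
}
```
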
From e441a8974bc5997ae79b873adf7f757681127f82 Mon Sep 17 00:00:00 2001
From: JianGuo 
Date: Wed, 10 Apr 2024 21:18:14 +0800
Subject: [PATCH 11/11] Fix ut

---
 test/e2e/sc_test.go | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/test/e2e/sc_test.go b/test/e2e/sc_test.go
index 501f8216a3..720aa68cd2 100644
--- a/test/e2e/sc_test.go
+++ b/test/e2e/sc_test.go
@@ -669,7 +669,12 @@ func TestCounterAndBlock(t *testing.T) {
 	for _, network := range networks {
 		log.Debugf(network.Name)
 		client := operations.MustGetClient(network.URL)
-		auth := operations.MustGetAuth(network.PrivateKey, network.ChainID)
+		priKey := network.PrivateKey
+		if network.Name == "Local L2" {
+			priKey = "0xde3ca643a52f5543e84ba984c4419ff40dbabd0e483c31c1d09fee8168d68e38"
+		}
+		auth := operations.MustGetAuth(priKey, network.ChainID)
+		log.Infof("auth:%v, chainID:%v", auth.From.String(), network.ChainID)
 
 		_, scTx, sc, err := CounterAndBlock.DeployCounterAndBlock(auth, client)
 		require.NoError(t, err)
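
The test fix above pins a specific private key for the "Local L2" network and logs the address derived from it. As a hedged, standalone illustration (using plain go-ethereum calls rather than the repo's `operations` test helpers, and with a placeholder chain ID), this is roughly how a hex private key resolves to the `auth.From` address that the new log line prints:

```go
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Test-only key pinned for the "Local L2" network in the patched test.
	priKey := "0xde3ca643a52f5543e84ba984c4419ff40dbabd0e483c31c1d09fee8168d68e38"
	chainID := big.NewInt(1001) // placeholder; the real test takes it from network.ChainID

	// crypto.HexToECDSA expects the key without the "0x" prefix.
	key, err := crypto.HexToECDSA(strings.TrimPrefix(priKey, "0x"))
	if err != nil {
		panic(err)
	}
	auth, err := bind.NewKeyedTransactorWithChainID(key, chainID)
	if err != nil {
		panic(err)
	}
	// Equivalent of the new log line: which address will sign the deployment.
	fmt.Printf("auth:%v, chainID:%v\n", auth.From.String(), chainID)
}
```
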