From 5576e50b42f264881aec06c83502f246c53f9e19 Mon Sep 17 00:00:00 2001
From: agnusmor <100322135+agnusmor@users.noreply.github.com>
Date: Mon, 4 Dec 2023 16:53:42 +0100
Subject: [PATCH] Sequencer Etrog refactor (#2878)

* sequencer etrog draft 1
* refactor sequencer etrog implementation
---
 config/config_test.go | 20 +-
 config/default.go | 5 +-
 .../environments/local/local.node.config.toml | 5 +-
 docs/config-file/node-config-doc.html | 8 +-
 docs/config-file/node-config-doc.md | 95 +-
 docs/config-file/node-config-schema.json | 33 +-
 sequencer/addrqueue.go | 12 +-
 sequencer/batch.go | 316 +++++++
 sequencer/closingsignalsmanager.go | 2 +-
 sequencer/config.go | 13 +-
 sequencer/dbmanager.go | 199 ++--
 sequencer/errors.go | 4 +
 sequencer/finalizer.go | 848 ++++++------
 sequencer/finalizer_test.go | 200 ++---
 sequencer/interfaces.go | 16 +-
 sequencer/l2block.go | 433 +++++++
 sequencer/mock_db_manager.go | 114 ++-
 sequencer/mock_etherman.go | 24 -
 sequencer/mock_state.go | 56 +-
 sequencer/mock_worker.go | 14 +-
 sequencer/sequencer.go | 23 +-
 sequencer/txtracker.go | 1 +
 sequencer/worker.go | 83 +-
 sequencer/worker_test.go | 2 +-
 sequencesender/sequencesender.go | 6 +-
 state/batchV2.go | 7 +-
 state/helper.go | 2 +-
 state/interfaces.go | 3 +-
 state/pgstatestorage/batch.go | 4 +-
 state/pgstatestorage/l1infotree.go | 22 +-
 state/pgstatestorage/l2block.go | 1 +
 state/transaction.go | 4 +-
 state/types.go | 2 +-
 test/config/debug.node.config.toml | 5 +-
 test/config/test.node.config.toml | 5 +-
 35 files changed, 1643 insertions(+), 944 deletions(-)
 create mode 100644 sequencer/batch.go
 create mode 100644 sequencer/l2block.go

diff --git a/config/config_test.go b/config/config_test.go
index 89943a4a5d..4a382e5ecb 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -92,6 +92,14 @@ func Test_Defaults(t *testing.T) {
			path: "Sequencer.Finalizer.GERFinalityNumberOfBlocks",
			expectedValue: uint64(64),
		},
+		{
+			path: "Sequencer.Finalizer.ForcedBatchesFinalityNumberOfBlocks",
+			expectedValue: uint64(64),
+		},
+		{
+			path: "Sequencer.Finalizer.L1InfoRootFinalityNumberOfBlocks",
+			expectedValue: uint64(64),
+		},
		{
			path: "Sequencer.Finalizer.ClosingSignalsManagerWaitForCheckingL1Timeout",
			expectedValue: types.NewDuration(10 * time.Second),
		},
@@ -105,8 +113,12 @@ func Test_Defaults(t *testing.T) {
			expectedValue: types.NewDuration(10 * time.Second),
		},
		{
-			path: "Sequencer.Finalizer.ForcedBatchesFinalityNumberOfBlocks",
-			expectedValue: uint64(64),
+			path: "Sequencer.Finalizer.WaitForCheckingL1InfoTree",
+			expectedValue: types.NewDuration(10 * time.Second),
+		},
+		{
+			path: "Sequencer.Finalizer.L2BlockTime",
+			expectedValue: types.NewDuration(3 * time.Second),
		},
		{
			path: "Sequencer.Finalizer.StopSequencerOnBatchNum",
			expectedValue: uint64(0),
		},
		{
			path: "Sequencer.Finalizer.TimestampResolution",
			expectedValue: types.NewDuration(10 * time.Second),
		},
+		{
+			path: "Sequencer.Finalizer.L2BlockTime",
+			expectedValue: types.NewDuration(10 * time.Second),
+		},
		{
			path: "Sequencer.DBManager.PoolRetrievalInterval",
			expectedValue: types.NewDuration(500 * time.Millisecond),
diff --git a/config/default.go b/config/default.go
index a12998c7c9..724818d373 100644
--- a/config/default.go
+++ b/config/default.go
@@ -126,11 +126,14 @@ MaxTxLifetime = "3h"
	SleepDuration = "100ms"
	ResourcePercentageToCloseBatch = 10
	GERFinalityNumberOfBlocks = 64
+	ForcedBatchesFinalityNumberOfBlocks = 64
+	L1InfoRootFinalityNumberOfBlocks = 64
	ClosingSignalsManagerWaitForCheckingL1Timeout = "10s"
ClosingSignalsManagerWaitForCheckingGER = "10s" ClosingSignalsManagerWaitForCheckingForcedBatches = "10s" - ForcedBatchesFinalityNumberOfBlocks = 64 + WaitForCheckingL1InfoTree = "10s" TimestampResolution = "10s" + L2BlockTime = "3s" StopSequencerOnBatchNum = 0 SequentialReprocessFullBatch = false [Sequencer.DBManager] diff --git a/config/environments/local/local.node.config.toml b/config/environments/local/local.node.config.toml index a1c8ee5f4f..12665869e5 100644 --- a/config/environments/local/local.node.config.toml +++ b/config/environments/local/local.node.config.toml @@ -89,11 +89,14 @@ MaxTxLifetime = "3h" SleepDuration = "100ms" ResourcePercentageToCloseBatch = 10 GERFinalityNumberOfBlocks = 0 + ForcedBatchesFinalityNumberOfBlocks = 64 + L1InfoRootFinalityNumberOfBlocks = 64 ClosingSignalsManagerWaitForCheckingL1Timeout = "10s" ClosingSignalsManagerWaitForCheckingGER = "10s" ClosingSignalsManagerWaitForCheckingForcedBatches = "10s" - ForcedBatchesFinalityNumberOfBlocks = 64 + WaitForCheckingL1InfoTree = "10s" TimestampResolution = "10s" + L2BlockTime = "3s" StopSequencerOnBatchNum = 0 [Sequencer.DBManager] PoolRetrievalInterval = "500ms" diff --git a/docs/config-file/node-config-doc.html b/docs/config-file/node-config-doc.html index 81c8b0d8bd..ad842668f6 100644 --- a/docs/config-file/node-config-doc.html +++ b/docs/config-file/node-config-doc.html @@ -42,14 +42,18 @@
"300ms"
SleepDuration is the time the finalizer sleeps between each iteration, if there are no transactions to be processed
"1m"
"300ms"
-
ResourcePercentageToCloseBatch is the percentage window of the resource left out for the batch to be closed
GERFinalityNumberOfBlocks is number of blocks to consider GER final
ClosingSignalsManagerWaitForCheckingL1Timeout is used by the closing signals manager to wait for its operation
"1m"
+
ResourcePercentageToCloseBatch is the percentage window of the resource left out for the batch to be closed
GERFinalityNumberOfBlocks is number of blocks to consider GER final
ForcedBatchesFinalityNumberOfBlocks is number of blocks to consider forced batches final
L1InfoRootFinalityNumberOfBlocks is number of blocks to consider L1InfoRoot final
ClosingSignalsManagerWaitForCheckingL1Timeout is used by the closing signals manager to wait for its operation
"1m"
"300ms"
ClosingSignalsManagerWaitForCheckingGER is used by the closing signals manager to wait for its operation
"1m"
"300ms"
ClosingSignalsManagerWaitForCheckingL1Timeout is used by the closing signals manager to wait for its operation
"1m"
"300ms"
-
ForcedBatchesFinalityNumberOfBlocks is number of blocks to consider GER final
TimestampResolution is the resolution of the timestamp used to close a batch
"1m"
+
WaitForCheckingL1InfoRoot is the wait time to check if the L1InfoRoot has been updated
"1m"
+
"300ms"
+
TimestampResolution is the resolution of the timestamp used to close a batch
"1m"
"300ms"
+
L2BlockTime is the resolution of the timestamp used to close an L2 block
"1m"
+
"300ms"
StopSequencerOnBatchNum specifies the batch number where the Sequencer will stop to process more transactions and generate new batches. The Sequencer will halt after it closes the batch equal to this number
SequentialReprocessFullBatch indicates if the reprocess of a closed batch (sanity check) must be done in a
sequential way (instead than in parallel)
"1m"
"300ms"
"1m"
diff --git a/docs/config-file/node-config-doc.md b/docs/config-file/node-config-doc.md
index be8a597bac..6aac9021d0 100644
--- a/docs/config-file/node-config-doc.md
+++ b/docs/config-file/node-config-doc.md
@@ -1679,11 +1679,14 @@ MaxTxLifetime="3h0m0s"
| - [SleepDuration](#Sequencer_Finalizer_SleepDuration ) | No | string | No | - | Duration |
| - [ResourcePercentageToCloseBatch](#Sequencer_Finalizer_ResourcePercentageToCloseBatch ) | No | integer | No | - | ResourcePercentageToCloseBatch is the percentage window of the resource left out for the batch to be closed |
| - [GERFinalityNumberOfBlocks](#Sequencer_Finalizer_GERFinalityNumberOfBlocks ) | No | integer | No | - | GERFinalityNumberOfBlocks is number of blocks to consider GER final |
+| - [ForcedBatchesFinalityNumberOfBlocks](#Sequencer_Finalizer_ForcedBatchesFinalityNumberOfBlocks ) | No | integer | No | - | ForcedBatchesFinalityNumberOfBlocks is number of blocks to consider forced batches final |
+| - [L1InfoRootFinalityNumberOfBlocks](#Sequencer_Finalizer_L1InfoRootFinalityNumberOfBlocks ) | No | integer | No | - | L1InfoRootFinalityNumberOfBlocks is number of blocks to consider L1InfoRoot final |
| - [ClosingSignalsManagerWaitForCheckingL1Timeout](#Sequencer_Finalizer_ClosingSignalsManagerWaitForCheckingL1Timeout ) | No | string | No | - | Duration |
| - [ClosingSignalsManagerWaitForCheckingGER](#Sequencer_Finalizer_ClosingSignalsManagerWaitForCheckingGER ) | No | string | No | - | Duration |
| - [ClosingSignalsManagerWaitForCheckingForcedBatches](#Sequencer_Finalizer_ClosingSignalsManagerWaitForCheckingForcedBatches ) | No | string | No | - | Duration |
-| - [ForcedBatchesFinalityNumberOfBlocks](#Sequencer_Finalizer_ForcedBatchesFinalityNumberOfBlocks ) | No | integer | No | - | ForcedBatchesFinalityNumberOfBlocks is number of blocks to consider GER final |
+| - [WaitForCheckingL1InfoRoot](#Sequencer_Finalizer_WaitForCheckingL1InfoRoot ) | No | string | No | - | Duration |
| - [TimestampResolution](#Sequencer_Finalizer_TimestampResolution ) | No | string | No | - | Duration |
+| - [L2BlockTime](#Sequencer_Finalizer_L2BlockTime ) | No | string | No | - | Duration |
| - [StopSequencerOnBatchNum](#Sequencer_Finalizer_StopSequencerOnBatchNum ) | No | integer | No | - | StopSequencerOnBatchNum specifies the batch number where the Sequencer will stop to process more transactions and generate new batches. The Sequencer will halt after it closes the batch equal to this number |
| - [SequentialReprocessFullBatch](#Sequencer_Finalizer_SequentialReprocessFullBatch ) | No | boolean | No | - | SequentialReprocessFullBatch indicates if the reprocess of a closed batch (sanity check) must be done in a
sequential way (instead than in parallel) |
@@ -1793,7 +1796,35 @@ ResourcePercentageToCloseBatch=10
GERFinalityNumberOfBlocks=64
```
-#### 10.6.6. `Sequencer.Finalizer.ClosingSignalsManagerWaitForCheckingL1Timeout`
+#### 10.6.6. `Sequencer.Finalizer.ForcedBatchesFinalityNumberOfBlocks`
+
+**Type:** : `integer`
+
+**Default:** `64`
+
+**Description:** ForcedBatchesFinalityNumberOfBlocks is number of blocks to consider forced batches final
+
+**Example setting the default value** (64):
+```
+[Sequencer.Finalizer]
+ForcedBatchesFinalityNumberOfBlocks=64
+```
+
+#### 10.6.7. `Sequencer.Finalizer.L1InfoRootFinalityNumberOfBlocks`
+
+**Type:** : `integer`
+
+**Default:** `64`
+
+**Description:** L1InfoRootFinalityNumberOfBlocks is number of blocks to consider L1InfoRoot final
+
+**Example setting the default value** (64):
+```
+[Sequencer.Finalizer]
+L1InfoRootFinalityNumberOfBlocks=64
+```
+
+#### 10.6.8. `Sequencer.Finalizer.ClosingSignalsManagerWaitForCheckingL1Timeout`
**Title:** Duration
@@ -1819,7 +1850,7 @@ GERFinalityNumberOfBlocks=64
ClosingSignalsManagerWaitForCheckingL1Timeout="10s"
```
-#### 10.6.7. `Sequencer.Finalizer.ClosingSignalsManagerWaitForCheckingGER`
+#### 10.6.9. `Sequencer.Finalizer.ClosingSignalsManagerWaitForCheckingGER`
**Title:** Duration
@@ -1845,7 +1876,7 @@ ClosingSignalsManagerWaitForCheckingL1Timeout="10s"
ClosingSignalsManagerWaitForCheckingGER="10s"
```
-#### 10.6.8. `Sequencer.Finalizer.ClosingSignalsManagerWaitForCheckingForcedBatches`
+#### 10.6.10. `Sequencer.Finalizer.ClosingSignalsManagerWaitForCheckingForcedBatches`
**Title:** Duration
@@ -1871,21 +1902,33 @@ ClosingSignalsManagerWaitForCheckingGER="10s"
ClosingSignalsManagerWaitForCheckingForcedBatches="10s"
```
-#### 10.6.9. `Sequencer.Finalizer.ForcedBatchesFinalityNumberOfBlocks`
+#### 10.6.11. `Sequencer.Finalizer.WaitForCheckingL1InfoRoot`
-**Type:** : `integer`
+**Title:** Duration
-**Default:** `64`
+**Type:** : `string`
-**Description:** ForcedBatchesFinalityNumberOfBlocks is number of blocks to consider GER final
+**Default:** `"0s"`
-**Example setting the default value** (64):
+**Description:** WaitForCheckingL1InfoRoot is the wait time to check if the L1InfoRoot has been updated
+
+**Examples:**
+
+```json
+"1m"
+```
+
+```json
+"300ms"
+```
+
+**Example setting the default value** ("0s"):
```
[Sequencer.Finalizer]
-ForcedBatchesFinalityNumberOfBlocks=64
+WaitForCheckingL1InfoRoot="0s"
```
-#### 10.6.10. `Sequencer.Finalizer.TimestampResolution`
+#### 10.6.12. `Sequencer.Finalizer.TimestampResolution`
**Title:** Duration
@@ -1911,7 +1954,33 @@ ForcedBatchesFinalityNumberOfBlocks=64
TimestampResolution="10s"
```
-#### 10.6.11. `Sequencer.Finalizer.StopSequencerOnBatchNum`
+#### 10.6.13. `Sequencer.Finalizer.L2BlockTime`
+
+**Title:** Duration
+
+**Type:** : `string`
+
+**Default:** `"3s"`
+
+**Description:** L2BlockTime is the resolution of the timestamp used to close an L2 block
+
+**Examples:**
+
+```json
+"1m"
+```
+
+```json
+"300ms"
+```
+
+**Example setting the default value** ("3s"):
+```
+[Sequencer.Finalizer]
+L2BlockTime="3s"
+```
+
+#### 10.6.14. `Sequencer.Finalizer.StopSequencerOnBatchNum`
**Type:** : `integer`
@@ -1925,7 +1994,7 @@ TimestampResolution="10s"
StopSequencerOnBatchNum=0
```
-#### 10.6.12. `Sequencer.Finalizer.SequentialReprocessFullBatch`
+#### 10.6.15. `Sequencer.Finalizer.SequentialReprocessFullBatch`
**Type:** : `boolean`
diff --git a/docs/config-file/node-config-schema.json b/docs/config-file/node-config-schema.json
index c6eedbdc80..665c455232 100644
--- a/docs/config-file/node-config-schema.json
+++ b/docs/config-file/node-config-schema.json
@@ -670,6 +670,16 @@
"description": "GERFinalityNumberOfBlocks is number of blocks to consider GER final",
"default": 64
},
+ "ForcedBatchesFinalityNumberOfBlocks": {
+ "type": "integer",
+ "description": "ForcedBatchesFinalityNumberOfBlocks is number of blocks to consider GER final",
+ "default": 64
+ },
+ "L1InfoRootFinalityNumberOfBlocks": {
+ "type": "integer",
+ "description": "L1InfoRootFinalityNumberOfBlocks is number of blocks to consider L1InfoRoot final",
+ "default": 64
+ },
"ClosingSignalsManagerWaitForCheckingL1Timeout": {
"type": "string",
"title": "Duration",
@@ -700,10 +710,15 @@
"300ms"
]
},
- "ForcedBatchesFinalityNumberOfBlocks": {
- "type": "integer",
- "description": "ForcedBatchesFinalityNumberOfBlocks is number of blocks to consider GER final",
- "default": 64
+ "WaitForCheckingL1InfoRoot": {
+ "type": "string",
+ "title": "Duration",
+ "description": "WaitForCheckingL1InfoRoot is the wait time to check if the L1InfoRoot has been updated",
+ "default": "0s",
+ "examples": [
+ "1m",
+ "300ms"
+ ]
},
"TimestampResolution": {
"type": "string",
@@ -715,6 +730,16 @@
"300ms"
]
},
+ "L2BlockTime": {
+ "type": "string",
+ "title": "Duration",
+ "description": "L2BlockTime is the resolution of the timestamp used to close a L2 block",
+ "default": "3s",
+ "examples": [
+ "1m",
+ "300ms"
+ ]
+ },
"StopSequencerOnBatchNum": {
"type": "integer",
"description": "StopSequencerOnBatchNum specifies the batch number where the Sequencer will stop to process more transactions and generate new batches. The Sequencer will halt after it closes the batch equal to this number",
diff --git a/sequencer/addrqueue.go b/sequencer/addrqueue.go
index 6adb8787cf..d30bf1bff4 100644
--- a/sequencer/addrqueue.go
+++ b/sequencer/addrqueue.go
@@ -101,7 +101,7 @@ func (a *addrQueue) ExpireTransactions(maxTime time.Duration) ([]*TxTracker, *Tx
if txTracker.ReceivedAt.Add(maxTime).Before(time.Now()) {
txs = append(txs, txTracker)
delete(a.notReadyTxs, txTracker.Nonce)
- log.Debugf("Deleting notReadyTx %s from addrQueue %s", txTracker.HashStr, a.fromStr)
+ log.Debugf("deleting notReadyTx %s from addrQueue %s", txTracker.HashStr, a.fromStr)
}
}
@@ -109,7 +109,7 @@ func (a *addrQueue) ExpireTransactions(maxTime time.Duration) ([]*TxTracker, *Tx
prevReadyTx = a.readyTx
txs = append(txs, a.readyTx)
a.readyTx = nil
- log.Debugf("Deleting readyTx %s from addrQueue %s", prevReadyTx.HashStr, a.fromStr)
+ log.Debugf("deleting readyTx %s from addrQueue %s", prevReadyTx.HashStr, a.fromStr)
}
return txs, prevReadyTx
@@ -125,14 +125,14 @@ func (a *addrQueue) deleteTx(txHash common.Hash) (deletedReadyTx *TxTracker) {
txHashStr := txHash.String()
if (a.readyTx != nil) && (a.readyTx.HashStr == txHashStr) {
- log.Infof("Deleting readyTx %s from addrQueue %s", txHashStr, a.fromStr)
+ log.Infof("deleting readyTx %s from addrQueue %s", txHashStr, a.fromStr)
prevReadyTx := a.readyTx
a.readyTx = nil
return prevReadyTx
} else {
for _, txTracker := range a.notReadyTxs {
if txTracker.HashStr == txHashStr {
- log.Infof("Deleting notReadyTx %s from addrQueue %s", txHashStr, a.fromStr)
+ log.Infof("deleting notReadyTx %s from addrQueue %s", txHashStr, a.fromStr)
delete(a.notReadyTxs, txTracker.Nonce)
}
}
@@ -164,7 +164,7 @@ func (a *addrQueue) updateCurrentNonceBalance(nonce *uint64, balance *big.Int) (
txsToDelete := make([]*TxTracker, 0)
if balance != nil {
- log.Infof("Updating balance for addrQueue %s from %s to %s", a.fromStr, a.currentBalance.String(), balance.String())
+ log.Debugf("opdating balance for addrQueue %s from %s to %s", a.fromStr, a.currentBalance.String(), balance.String())
a.currentBalance = balance
}
@@ -179,7 +179,7 @@ func (a *addrQueue) updateCurrentNonceBalance(nonce *uint64, balance *big.Int) (
}
}
for _, txTracker := range txsToDelete {
- log.Infof("Deleting notReadyTx with nonce %d from addrQueue %s", txTracker.Nonce, a.fromStr)
+ log.Infof("deleting notReadyTx with nonce %d from addrQueue %s", txTracker.Nonce, a.fromStr)
delete(a.notReadyTxs, txTracker.Nonce)
}
}
diff --git a/sequencer/batch.go b/sequencer/batch.go
new file mode 100644
index 0000000000..38a35016d8
--- /dev/null
+++ b/sequencer/batch.go
@@ -0,0 +1,316 @@
+package sequencer
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/0xPolygonHermez/zkevm-node/log"
+ "github.com/0xPolygonHermez/zkevm-node/sequencer/metrics"
+ "github.com/0xPolygonHermez/zkevm-node/state"
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// Batch represents a wip or processed batch.
+type Batch struct {
+ batchNumber uint64
+ coinbase common.Address
+ timestamp time.Time
+ initialStateRoot common.Hash
+ stateRoot common.Hash
+ localExitRoot common.Hash
+ globalExitRoot common.Hash // 0x000...0 (ZeroHash) means to not update
+ accInputHash common.Hash //TODO: review use
+ countOfTxs int
+ remainingResources state.BatchResources
+ closingReason state.ClosingReason
+}
+
+func (w *Batch) isEmpty() bool {
+ return w.countOfTxs == 0
+}
+
+// getLastStateRoot gets the state root from the latest batch
+func (f *finalizer) getLastStateRoot(ctx context.Context) (common.Hash, error) {
+ var oldStateRoot common.Hash
+
+ batches, err := f.dbManager.GetLastNBatches(ctx, 2) //nolint:gomnd
+ if err != nil {
+ return common.Hash{}, fmt.Errorf("failed to get last %d batches, err: %w", 2, err) //nolint:gomnd
+ }
+
+ if len(batches) == 1 { //nolint:gomnd
+ oldStateRoot = batches[0].StateRoot
+ } else if len(batches) == 2 { //nolint:gomnd
+ oldStateRoot = batches[1].StateRoot
+ }
+
+ return oldStateRoot, nil
+}
+
+// getWIPBatch gets the last batch if still wip or opens a new one
+func (f *finalizer) getWIPBatch(ctx context.Context) {
+ for !f.isSynced(ctx) {
+ log.Info("wait for synchronizer to sync last batch")
+ time.Sleep(time.Second)
+ }
+
+ lastBatchNum, err := f.dbManager.GetLastBatchNumber(ctx)
+ if err != nil {
+ log.Fatalf("failed to get last batch number. Error: %s", err)
+ }
+
+ if lastBatchNum == 0 {
+ // GENESIS batch
+ processingCtx := f.dbManager.CreateFirstBatch(ctx, f.sequencerAddress)
+ timestamp := processingCtx.Timestamp
+ oldStateRoot, err := f.getLastStateRoot(ctx)
+ if err != nil {
+ log.Fatalf("failed to get old state root. Error: %s", err)
+ }
+ f.wipBatch = &Batch{
+ globalExitRoot: processingCtx.GlobalExitRoot,
+ initialStateRoot: oldStateRoot,
+ stateRoot: oldStateRoot,
+ batchNumber: processingCtx.BatchNumber,
+ coinbase: processingCtx.Coinbase,
+ timestamp: timestamp,
+ remainingResources: getMaxRemainingResources(f.batchConstraints),
+ }
+ } else {
+ // Get the last batch if is still wip, if not open a new one
+ lastBatch, err := f.dbManager.GetBatchByNumber(ctx, lastBatchNum, nil)
+ if err != nil {
+ log.Fatalf("failed to get last batch. Error: %s", err)
+ }
+
+ isClosed, err := f.dbManager.IsBatchClosed(ctx, lastBatchNum)
+ if err != nil {
+ log.Fatalf("failed to check if batch is closed. Error: %s", err)
+ }
+
+ log.Infof("batch %d isClosed: %v", lastBatchNum, isClosed)
+
+ if isClosed { //open new wip batch
+ ger, _, err := f.dbManager.GetLatestGer(ctx, f.cfg.GERFinalityNumberOfBlocks)
+ if err != nil {
+ log.Fatalf("failed to get latest ger. Error: %s", err)
+ }
+
+ oldStateRoot := lastBatch.StateRoot
+ f.wipBatch, err = f.openNewWIPBatch(ctx, lastBatchNum+1, ger.GlobalExitRoot, oldStateRoot)
+ if err != nil {
+ log.Fatalf("failed to open new wip batch. Error: %s", err)
+ }
+ } else { /// get wip batch
+ f.wipBatch, err = f.dbManager.GetWIPBatch(ctx)
+ if err != nil {
+ log.Fatalf("failed to get wip batch. Error: %s", err)
+ }
+ }
+ }
+
+ log.Infof("initial batch: %d, initialStateRoot: %s, stateRoot: %s, coinbase: %s, GER: %s, LER: %s",
+ f.wipBatch.batchNumber, f.wipBatch.initialStateRoot.String(), f.wipBatch.stateRoot.String(), f.wipBatch.coinbase.String(),
+ f.wipBatch.globalExitRoot.String(), f.wipBatch.localExitRoot.String())
+}
+
+// finalizeBatch retries until it successfully closes the current batch and opens a new one, potentially processing forced batches between closing the batch and opening the resulting new empty one
+func (f *finalizer) finalizeBatch(ctx context.Context) {
+ start := time.Now()
+ defer func() {
+ metrics.ProcessingTime(time.Since(start))
+ }()
+
+ var err error
+ f.wipBatch, err = f.closeAndOpenNewWIPBatch(ctx)
+ for err != nil { //TODO: we need to review if this for loop is needed or if it's better to halt if we have an error
+ log.Errorf("failed to create new WIP batch. Error: %s", err)
+ f.wipBatch, err = f.closeAndOpenNewWIPBatch(ctx)
+ }
+
+ log.Infof("new WIP batch %d", f.wipBatch.batchNumber)
+}
+
+// closeAndOpenNewWIPBatch closes the current batch and opens a new one, potentially processing forced batches between closing the batch and opening the resulting new empty one
+func (f *finalizer) closeAndOpenNewWIPBatch(ctx context.Context) (*Batch, error) {
+ // Finalize the wip L2 block if it has transactions, if not we keep it open to store it in the new wip batch
+ if !f.wipL2Block.isEmpty() {
+ f.finalizeL2Block(ctx)
+ }
+
+ // Wait until all L2 blocks are processed
+ startWait := time.Now()
+ f.pendingL2BlocksToProcessWG.Wait()
+ endWait := time.Now()
+ log.Debugf("waiting for pending L2 blocks to be processed took: %s", endWait.Sub(startWait).String())
+
+ // Wait until all L2 blocks are stored
+ startWait = time.Now()
+ f.pendingL2BlocksToStoreWG.Wait()
+ endWait = time.Now()
+ log.Debugf("waiting for pending L2 blocks to be stored took: %s", endWait.Sub(startWait).String())
+
+ var err error
+ if f.wipBatch.stateRoot == state.ZeroHash {
+ return nil, errors.New("state root must have value to close batch")
+ }
+
+ // We need to process the batch to update the state root before closing the batch
+ if f.wipBatch.initialStateRoot == f.wipBatch.stateRoot {
+ log.Info("reprocessing batch because the state root has not changed...")
+ _, err = f.processTransaction(ctx, nil, true)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Reprocess full batch as sanity check
+ //TODO: Uncomment this
+ /*if f.cfg.SequentialReprocessFullBatch {
+ // Do the full batch reprocess now
+ _, err := f.reprocessFullBatch(ctx, f.wipBatch.batchNumber, f.wipBatch.initialStateRoot, f.wipBatch.stateRoot)
+ if err != nil {
+ // There is an error reprocessing the batch. We halt the execution of the Sequencer at this point
+ f.halt(ctx, fmt.Errorf("halting Sequencer because of error reprocessing full batch %d (sanity check). Error: %s ", f.wipBatch.batchNumber, err))
+ }
+ } else {
+ // Do the full batch reprocess in parallel
+ go func() {
+ _, _ = f.reprocessFullBatch(ctx, f.wipBatch.batchNumber, f.wipBatch.initialStateRoot, f.wipBatch.stateRoot)
+ }()
+ }*/
+
+ // Close the wip batch
+ err = f.closeWIPBatch(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to close batch, err: %w", err)
+ }
+
+ log.Infof("batch %d closed", f.wipBatch.batchNumber)
+
+ // Check if the batch is empty and sending a GER Update to the stream is needed
+ if f.streamServer != nil && f.wipBatch.isEmpty() && f.currentGERHash != f.previousGERHash {
+ updateGer := state.DSUpdateGER{
+ BatchNumber: f.wipBatch.batchNumber,
+ Timestamp: f.wipBatch.timestamp.Unix(),
+ GlobalExitRoot: f.wipBatch.globalExitRoot,
+ Coinbase: f.sequencerAddress,
+ ForkID: uint16(f.dbManager.GetForkIDByBatchNumber(f.wipBatch.batchNumber)),
+ StateRoot: f.wipBatch.stateRoot,
+ }
+
+ err = f.streamServer.StartAtomicOp()
+ if err != nil {
+ log.Errorf("failed to start atomic op for Update GER on batch %v: %v", f.wipBatch.batchNumber, err)
+ }
+
+ _, err = f.streamServer.AddStreamEntry(state.EntryTypeUpdateGER, updateGer.Encode())
+ if err != nil {
+ log.Errorf("failed to add stream entry for Update GER on batch %v: %v", f.wipBatch.batchNumber, err)
+ }
+
+ err = f.streamServer.CommitAtomicOp()
+ if err != nil {
+ log.Errorf("failed to commit atomic op for Update GER on batch %v: %v", f.wipBatch.batchNumber, err)
+ }
+ }
+
+ // Metadata for the next batch
+ stateRoot := f.wipBatch.stateRoot
+ lastBatchNumber := f.wipBatch.batchNumber
+
+ // Process Forced Batches
+ if len(f.nextForcedBatches) > 0 {
+ lastBatchNumber, stateRoot, err = f.processForcedBatches(ctx, lastBatchNumber, stateRoot)
+ if err != nil {
+ log.Warnf("failed to process forced batch, err: %s", err)
+ }
+ }
+
+ // Take into consideration the GER
+ f.nextGERMux.Lock()
+ if f.nextGER != state.ZeroHash {
+ f.previousGERHash = f.currentGERHash
+ f.currentGERHash = f.nextGER
+ }
+ f.nextGER = state.ZeroHash
+ f.nextGERDeadline = 0
+ f.nextGERMux.Unlock()
+
+ batch, err := f.openNewWIPBatch(ctx, lastBatchNumber+1, f.currentGERHash, stateRoot)
+
+ // Subtract the bytes needed to store the changeL2Block tx into the new batch
+ batch.remainingResources.Bytes = batch.remainingResources.Bytes - changeL2BlockSize
+
+ return batch, err
+}
+
+// openNewWIPBatch opens a new batch in the state and returns it as WipBatch
+func (f *finalizer) openNewWIPBatch(ctx context.Context, batchNum uint64, ger, stateRoot common.Hash) (*Batch, error) {
+ // open next batch
+ processingCtx := state.ProcessingContext{
+ BatchNumber: batchNum,
+ Coinbase: f.sequencerAddress,
+ Timestamp: now(),
+ GlobalExitRoot: ger,
+ }
+
+ dbTx, err := f.dbManager.BeginStateTransaction(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("failed to begin state transaction to open batch, err: %w", err)
+ }
+
+ // OpenBatch opens a new batch in the state
+ err = f.dbManager.OpenBatch(ctx, processingCtx, dbTx)
+ if err != nil {
+ if rollbackErr := dbTx.Rollback(ctx); rollbackErr != nil {
+ return nil, fmt.Errorf("failed to rollback dbTx: %s. Error: %w", rollbackErr.Error(), err)
+ }
+ return nil, fmt.Errorf("failed to open new batch. Error: %w", err)
+ }
+
+ if err := dbTx.Commit(ctx); err != nil {
+ return nil, fmt.Errorf("failed to commit database transaction for opening a batch. Error: %w", err)
+ }
+
+ // Check if synchronizer is up-to-date
+ for !f.isSynced(ctx) {
+ log.Info("wait for synchronizer to sync last batch")
+ time.Sleep(time.Second)
+ }
+
+ return &Batch{
+ batchNumber: batchNum,
+ coinbase: f.sequencerAddress,
+ initialStateRoot: stateRoot,
+ stateRoot: stateRoot,
+ timestamp: processingCtx.Timestamp,
+ globalExitRoot: ger,
+ remainingResources: getMaxRemainingResources(f.batchConstraints),
+ closingReason: state.EmptyClosingReason,
+ }, err
+}
+
+// closeWIPBatch closes the current batch in the state
+func (f *finalizer) closeWIPBatch(ctx context.Context) error {
+ transactions, effectivePercentages, err := f.dbManager.GetTransactionsByBatchNumber(ctx, f.wipBatch.batchNumber)
+ if err != nil {
+ return fmt.Errorf("failed to get transactions from transactions, err: %w", err)
+ }
+ for i, tx := range transactions {
+ log.Debugf("[closeWIPBatch] BatchNum: %d, Tx position: %d, txHash: %s", f.wipBatch.batchNumber, i, tx.Hash().String())
+ }
+ usedResources := getUsedBatchResources(f.batchConstraints, f.wipBatch.remainingResources)
+ receipt := ClosingBatchParameters{
+ BatchNumber: f.wipBatch.batchNumber,
+ StateRoot: f.wipBatch.stateRoot,
+ LocalExitRoot: f.wipBatch.localExitRoot,
+ Txs: transactions,
+ EffectivePercentages: effectivePercentages,
+ BatchResources: usedResources,
+ ClosingReason: f.wipBatch.closingReason,
+ }
+ return f.dbManager.CloseBatch(ctx, receipt)
+}
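closeWIPBatch above reports the batch's consumed resources via getUsedBatchResources, which is defined elsewhere in the sequencer package and not part of this diff. A minimal sketch of the idea, using simplified stand-in types instead of the real state.BatchResources and batch constraints config:

```go
package main

import "fmt"

// Simplified stand-ins for the sequencer's batch resource types; the real code
// works with state.BatchResources and the batch constraints config.
type resources struct {
	Bytes uint64
	Gas   uint64
	Steps uint64
}

// usedResources illustrates the idea behind getUsedBatchResources: what a batch
// consumed is the configured maximum minus whatever is still remaining when the
// batch is closed.
func usedResources(max, remaining resources) resources {
	return resources{
		Bytes: max.Bytes - remaining.Bytes,
		Gas:   max.Gas - remaining.Gas,
		Steps: max.Steps - remaining.Steps,
	}
}

func main() {
	max := resources{Bytes: 120_000, Gas: 30_000_000, Steps: 8_388_608}
	remaining := resources{Bytes: 119_991, Gas: 29_500_000, Steps: 8_000_000}
	fmt.Printf("%+v\n", usedResources(max, remaining)) // {Bytes:9 Gas:500000 Steps:388608}
}
```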
diff --git a/sequencer/closingsignalsmanager.go b/sequencer/closingsignalsmanager.go
index 84aae76b0a..0b7a6da1e3 100644
--- a/sequencer/closingsignalsmanager.go
+++ b/sequencer/closingsignalsmanager.go
@@ -22,7 +22,7 @@ func newClosingSignalsManager(ctx context.Context, dbManager dbManagerInterface,
func (c *closingSignalsManager) Start() {
go c.checkForcedBatches()
- go c.checkGERUpdate()
+ //go c.checkGERUpdate() //TODO: delete this go func and all GER related data and funcs
}
func (c *closingSignalsManager) checkGERUpdate() {
diff --git a/sequencer/config.go b/sequencer/config.go
index 8f43428a3a..e0b01be527 100644
--- a/sequencer/config.go
+++ b/sequencer/config.go
@@ -62,6 +62,12 @@ type FinalizerCfg struct {
// GERFinalityNumberOfBlocks is number of blocks to consider GER final
GERFinalityNumberOfBlocks uint64 `mapstructure:"GERFinalityNumberOfBlocks"`
+ // ForcedBatchesFinalityNumberOfBlocks is number of blocks to consider forced batches final
+ ForcedBatchesFinalityNumberOfBlocks uint64 `mapstructure:"ForcedBatchesFinalityNumberOfBlocks"`
+
+ // L1InfoRootFinalityNumberOfBlocks is number of blocks to consider L1InfoRoot final
+ L1InfoRootFinalityNumberOfBlocks uint64 `mapstructure:"L1InfoRootFinalityNumberOfBlocks"`
+
// ClosingSignalsManagerWaitForCheckingL1Timeout is used by the closing signals manager to wait for its operation
ClosingSignalsManagerWaitForCheckingL1Timeout types.Duration `mapstructure:"ClosingSignalsManagerWaitForCheckingL1Timeout"`
@@ -71,12 +77,15 @@ type FinalizerCfg struct {
// ClosingSignalsManagerWaitForCheckingL1Timeout is used by the closing signals manager to wait for its operation
ClosingSignalsManagerWaitForCheckingForcedBatches types.Duration `mapstructure:"ClosingSignalsManagerWaitForCheckingForcedBatches"`
- // ForcedBatchesFinalityNumberOfBlocks is number of blocks to consider GER final
- ForcedBatchesFinalityNumberOfBlocks uint64 `mapstructure:"ForcedBatchesFinalityNumberOfBlocks"`
+ // WaitForCheckingL1InfoRoot is the wait time to check if the L1InfoRoot has been updated
+ WaitForCheckingL1InfoRoot types.Duration `mapstructure:"WaitForCheckingL1InfoRoot"`
// TimestampResolution is the resolution of the timestamp used to close a batch
TimestampResolution types.Duration `mapstructure:"TimestampResolution"`
+ // L2BlockTime is the resolution of the timestamp used to close an L2 block
+ L2BlockTime types.Duration `mapstructure:"L2BlockTime"`
+
// StopSequencerOnBatchNum specifies the batch number where the Sequencer will stop to process more transactions and generate new batches. The Sequencer will halt after it closes the batch equal to this number
StopSequencerOnBatchNum uint64 `mapstructure:"StopSequencerOnBatchNum"`
diff --git a/sequencer/dbmanager.go b/sequencer/dbmanager.go
index fe62f94db9..e235756b42 100644
--- a/sequencer/dbmanager.go
+++ b/sequencer/dbmanager.go
@@ -2,6 +2,7 @@ package sequencer
import (
"context"
+ "encoding/binary"
"math/big"
"time"
@@ -9,8 +10,10 @@ import (
"github.com/0xPolygonHermez/zkevm-node/log"
"github.com/0xPolygonHermez/zkevm-node/pool"
"github.com/0xPolygonHermez/zkevm-node/state"
+ "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/trie"
"github.com/jackc/pgx/v4"
)
@@ -260,103 +263,95 @@ func (d *dbManager) DeleteTransactionFromPool(ctx context.Context, txHash common
return d.txPool.DeleteTransactionByHash(ctx, txHash)
}
-// StoreProcessedTxAndDeleteFromPool stores a tx into the state and changes it status in the pool
-func (d *dbManager) StoreProcessedTxAndDeleteFromPool(ctx context.Context, tx transactionToStore) error {
- d.checkStateInconsistency()
+func (d *dbManager) BuildChangeL2Block(deltaTimestamp uint32, l1InfoTreeIndex uint32) []byte {
+ changeL2Block := []byte{}
+ // changeL2Block transaction mark
+ changeL2Block = append(changeL2Block, changeL2BlockMark...)
+ // changeL2Block deltaTimeStamp
+ deltaTimestampBytes := make([]byte, 4)
+ binary.BigEndian.PutUint32(deltaTimestampBytes, deltaTimestamp)
+ changeL2Block = append(changeL2Block, deltaTimestampBytes...)
+ // changeL2Block l1InfoTreeIndexBytes
+ l1InfoTreeIndexBytes := make([]byte, 4)
+ binary.BigEndian.PutUint32(l1InfoTreeIndexBytes, uint32(l1InfoTreeIndex))
+ changeL2Block = append(changeL2Block, l1InfoTreeIndexBytes...)
- log.Debugf("Storing tx %v", tx.response.TxHash)
- dbTx, err := d.BeginStateTransaction(ctx)
- if err != nil {
- return err
- }
-
- l2BlockHeader, err := d.state.StoreTransaction(ctx, tx.batchNumber, tx.response, tx.coinbase, uint64(tx.timestamp.Unix()), tx.egpLog, dbTx)
- if err != nil {
- return err
- }
+ return changeL2Block
+}
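The entry built by BuildChangeL2Block has a fixed 9-byte layout, which is where the changeL2BlockSize = 9 constant in finalizer.go comes from: a 1-byte tx-type mark, a 4-byte big-endian deltaTimestamp and a 4-byte big-endian l1InfoTreeIndex. A small decoding sketch, assuming a 0x0b mark byte (the real value is the changeL2BlockMark constant, not shown in this diff):

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// decodeChangeL2Block parses the fixed 9-byte changeL2Block entry produced by
// BuildChangeL2Block: a 1-byte tx-type mark, a 4-byte big-endian deltaTimestamp
// and a 4-byte big-endian l1InfoTreeIndex. The 0x0b mark value is an assumption
// here; the real mark is the changeL2BlockMark constant in the sequencer package.
func decodeChangeL2Block(entry []byte) (deltaTimestamp, l1InfoTreeIndex uint32, err error) {
	if len(entry) != 9 {
		return 0, 0, errors.New("changeL2Block entry must be exactly 9 bytes")
	}
	if entry[0] != 0x0b { // assumed mark byte
		return 0, 0, errors.New("unexpected changeL2Block mark byte")
	}
	return binary.BigEndian.Uint32(entry[1:5]), binary.BigEndian.Uint32(entry[5:9]), nil
}

func main() {
	entry := []byte{0x0b, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x07}
	dt, idx, _ := decodeChangeL2Block(entry)
	fmt.Println(dt, idx) // 3 7
}
```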
- // Update batch l2 data
- batch, err := d.state.GetBatchByNumber(ctx, tx.batchNumber, dbTx)
- if err != nil {
- err2 := dbTx.Rollback(ctx)
- if err2 != nil {
- log.Errorf("failed to rollback dbTx when getting batch that gave err: %v. Rollback err: %v", err2, err)
- }
- return err
+// StoreL2Block stores an L2 block into the state, updates the batch data and changes the status of the block txs in the pool
+func (d *dbManager) StoreL2Block(ctx context.Context, batchNumber uint64, l2Block *state.ProcessBlockResponse, txsEGPLog []*state.EffectiveGasPriceLog, dbTx pgx.Tx) error {
+ if dbTx == nil {
+ return state.ErrDBTxNil
}
- forkID := d.state.GetForkIDByBatchNumber(tx.batchNumber)
- txData, err := state.EncodeTransaction(tx.response.Tx, uint8(tx.response.EffectivePercentage), forkID)
- if err != nil {
- return err
- }
- batch.BatchL2Data = append(batch.BatchL2Data, txData...)
+ log.Debugf("storing l2 block %d, txs %d, hash %d", l2Block.BlockNumber, len(l2Block.TransactionResponses), l2Block.BlockHash.String())
+ start := time.Now()
- if !tx.isForcedBatch {
- err = d.state.UpdateBatchL2DataAndLER(ctx, tx.batchNumber, batch.BatchL2Data, tx.batchResponse.NewLocalExitRoot, dbTx)
- if err != nil {
- err2 := dbTx.Rollback(ctx)
- if err2 != nil {
- log.Errorf("failed to rollback dbTx when updating batch l2 data that gave err: %v. Rollback err: %v", err2, err)
- }
- return err
- }
- }
+ //d.checkStateInconsistency() //TODO: review this
- err = dbTx.Commit(ctx)
+ dbTx, err := d.BeginStateTransaction(ctx)
if err != nil {
return err
}
- if !tx.isForcedBatch {
- // Change Tx status to selected
- err = d.txPool.UpdateTxStatus(ctx, tx.response.TxHash, pool.TxStatusSelected, false, nil)
- if err != nil {
- return err
- }
+ header := &types.Header{
+ Number: new(big.Int).SetUint64(l2Block.BlockNumber),
+ ParentHash: l2Block.ParentHash,
+ Coinbase: l2Block.Coinbase,
+ Root: l2Block.BlockHash, //BlockHash is the StateRoot in Etrog
+ //TODO: GasLimit: state.cfg.MaxCumulativeGasUsed,
+ Time: l2Block.Timestamp,
+ GasUsed: l2Block.GasUsed,
}
- log.Infof("StoreProcessedTxAndDeleteFromPool: successfully stored tx: %v for batch: %v", tx.response.TxHash.String(), tx.batchNumber)
+ l2Header := state.NewL2Header(header)
- // Send data to streamer
- if d.streamServer != nil {
- forkID := d.state.GetForkIDByBatchNumber(tx.batchNumber)
+ l2Header.GlobalExitRoot = &l2Block.GlobalExitRoot
+ //l2header.LocalExitRoot = l2Block.
- l2Block := state.DSL2Block{
- BatchNumber: tx.batchNumber,
- L2BlockNumber: l2BlockHeader.Number.Uint64(),
- Timestamp: tx.timestamp.Unix(),
- GlobalExitRoot: batch.GlobalExitRoot,
- Coinbase: tx.coinbase,
- ForkID: uint16(forkID),
- BlockHash: l2BlockHeader.Hash(),
- StateRoot: l2BlockHeader.Root,
- }
+ transactions := []*types.Transaction{}
+ storeTxsEGPData := []state.StoreTxEGPData{}
+ receipts := []*types.Receipt{}
- binaryTxData, err := tx.response.Tx.MarshalBinary()
- if err != nil {
- return err
+ for i, txResponse := range l2Block.TransactionResponses {
+ // if the transaction has an intrinsic invalid tx error it means
+ // the transaction has not changed the state, so we don't store it
+ if executor.IsIntrinsicError(executor.RomErrorCode(txResponse.RomError)) {
+ continue
}
- l2Transaction := state.DSL2Transaction{
- L2BlockNumber: l2Block.L2BlockNumber,
- EffectiveGasPricePercentage: uint8(tx.response.EffectivePercentage),
- IsValid: 1,
- EncodedLength: uint32(len(binaryTxData)),
- Encoded: binaryTxData,
- }
+ transactions = append(transactions, &txResponse.Tx)
- d.dataToStream <- state.DSL2FullBlock{
- DSL2Block: l2Block,
- Txs: []state.DSL2Transaction{l2Transaction},
+ storeTxsEGPData = append(storeTxsEGPData, state.StoreTxEGPData{EGPLog: nil, EffectivePercentage: uint8(txResponse.EffectivePercentage)})
+ if txsEGPLog != nil {
+ storeTxsEGPData[i].EGPLog = txsEGPLog[i]
}
+
+ receipt := state.GenerateReceipt(header.Number, txResponse)
+ receipts = append(receipts, receipt)
}
+ // Create block to be able to calculate its hash
+ block := state.NewL2Block(l2Header, transactions, []*state.L2Header{}, receipts, &trie.StackTrie{})
+ block.ReceivedAt = time.Unix(int64(l2Block.Timestamp), 0)
+
+ for _, receipt := range receipts {
+ receipt.BlockHash = block.Hash()
+ }
+
+ // Store L2 block and its transactions
+ if err := d.state.AddL2Block(ctx, batchNumber, block, receipts, storeTxsEGPData, dbTx); err != nil {
+ return err
+ }
+
+ log.Debugf("successfully stored L2 block %d for batch %d, storing time %v", header.Number, batchNumber, time.Since(start))
+
return nil
}
// GetWIPBatch returns ready WIP batch
-func (d *dbManager) GetWIPBatch(ctx context.Context) (*WipBatch, error) {
+func (d *dbManager) GetWIPBatch(ctx context.Context) (*Batch, error) {
const two = 2
var lastBatch, previousLastBatch *state.Batch
dbTx, err := d.BeginStateTransaction(ctx)
@@ -399,7 +394,7 @@ func (d *dbManager) GetWIPBatch(ctx context.Context) (*WipBatch, error) {
}
}
- wipBatch := &WipBatch{
+ wipBatch := &Batch{
batchNumber: lastBatch.BatchNumber,
coinbase: lastBatch.Coinbase,
localExitRoot: lastBatch.LocalExitRoot,
@@ -521,6 +516,11 @@ func (d *dbManager) GetLatestGer(ctx context.Context, gerFinalityNumberOfBlocks
return d.state.GetLatestGer(ctx, gerFinalityNumberOfBlocks)
}
+// GetLatestL1InfoRoot gets the latest L1InfoRoot
+func (d *dbManager) GetLatestL1InfoRoot(ctx context.Context, maxBlockNumber uint64) (state.L1InfoTreeExitRootStorageEntry, error) {
+ return d.state.GetLatestL1InfoRoot(ctx, maxBlockNumber)
+}
+
// CloseBatch closes a batch in the state
func (d *dbManager) CloseBatch(ctx context.Context, params ClosingBatchParameters) error {
processingReceipt := state.ProcessingReceipt{
@@ -669,6 +669,10 @@ func (d *dbManager) GetLastBlock(ctx context.Context, dbTx pgx.Tx) (*state.Block
return d.state.GetLastBlock(ctx, dbTx)
}
+func (d *dbManager) GetLastL2Block(ctx context.Context, dbTx pgx.Tx) (*state.L2Block, error) {
+ return d.state.GetLastL2Block(ctx, dbTx)
+}
+
func (d *dbManager) GetLastTrustedForcedBatchNumber(ctx context.Context, dbTx pgx.Tx) (uint64, error) {
return d.state.GetLastTrustedForcedBatchNumber(ctx, dbTx)
}
@@ -685,6 +689,10 @@ func (d *dbManager) UpdateTxStatus(ctx context.Context, hash common.Hash, newSta
return d.txPool.UpdateTxStatus(ctx, hash, newStatus, isWIP, failedReason)
}
+func (d *dbManager) UpdateBatch(ctx context.Context, batchNumber uint64, batchL2Data []byte, localExitRoot common.Hash, dbTx pgx.Tx) error {
+ return d.state.UpdateBatch(ctx, batchNumber, batchL2Data, localExitRoot, dbTx)
+}
+
// GetLatestVirtualBatchTimestamp gets last virtual batch timestamp
func (d *dbManager) GetLatestVirtualBatchTimestamp(ctx context.Context, dbTx pgx.Tx) (time.Time, error) {
return d.state.GetLatestVirtualBatchTimestamp(ctx, dbTx)
@@ -728,3 +736,48 @@ func (d *dbManager) GetForcedBatch(ctx context.Context, forcedBatchNumber uint64
func (d *dbManager) GetForkIDByBatchNumber(batchNumber uint64) uint64 {
return d.state.GetForkIDByBatchNumber(batchNumber)
}
+
+func (d *dbManager) DSSendL2Block(l2Block *L2Block) error {
+ blockResponse := l2Block.batchResponse.BlockResponses[0]
+ forkID := d.GetForkIDByBatchNumber(l2Block.batchNumber)
+
+ // Send data to streamer
+ if d.streamServer != nil {
+ l2Block := state.DSL2Block{
+ BatchNumber: l2Block.batchNumber,
+ L2BlockNumber: blockResponse.BlockNumber,
+ Timestamp: int64(blockResponse.Timestamp),
+ GlobalExitRoot: blockResponse.BlockInfoRoot, //TODO: is it ok?
+ Coinbase: l2Block.coinbase,
+ ForkID: uint16(forkID),
+ BlockHash: blockResponse.BlockHash,
+ StateRoot: blockResponse.BlockHash, //TODO: in etrog the blockhash is the block root
+ }
+
+ l2Transactions := []state.DSL2Transaction{}
+
+ for _, txResponse := range blockResponse.TransactionResponses {
+ binaryTxData, err := txResponse.Tx.MarshalBinary()
+ if err != nil {
+ return err
+ }
+
+ l2Transaction := state.DSL2Transaction{
+ L2BlockNumber: blockResponse.BlockNumber,
+ EffectiveGasPricePercentage: uint8(txResponse.EffectivePercentage),
+ IsValid: 1,
+ EncodedLength: uint32(len(binaryTxData)),
+ Encoded: binaryTxData,
+ }
+
+ l2Transactions = append(l2Transactions, l2Transaction)
+ }
+
+ d.dataToStream <- state.DSL2FullBlock{ //TODO: review how the channel manages the slice of txs
+ DSL2Block: l2Block,
+ Txs: l2Transactions,
+ }
+ }
+
+ return nil
+}
diff --git a/sequencer/errors.go b/sequencer/errors.go
index ab231dc348..44fbc8bdd0 100644
--- a/sequencer/errors.go
+++ b/sequencer/errors.go
@@ -25,4 +25,8 @@ var (
ErrStateRootNoMatch = errors.New("state root no match")
// ErrExecutorError happens when we got an executor error when processing a batch
ErrExecutorError = errors.New("executor error")
+ // ErrNoFittingTransaction happens when there is no tx (from the txSortedList) that fits in the remaining batch resources
+ ErrNoFittingTransaction = errors.New("no fitting transaction")
+ // ErrTransactionsListEmpty happens when txSortedList is empty
+ ErrTransactionsListEmpty = errors.New("transactions list empty")
)
diff --git a/sequencer/finalizer.go b/sequencer/finalizer.go
index eda0e95a0d..8f96f962d5 100644
--- a/sequencer/finalizer.go
+++ b/sequencer/finalizer.go
@@ -22,15 +22,16 @@ import (
"github.com/0xPolygonHermez/zkevm-node/state/runtime/executor"
"github.com/ethereum/go-ethereum/common"
ethereumTypes "github.com/ethereum/go-ethereum/core/types"
- "github.com/jackc/pgx/v4"
)
const (
- pendingTxsBufferSizeMultiplier = 10
+ pendingL2BlocksBufferSize = 100
+ changeL2BlockSize = 9 //1 byte (tx type = 0B) + 4 bytes for deltaTimestamp + 4 for l1InfoTreeIndex
)
var (
- now = time.Now
+ now = time.Now
+ mockL1InfoRoot = common.Hash{}
)
// finalizer represents the finalizer component of the sequencer.
@@ -41,7 +42,9 @@ type finalizer struct {
worker workerInterface
dbManager dbManagerInterface
executor stateInterface
- wipBatch *WipBatch
+ etherman etherman
+ wipBatch *Batch
+ wipL2Block *L2Block
batchConstraints state.BatchConstraintsCfg
reprocessFullBatchError atomic.Bool
// closing signals
@@ -51,20 +54,28 @@ type finalizer struct {
previousGERHash common.Hash // GER of the batch previous to the current WIP batch
nextGER common.Hash
nextGERDeadline int64
- nextGERMux *sync.RWMutex
+ nextGERMux *sync.Mutex
// forced batches
nextForcedBatches []state.ForcedBatch
nextForcedBatchDeadline int64
- nextForcedBatchesMux *sync.RWMutex
+ nextForcedBatchesMux *sync.Mutex
+ // L1InfoTree
+ lastL1InfoTreeValid bool
+ lastL1InfoTree state.L1InfoTreeExitRootStorageEntry
+ lastL1InfoTreeMux *sync.Mutex
+ lastL1InfoTreeCond *sync.Cond
// L2 reorg
handlingL2Reorg bool
// event log
eventLog *event.EventLog
// effective gas price calculation instance
effectiveGasPrice *pool.EffectiveGasPrice
- // pending txs to store in the state
- pendingTransactionsToStore chan transactionToStore
- pendingTransactionsToStoreWG *sync.WaitGroup
+ // pending L2 blocks to be processed (executor)
+ pendingL2BlocksToProcess chan *L2Block
+ pendingL2BlocksToProcessWG *sync.WaitGroup
+ // pending L2 blocks to store in the state
+ pendingL2BlocksToStore chan *L2Block
+ pendingL2BlocksToStoreWG *sync.WaitGroup
// executer flushid control
proverID string
storedFlushID uint64
@@ -75,39 +86,6 @@ type finalizer struct {
streamServer *datastreamer.StreamServer
}
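The two channel/WaitGroup pairs added above feed a two-stage pipeline: a wip L2 block is first processed by the executor and then stored in the state. The loops that consume these channels (processPendingL2Blocks and storePendingL2Blocks) live in the new sequencer/l2block.go, which is not reproduced in this excerpt; the sketch below only illustrates the coordination pattern with simplified stand-in types.

```go
package main

import (
	"fmt"
	"sync"
)

// l2BlockJob is a simplified stand-in for the sequencer's *L2Block.
type l2BlockJob struct{ number uint64 }

func main() {
	toProcess := make(chan *l2BlockJob, 100) // plays the role of pendingL2BlocksToProcess
	toStore := make(chan *l2BlockJob, 100)   // plays the role of pendingL2BlocksToStore
	var processWG, storeWG sync.WaitGroup    // play the role of the two WaitGroups

	// Stage 1: "process" each pending L2 block and hand it over to the store stage.
	go func() {
		for blk := range toProcess {
			storeWG.Add(1) // account for the block before the store stage can see it
			toStore <- blk
			processWG.Done()
		}
		close(toStore)
	}()

	// Stage 2: "store" each processed L2 block.
	go func() {
		for blk := range toStore {
			fmt.Println("stored L2 block", blk.number)
			storeWG.Done()
		}
	}()

	// Producer side: enqueue blocks, accounting for them in the process WaitGroup.
	for i := uint64(1); i <= 3; i++ {
		processWG.Add(1)
		toProcess <- &l2BlockJob{number: i}
	}

	// closeAndOpenNewWIPBatch waits on both groups like this before closing the batch.
	processWG.Wait()
	storeWG.Wait()
	close(toProcess)
}
```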
-type transactionToStore struct {
- hash common.Hash
- from common.Address
- response *state.ProcessTransactionResponse
- batchResponse *state.ProcessBatchResponse
- batchNumber uint64
- timestamp time.Time
- coinbase common.Address
- oldStateRoot common.Hash
- isForcedBatch bool
- flushId uint64
- egpLog *state.EffectiveGasPriceLog
-}
-
-// WipBatch represents a work-in-progress batch.
-type WipBatch struct {
- batchNumber uint64
- coinbase common.Address
- timestamp time.Time
- initialStateRoot common.Hash
- stateRoot common.Hash
- localExitRoot common.Hash
- globalExitRoot common.Hash // 0x000...0 (ZeroHash) means to not update
- oldAccInputHash common.Hash
- countOfTxs int
- remainingResources state.BatchResources
- closingReason state.ClosingReason
-}
-
-func (w *WipBatch) isEmpty() bool {
- return w.countOfTxs == 0
-}
-
// newFinalizer returns a new instance of Finalizer.
func newFinalizer(
cfg FinalizerCfg,
@@ -115,6 +93,7 @@ func newFinalizer(
worker workerInterface,
dbManager dbManagerInterface,
executor stateInterface,
+ etherman etherman,
sequencerAddr common.Address,
isSynced func(ctx context.Context) bool,
closingSignalCh ClosingSignalCh,
@@ -129,29 +108,37 @@ func newFinalizer(
worker: worker,
dbManager: dbManager,
executor: executor,
+ etherman: etherman,
batchConstraints: batchConstraints,
// closing signals
closingSignalCh: closingSignalCh,
- // GER
+ // GER //TODO: Delete GER updates as in ETROG it's not used
currentGERHash: state.ZeroHash,
previousGERHash: state.ZeroHash,
- nextGER: common.Hash{},
+ nextGER: state.ZeroHash,
nextGERDeadline: 0,
- nextGERMux: new(sync.RWMutex),
+ nextGERMux: new(sync.Mutex),
// forced batches
nextForcedBatches: make([]state.ForcedBatch, 0),
nextForcedBatchDeadline: 0,
- nextForcedBatchesMux: new(sync.RWMutex),
+ nextForcedBatchesMux: new(sync.Mutex),
+ // L1InfoTree
+ lastL1InfoTreeValid: false,
+ lastL1InfoTreeMux: new(sync.Mutex),
+ lastL1InfoTreeCond: sync.NewCond(&sync.Mutex{}),
// L2 reorg
handlingL2Reorg: false,
// event log
eventLog: eventLog,
// effective gas price calculation instance
effectiveGasPrice: pool.NewEffectiveGasPrice(poolCfg.EffectiveGasPrice, poolCfg.DefaultMinGasPriceAllowed),
- // pending txs to store in the state
- pendingTransactionsToStore: make(chan transactionToStore, batchConstraints.MaxTxsPerBatch*pendingTxsBufferSizeMultiplier),
- pendingTransactionsToStoreWG: new(sync.WaitGroup),
- storedFlushID: 0,
+ // pending L2 blocks to be processed (executor)
+ pendingL2BlocksToProcess: make(chan *L2Block, pendingL2BlocksBufferSize), //TODO: review buffer size
+ pendingL2BlocksToProcessWG: new(sync.WaitGroup),
+ // pending L2 blocks to store in the state
+ pendingL2BlocksToStore: make(chan *L2Block, pendingL2BlocksBufferSize), //TODO: review buffer size
+ pendingL2BlocksToStoreWG: new(sync.WaitGroup),
+ storedFlushID: 0,
// executer flushid control
proverID: "",
storedFlushIDCond: sync.NewCond(&sync.Mutex{}),
@@ -168,146 +155,36 @@ func newFinalizer(
// Start starts the finalizer.
func (f *finalizer) Start(ctx context.Context) {
+ // Init mockL1InfoRoot to a mock value since it must be different from {0,0,...,0}
+ for i := 0; i < len(mockL1InfoRoot); i++ {
+ mockL1InfoRoot[i] = byte(i)
+ }
+
+ // Update L1InfoRoot
+ go f.checkL1InfoRootUpdate(ctx)
+
// Get the last batch if still wip or opens a new one
f.getWIPBatch(ctx)
+ // Initializes the wip L2 block
+ f.initWIPL2Block(ctx)
+
// Closing signals receiver
go f.listenForClosingSignals(ctx)
// Update the prover id and flush id
go f.updateProverIdAndFlushId(ctx)
- // Store Pending transactions
- go f.storePendingTransactions(ctx)
+ // Process L2 Blocks
+ go f.processPendingL2Blocks(ctx)
+
+ // Store L2 Blocks
+ go f.storePendingL2Blocks(ctx)
// Processing transactions and finalizing batches
f.finalizeBatches(ctx)
}
-// getLastStateRoot gets the state root from the latest batch
-func (f *finalizer) getLastStateRoot(ctx context.Context) (common.Hash, error) {
- var oldStateRoot common.Hash
-
- batches, err := f.dbManager.GetLastNBatches(ctx, 2) //nolint:gomnd
- if err != nil {
- return common.Hash{}, fmt.Errorf("failed to get last %d batches, err: %w", 2, err) //nolint:gomnd
- }
-
- if len(batches) == 1 { //nolint:gomnd
- oldStateRoot = batches[0].StateRoot
- } else if len(batches) == 2 { //nolint:gomnd
- oldStateRoot = batches[1].StateRoot
- }
-
- return oldStateRoot, nil
-}
-
-// getWIPBatch gets the last batch if still wip or opens a new one
-func (f *finalizer) getWIPBatch(ctx context.Context) {
- for !f.isSynced(ctx) {
- log.Info("wait for synchronizer to sync last batch")
- time.Sleep(time.Second)
- }
-
- lastBatchNum, err := f.dbManager.GetLastBatchNumber(ctx)
- if err != nil {
- log.Fatalf("failed to get last batch number. Error: %v", err)
- }
-
- if lastBatchNum == 0 {
- // GENESIS batch
- processingCtx := f.dbManager.CreateFirstBatch(ctx, f.sequencerAddress)
- timestamp := processingCtx.Timestamp
- oldStateRoot, err := f.getLastStateRoot(ctx)
- if err != nil {
- log.Fatalf("failed to get old state root. Error: %v", err)
- }
- f.wipBatch = &WipBatch{
- globalExitRoot: processingCtx.GlobalExitRoot,
- initialStateRoot: oldStateRoot,
- stateRoot: oldStateRoot,
- batchNumber: processingCtx.BatchNumber,
- coinbase: processingCtx.Coinbase,
- timestamp: timestamp,
- remainingResources: getMaxRemainingResources(f.batchConstraints),
- }
- } else {
- // Get the last batch if is still wip, if not open a new one
- lastBatch, err := f.dbManager.GetBatchByNumber(ctx, lastBatchNum, nil)
- if err != nil {
- log.Fatalf("failed to get last batch. Error: %w", err)
- }
-
- isClosed, err := f.dbManager.IsBatchClosed(ctx, lastBatchNum)
- if err != nil {
- log.Fatalf("failed to check if batch is closed. Error: %w", err)
- }
-
- log.Infof("batch %d isClosed: %v", lastBatchNum, isClosed)
-
- if isClosed { //open new wip batch
- ger, _, err := f.dbManager.GetLatestGer(ctx, f.cfg.GERFinalityNumberOfBlocks)
- if err != nil {
- log.Fatalf("failed to get latest ger. Error: %w", err)
- }
-
- oldStateRoot := lastBatch.StateRoot
- f.wipBatch, err = f.openNewWIPBatch(ctx, lastBatchNum+1, ger.GlobalExitRoot, oldStateRoot)
- if err != nil {
- log.Fatalf("failed to open new wip batch. Error: %w", err)
- }
- } else { /// get wip batch
- f.wipBatch, err = f.dbManager.GetWIPBatch(ctx)
- if err != nil {
- log.Fatalf("failed to get wip batch. Error: %w", err)
- }
- }
- }
-
- log.Infof("initial batch: %d, initialStateRoot: %s, stateRoot: %s, coinbase: %s, GER: %s, LER: %s",
- f.wipBatch.batchNumber, f.wipBatch.initialStateRoot.String(), f.wipBatch.stateRoot.String(), f.wipBatch.coinbase.String(),
- f.wipBatch.globalExitRoot.String(), f.wipBatch.localExitRoot.String())
-}
-
-// storePendingTransactions stores the pending transactions in the database
-func (f *finalizer) storePendingTransactions(ctx context.Context) {
- for {
- select {
- case tx, ok := <-f.pendingTransactionsToStore:
- if !ok {
- // Channel is closed
- return
- }
-
- // Wait until f.storedFlushID >= tx.flushId
- f.storedFlushIDCond.L.Lock()
- for f.storedFlushID < tx.flushId {
- f.storedFlushIDCond.Wait()
- // check if context is done after waking up
- if ctx.Err() != nil {
- f.storedFlushIDCond.L.Unlock()
- return
- }
- }
- f.storedFlushIDCond.L.Unlock()
-
- // Now f.storedFlushID >= tx.flushId, we can store tx
- f.storeProcessedTx(ctx, tx)
-
- // Delete the tx from the pending list in the worker (addrQueue)
- f.worker.DeletePendingTxToStore(tx.hash, tx.from)
-
- f.pendingTransactionsToStoreWG.Done()
- case <-ctx.Done():
- // The context was cancelled from outside, Wait for all goroutines to finish, cleanup and exit
- f.pendingTransactionsToStoreWG.Wait()
- return
- default:
- time.Sleep(100 * time.Millisecond) //nolint:gomnd
- }
- }
-}
-
// updateProverIdAndFlushId updates the prover id and flush id
func (f *finalizer) updateProverIdAndFlushId(ctx context.Context) {
for {
@@ -319,7 +196,7 @@ func (f *finalizer) updateProverIdAndFlushId(ctx context.Context) {
}
f.pendingFlushIDCond.L.Unlock()
- for f.storedFlushID < f.lastPendingFlushID {
+ for f.storedFlushID < f.lastPendingFlushID { //TODO: review this loop as it could be polling all the time with no sleep
storedFlushID, proverID, err := f.dbManager.GetStoredFlushID(ctx)
if err != nil {
log.Errorf("failed to get stored flush id, Err: %v", err)
@@ -339,6 +216,55 @@ func (f *finalizer) updateProverIdAndFlushId(ctx context.Context) {
}
}
+func (f *finalizer) checkL1InfoRootUpdate(ctx context.Context) {
+ var (
+ lastL1InfoRootIndex uint32
+ firstL1InfoRootUpdate = true
+ firstSleepSkipped = false
+ )
+
+ for {
+ if firstSleepSkipped {
+ time.Sleep(f.cfg.WaitForCheckingL1InfoRoot.Duration)
+ } else {
+ firstSleepSkipped = true
+ }
+
+ lastL1BlockNumber, err := f.etherman.GetLatestBlockNumber(ctx)
+ if err != nil {
+ log.Errorf("error getting latest L1 block number: %v", err)
+ }
+
+ maxBlockNumber := uint64(0)
+ if f.cfg.L1InfoRootFinalityNumberOfBlocks <= lastL1BlockNumber {
+ maxBlockNumber = lastL1BlockNumber - f.cfg.L1InfoRootFinalityNumberOfBlocks
+ }
+
+ l1InfoRoot, err := f.dbManager.GetLatestL1InfoRoot(ctx, maxBlockNumber)
+ if err != nil {
+ log.Errorf("error checking latest L1InfoRoot: %v", err)
+ continue
+ }
+
+ if firstL1InfoRootUpdate || l1InfoRoot.L1InfoTreeIndex > lastL1InfoRootIndex {
+ firstL1InfoRootUpdate = false
+
+ log.Debugf("received new L1InfoRoot. L1InfoTreeIndex: %d", l1InfoRoot.L1InfoTreeIndex)
+
+ f.lastL1InfoTreeMux.Lock()
+ f.lastL1InfoTree = l1InfoRoot
+ f.lastL1InfoTreeMux.Unlock()
+
+ if !f.lastL1InfoTreeValid {
+ f.lastL1InfoTreeCond.L.Lock()
+ f.lastL1InfoTreeValid = true
+ f.lastL1InfoTreeCond.Broadcast()
+ f.lastL1InfoTreeCond.L.Unlock()
+ }
+ }
+ }
+}
+
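The maxBlockNumber computed above implements a simple finality window: with the default L1InfoRootFinalityNumberOfBlocks of 64, only L1InfoRoot entries that are at least 64 L1 blocks old are picked up. A minimal sketch of just that computation:

```go
package main

import "fmt"

// finalizedUpTo mirrors the window computation in checkL1InfoRootUpdate: only
// L1InfoRoot entries whose L1 block number is at most lastL1Block minus the
// finality window are considered final; if the chain is shorter than the
// window, nothing qualifies yet.
func finalizedUpTo(lastL1Block, finalityBlocks uint64) uint64 {
	if finalityBlocks <= lastL1Block {
		return lastL1Block - finalityBlocks
	}
	return 0
}

func main() {
	fmt.Println(finalizedUpTo(1_000_064, 64)) // 1000000
	fmt.Println(finalizedUpTo(10, 64))        // 0: fewer blocks than the window
}
```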
// listenForClosingSignals listens for signals for the batch and sets the deadline for when they need to be closed.
func (f *finalizer) listenForClosingSignals(ctx context.Context) {
for {
@@ -384,22 +310,6 @@ func (f *finalizer) updateLastPendingFlushID(newFlushID uint64) {
}
}
-// addPendingTxToStore adds a pending tx that is ready to be stored in the state DB once its flushid has been stored by the executor
-func (f *finalizer) addPendingTxToStore(ctx context.Context, txToStore transactionToStore) {
- f.pendingTransactionsToStoreWG.Add(1)
-
- f.worker.AddPendingTxToStore(txToStore.hash, txToStore.from)
-
- select {
- case f.pendingTransactionsToStore <- txToStore:
- case <-ctx.Done():
- // If context is cancelled before we can send to the channel, we must decrement the WaitGroup count and
- // delete the pending TxToStore added in the worker
- f.pendingTransactionsToStoreWG.Done()
- f.worker.DeletePendingTxToStore(txToStore.hash, txToStore.from)
- }
-}
-
// finalizeBatches runs the endless loop for processing transactions finalizing batches.
func (f *finalizer) finalizeBatches(ctx context.Context) {
log.Debug("finalizer init loop")
@@ -410,7 +320,18 @@ func (f *finalizer) finalizeBatches(ctx context.Context) {
f.halt(ctx, fmt.Errorf("finalizer reached stop sequencer batch number: %v", f.cfg.StopSequencerOnBatchNum))
}
- tx := f.worker.GetBestFittingTx(f.wipBatch.remainingResources)
+ // We have reached the L2 block time, we need to close the current L2 block and open a new one
+ if !f.wipL2Block.timestamp.Add(f.cfg.L2BlockTime.Duration).After(time.Now()) {
+ f.finalizeL2Block(ctx)
+ }
+
+ tx, err := f.worker.GetBestFittingTx(f.wipBatch.remainingResources)
+
+ // If we have txs pending to process but none of them fits into the wip batch, we close the wip batch and open a new one
+ if err == ErrNoFittingTransaction { //TODO: review this with JEC
+ f.finalizeBatch(ctx)
+ }
+
metrics.WorkerProcessingTime(time.Since(start))
if tx != nil {
log.Debugf("processing tx: %s", tx.Hash.Hex())
@@ -451,7 +372,7 @@ func (f *finalizer) finalizeBatches(ctx context.Context) {
if f.isDeadlineEncountered() {
f.finalizeBatch(ctx)
- } else if f.maxTxsPerBatchReached() || f.isBatchResourcesFull() {
+ } else if f.maxTxsPerBatchReached() || f.isBatchResourcesExhausted() {
f.finalizeBatch(ctx)
}
@@ -489,44 +410,6 @@ func (f *finalizer) maxTxsPerBatchReached() bool {
return false
}
-// finalizeBatch retries to until successful closes the current batch and opens a new one, potentially processing forced batches between the batch is closed and the resulting new empty batch
-func (f *finalizer) finalizeBatch(ctx context.Context) {
- start := time.Now()
- defer func() {
- metrics.ProcessingTime(time.Since(start))
- }()
-
- var err error
- f.wipBatch, err = f.closeAndOpenNewWIPBatch(ctx)
- for err != nil { //TODO: we need to review is this for loop is needed or if it's better to halt if we have an error
- log.Errorf("failed to create new work-in-progress batch, Err: %s", err)
- f.wipBatch, err = f.closeAndOpenNewWIPBatch(ctx)
- }
-}
-
-// halt halts the finalizer
-func (f *finalizer) halt(ctx context.Context, err error) {
- event := &event.Event{
- ReceivedAt: time.Now(),
- Source: event.Source_Node,
- Component: event.Component_Sequencer,
- Level: event.Level_Critical,
- EventID: event.EventID_FinalizerHalt,
- Description: fmt.Sprintf("finalizer halted due to error: %s", err),
- }
-
- eventErr := f.eventLog.LogEvent(ctx, event)
- if eventErr != nil {
- log.Errorf("error storing finalizer halt event: %v", eventErr)
- }
-
- for {
- log.Errorf("fatal error: %s", err)
- log.Error("halting the finalizer")
- time.Sleep(5 * time.Second) //nolint:gomnd
- }
-}
-
// checkIfProverRestarted checks if the proverID changed
func (f *finalizer) checkIfProverRestarted(proverID string) {
if f.proverID != "" && f.proverID != proverID {
@@ -548,104 +431,6 @@ func (f *finalizer) checkIfProverRestarted(proverID string) {
}
}
-// closeAndOpenNewWIPBatch closes the current batch and opens a new one, potentially processing forced batches between the batch is closed and the resulting new empty batch
-func (f *finalizer) closeAndOpenNewWIPBatch(ctx context.Context) (*WipBatch, error) {
- // Wait until all processed transactions are saved
- startWait := time.Now()
- f.pendingTransactionsToStoreWG.Wait()
- endWait := time.Now()
-
- log.Debugf("waiting for pending transactions to be stored took: %s", endWait.Sub(startWait).String())
-
- var err error
- if f.wipBatch.stateRoot == state.ZeroHash {
- return nil, errors.New("state root must have value to close batch")
- }
-
- // We need to process the batch to update the state root before closing the batch
- if f.wipBatch.initialStateRoot == f.wipBatch.stateRoot {
- log.Info("reprocessing batch because the state root has not changed...")
- _, err = f.processTransaction(ctx, nil, true)
- if err != nil {
- return nil, err
- }
- }
-
- // Reprocess full batch as sanity check
- if f.cfg.SequentialReprocessFullBatch {
- // Do the full batch reprocess now
- _, err := f.reprocessFullBatch(ctx, f.wipBatch.batchNumber, f.wipBatch.initialStateRoot, f.wipBatch.stateRoot)
- if err != nil {
- // There is an error reprocessing the batch. We halt the execution of the Sequencer at this point
- f.halt(ctx, fmt.Errorf("halting Sequencer because of error reprocessing full batch %d (sanity check). Error: %s ", f.wipBatch.batchNumber, err))
- }
- } else {
- // Do the full batch reprocess in parallel
- go func() {
- _, _ = f.reprocessFullBatch(ctx, f.wipBatch.batchNumber, f.wipBatch.initialStateRoot, f.wipBatch.stateRoot)
- }()
- }
-
- // Close the current batch
- err = f.closeWIPBatch(ctx)
- if err != nil {
- return nil, fmt.Errorf("failed to close batch, err: %w", err)
- }
-
- // Check if the batch is empty and sending a GER Update to the stream is needed
- if f.streamServer != nil && f.wipBatch.isEmpty() && f.currentGERHash != f.previousGERHash {
- updateGer := state.DSUpdateGER{
- BatchNumber: f.wipBatch.batchNumber,
- Timestamp: f.wipBatch.timestamp.Unix(),
- GlobalExitRoot: f.wipBatch.globalExitRoot,
- Coinbase: f.sequencerAddress,
- ForkID: uint16(f.dbManager.GetForkIDByBatchNumber(f.wipBatch.batchNumber)),
- StateRoot: f.wipBatch.stateRoot,
- }
-
- err = f.streamServer.StartAtomicOp()
- if err != nil {
- log.Errorf("failed to start atomic op for Update GER on batch %v: %v", f.wipBatch.batchNumber, err)
- }
-
- _, err = f.streamServer.AddStreamEntry(state.EntryTypeUpdateGER, updateGer.Encode())
- if err != nil {
- log.Errorf("failed to add stream entry for Update GER on batch %v: %v", f.wipBatch.batchNumber, err)
- }
-
- err = f.streamServer.CommitAtomicOp()
- if err != nil {
- log.Errorf("failed to commit atomic op for Update GER on batch %v: %v", f.wipBatch.batchNumber, err)
- }
- }
-
- // Metadata for the next batch
- stateRoot := f.wipBatch.stateRoot
- lastBatchNumber := f.wipBatch.batchNumber
-
- // Process Forced Batches
- if len(f.nextForcedBatches) > 0 {
- lastBatchNumber, stateRoot, err = f.processForcedBatches(ctx, lastBatchNumber, stateRoot)
- if err != nil {
- log.Warnf("failed to process forced batch, err: %s", err)
- }
- }
-
- // Take into consideration the GER
- f.nextGERMux.Lock()
- if f.nextGER != state.ZeroHash {
- f.previousGERHash = f.currentGERHash
- f.currentGERHash = f.nextGER
- }
- f.nextGER = state.ZeroHash
- f.nextGERDeadline = 0
- f.nextGERMux.Unlock()
-
- batch, err := f.openNewWIPBatch(ctx, lastBatchNumber+1, f.currentGERHash, stateRoot)
-
- return batch, err
-}
-
// processTransaction processes a single transaction.
func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, firstTxProcess bool) (errWg *sync.WaitGroup, err error) {
var txHash string
@@ -662,26 +447,22 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, first
}()
executorBatchRequest := state.ProcessRequest{
- BatchNumber: f.wipBatch.batchNumber,
- OldStateRoot: f.wipBatch.stateRoot,
- OldAccInputHash: f.wipBatch.oldAccInputHash,
- Coinbase: f.wipBatch.coinbase,
- Timestamp_V1: f.wipBatch.timestamp,
- Caller: stateMetrics.SequencerCallerLabel,
+ BatchNumber: f.wipBatch.batchNumber,
+ OldStateRoot: f.wipBatch.stateRoot,
+ OldAccInputHash: f.wipBatch.accInputHash,
+ Coinbase: f.wipBatch.coinbase,
+ L1InfoRoot_V2: mockL1InfoRoot,
+ TimestampLimit_V2: uint64(f.wipL2Block.timestamp.Unix()),
+ Caller: stateMetrics.SequencerCallerLabel,
}
- forkID := f.dbManager.GetForkIDByBatchNumber(executorBatchRequest.BatchNumber)
-
- // TODO: Check if we need to update the L1InfoRoot_V2
- if f.wipBatch.isEmpty() {
- executorBatchRequest.GlobalExitRoot_V1 = f.wipBatch.globalExitRoot
- } else {
- executorBatchRequest.GlobalExitRoot_V1 = state.ZeroHash
- }
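+ // Prepend the changeL2Block entry (encoded from the wip L2 block delta timestamp and L1 info tree index) to the batch data.
+ // When the wip batch already contains an L2 block, the executor is asked to skip this first changeL2Block entry.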
+ executorBatchRequest.Transactions = f.dbManager.BuildChangeL2Block(f.wipL2Block.deltaTimestamp, f.wipL2Block.l1InfoTreeExitRoot.L1InfoTreeIndex)
+ executorBatchRequest.SkipWriteBlockInfoRoot_V2 = true
+ executorBatchRequest.SkipFirstChangeL2Block_V2 = !f.wipBatch.isEmpty()
hashStr := "nil"
if tx != nil {
- executorBatchRequest.Transactions = tx.RawTx
+ executorBatchRequest.Transactions = append(executorBatchRequest.Transactions, tx.RawTx...)
hashStr = tx.HashStr
txGasPrice := tx.GasPrice
@@ -752,88 +533,49 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, first
return nil, err
}
- if forkID >= state.FORKID_DRAGONFRUIT {
- executorBatchRequest.Transactions = append(executorBatchRequest.Transactions, effectivePercentageAsDecodedHex...)
- }
- } else {
- executorBatchRequest.Transactions = []byte{}
+ executorBatchRequest.Transactions = append(executorBatchRequest.Transactions, effectivePercentageAsDecodedHex...)
}
- if forkID < state.FORKID_ETROG {
- //TODO: unify log.Infof in this section in only one
- log.Infof("processTransaction: single tx. Batch.BatchNumber: %d, BatchNumber: %d, OldStateRoot: %s, txHash: %s, GER: %s", f.wipBatch.batchNumber, executorBatchRequest.BatchNumber, executorBatchRequest.OldStateRoot, hashStr, executorBatchRequest.GlobalExitRoot_V1.String())
- processBatchResponse, err := f.executor.ProcessBatch(ctx, executorBatchRequest, true)
- if err != nil && errors.Is(err, runtime.ErrExecutorDBError) {
- log.Errorf("failed to process transaction: %s", err)
- return nil, err
- } else if err == nil && !processBatchResponse.IsRomLevelError && len(processBatchResponse.BlockResponses) == 0 && tx != nil {
- err = fmt.Errorf("executor returned no errors and no responses for tx: %s", tx.HashStr)
- f.halt(ctx, err)
- } else if processBatchResponse.IsExecutorLevelError && tx != nil {
- log.Errorf("error received from executor. Error: %v", err)
- // Delete tx from the worker
- f.worker.DeleteTx(tx.Hash, tx.From)
-
- // Set tx as invalid in the pool
- errMsg := processBatchResponse.ExecutorError.Error()
- err = f.dbManager.UpdateTxStatus(ctx, tx.Hash, pool.TxStatusInvalid, false, &errMsg)
- if err != nil {
- log.Errorf("failed to update status to invalid in the pool for tx: %s, err: %s", tx.Hash.String(), err)
- } else {
- metrics.TxProcessed(metrics.TxProcessedLabelInvalid, 1)
- }
- return nil, err
- }
+ log.Infof("processing batch. Batch.BatchNumber: %d, batchNumber: %d, oldStateRoot: %s, txHash: %s, L1InfoRoot: %s", f.wipBatch.batchNumber, executorBatchRequest.BatchNumber, executorBatchRequest.OldStateRoot, hashStr, executorBatchRequest.L1InfoRoot_V2.String())
+ processBatchResponse, err := f.executor.ProcessBatchV2(ctx, executorBatchRequest, true)
+ if err != nil && errors.Is(err, runtime.ErrExecutorDBError) {
+ log.Errorf("failed to process transaction: %s", err)
+ return nil, err
+ } else if err == nil && !processBatchResponse.IsRomLevelError && len(processBatchResponse.BlockResponses) == 0 && tx != nil {
+ err = fmt.Errorf("executor returned no errors and no responses for tx: %s", tx.HashStr)
+ f.halt(ctx, err)
+ } else if processBatchResponse.IsExecutorLevelError && tx != nil {
+ log.Errorf("error received from executor. Error: %v", err)
+ // Delete tx from the worker
+ f.worker.DeleteTx(tx.Hash, tx.From)
- oldStateRoot := f.wipBatch.stateRoot
- if len(processBatchResponse.BlockResponses) > 0 && tx != nil {
- errWg, err = f.handleProcessTransactionResponse(ctx, tx, processBatchResponse, oldStateRoot)
- if err != nil {
- return errWg, err
- }
- }
- // Update in-memory batch
- f.wipBatch.stateRoot = processBatchResponse.NewStateRoot
- f.wipBatch.localExitRoot = processBatchResponse.NewLocalExitRoot
- log.Infof("processTransaction: data loaded in memory. batch.batchNumber: %d, batchNumber: %d, result.NewStateRoot: %s, result.NewLocalExitRoot: %s, oldStateRoot: %s", f.wipBatch.batchNumber, executorBatchRequest.BatchNumber, processBatchResponse.NewStateRoot.String(), processBatchResponse.NewLocalExitRoot.String(), oldStateRoot.String())
- } else {
- // TODO: Clean duplicated code
- log.Infof("processTransaction: single tx. Batch.BatchNumber: %d, BatchNumber: %d, OldStateRoot: %s, txHash: %s, L1InfoRoot: %s", f.wipBatch.batchNumber, executorBatchRequest.BatchNumber, executorBatchRequest.OldStateRoot, hashStr, executorBatchRequest.L1InfoRoot_V2.String())
- processBatchResponse, err := f.executor.ProcessBatchV2(ctx, executorBatchRequest, true)
- if err != nil && errors.Is(err, runtime.ErrExecutorDBError) {
- log.Errorf("failed to process transaction: %s", err)
- return nil, err
- } else if err == nil && !processBatchResponse.IsRomLevelError && len(processBatchResponse.BlockResponses) == 0 && tx != nil {
- err = fmt.Errorf("executor returned no errors and no responses for tx: %s", tx.HashStr)
- f.halt(ctx, err)
- } else if processBatchResponse.IsExecutorLevelError && tx != nil {
- log.Errorf("error received from executor. Error: %v", err)
- // Delete tx from the worker
- f.worker.DeleteTx(tx.Hash, tx.From)
-
- // Set tx as invalid in the pool
- errMsg := processBatchResponse.ExecutorError.Error()
- err = f.dbManager.UpdateTxStatus(ctx, tx.Hash, pool.TxStatusInvalid, false, &errMsg)
- if err != nil {
- log.Errorf("failed to update status to invalid in the pool for tx: %s, err: %s", tx.Hash.String(), err)
- } else {
- metrics.TxProcessed(metrics.TxProcessedLabelInvalid, 1)
- }
- return nil, err
+ // Set tx as invalid in the pool
+ errMsg := processBatchResponse.ExecutorError.Error()
+ err = f.dbManager.UpdateTxStatus(ctx, tx.Hash, pool.TxStatusInvalid, false, &errMsg)
+ if err != nil {
+ log.Errorf("failed to update status to invalid in the pool for tx: %s, err: %s", tx.Hash.String(), err)
+ } else {
+ metrics.TxProcessed(metrics.TxProcessedLabelInvalid, 1)
}
+ return nil, err
+ }
- oldStateRoot := f.wipBatch.stateRoot
- if len(processBatchResponse.BlockResponses) > 0 && tx != nil {
- errWg, err = f.handleProcessTransactionResponse(ctx, tx, processBatchResponse, oldStateRoot)
- if err != nil {
- return errWg, err
- }
+ oldStateRoot := f.wipBatch.stateRoot
+ if len(processBatchResponse.BlockResponses) > 0 && tx != nil {
+ errWg, err = f.handleProcessTransactionResponse(ctx, tx, processBatchResponse, oldStateRoot)
+ if err != nil {
+ return errWg, err
}
- // Update in-memory batch
- f.wipBatch.stateRoot = processBatchResponse.NewStateRoot
- f.wipBatch.localExitRoot = processBatchResponse.NewLocalExitRoot
- log.Infof("processTransaction: data loaded in memory. batch.batchNumber: %d, batchNumber: %d, result.NewStateRoot: %s, result.NewLocalExitRoot: %s, oldStateRoot: %s", f.wipBatch.batchNumber, executorBatchRequest.BatchNumber, processBatchResponse.NewStateRoot.String(), processBatchResponse.NewLocalExitRoot.String(), oldStateRoot.String())
}
+
+ // Update wip batch
+ f.wipBatch.stateRoot = processBatchResponse.NewStateRoot
+ f.wipBatch.localExitRoot = processBatchResponse.NewLocalExitRoot
+ f.wipBatch.accInputHash = processBatchResponse.NewAccInputHash
+
+ log.Infof("batch processed. Batch.batchNumber: %d, batchNumber: %d, newStateRoot: %s, newLocalExitRoot: %s, oldStateRoot: %s",
+ f.wipBatch.batchNumber, executorBatchRequest.BatchNumber, processBatchResponse.NewStateRoot.String(), processBatchResponse.NewLocalExitRoot.String(), oldStateRoot.String())
+
return nil, nil
}
@@ -906,24 +648,11 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx
tx.EGPLog.ValueFinal, tx.EGPLog.ValueFirst, tx.EGPLog.ValueSecond, tx.EGPLog.Percentage, tx.EGPLog.FinalDeviation, tx.EGPLog.MaxDeviation, tx.EGPLog.GasUsedFirst, tx.EGPLog.GasUsedSecond,
tx.EGPLog.GasPrice, tx.EGPLog.L1GasPrice, tx.EGPLog.L2GasPrice, tx.EGPLog.Reprocess, tx.EGPLog.GasPriceOC, tx.EGPLog.BalanceOC, egpEnabled, len(tx.RawTx), tx.HashStr, tx.EGPLog.Error)
- txToStore := transactionToStore{
- hash: tx.Hash,
- from: tx.From,
- response: result.BlockResponses[0].TransactionResponses[0],
- batchResponse: result,
- batchNumber: f.wipBatch.batchNumber,
- timestamp: f.wipBatch.timestamp,
- coinbase: f.wipBatch.coinbase,
- oldStateRoot: oldStateRoot,
- isForcedBatch: false,
- flushId: result.FlushID,
- egpLog: &tx.EGPLog,
- }
+ tx.FlushId = result.FlushID
+ f.wipL2Block.addTx(tx)
f.updateLastPendingFlushID(result.FlushID)
- f.addPendingTxToStore(ctx, txToStore)
-
f.wipBatch.countOfTxs++
f.updateWorkerAfterSuccessfulProcessing(ctx, tx.Hash, tx.From, false, result)
@@ -933,8 +662,28 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx
// handleProcessForcedTxsResponse handles the transactions responses for the processed forced batch.
func (f *finalizer) handleProcessForcedTxsResponse(ctx context.Context, request state.ProcessRequest, result *state.ProcessBatchResponse, oldStateRoot common.Hash) {
- log.Infof("handleForcedTxsProcessResp: batchNumber: %d, oldStateRoot: %s, newStateRoot: %s", request.BatchNumber, oldStateRoot.String(), result.NewStateRoot.String())
+ /*log.Infof("handleForcedTxsProcessResp: batchNumber: %d, oldStateRoot: %s, newStateRoot: %s", request.BatchNumber, oldStateRoot.String(), result.NewStateRoot.String())
+ parentBlockHash := f.wipL2Block.parentHash
for _, blockResp := range result.BlockResponses {
+ if blockResp.BlockNumber != f.wipL2Block.number {
+ log.Fatalf("L2 block number mismatch when processing forced batch block response. blockResp.BlockNumber: %d, f.wipL2Block.number: %d", blockResp.BlockNumber, f.wipL2Block.number)
+ return
+ }
+
+ l2BlockToStore := l2BlockToStore{
+ l2Block: &L2Block{
+ number: blockResp.BlockNumber,
+ hash: blockResp.BlockHash,
+ parentHash: parentBlockHash,
+ timestamp: time.Unix(int64(blockResp.Timestamp), 0),
+ transactions: []transactionToStore{},
+ },
+ batchNumber: request.BatchNumber,
+ forcedBatch: true,
+ coinbase: request.Coinbase,
+ stateRoot: oldStateRoot,
+ flushId: result.FlushID,
+ }
for _, txResp := range blockResp.TransactionResponses {
// Handle Transaction Error
if txResp.RomError != nil {
@@ -952,6 +701,7 @@ func (f *finalizer) handleProcessForcedTxsResponse(ctx context.Context, request
log.Warnf("handleForcedTxsProcessResp: failed to get sender for tx (%s): %v", txResp.TxHash, err)
}
+ //TODO: How to manage L2 block for forced batch/txs
txToStore := transactionToStore{
hash: txResp.TxHash,
from: from,
@@ -975,7 +725,7 @@ func (f *finalizer) handleProcessForcedTxsResponse(ctx context.Context, request
f.updateWorkerAfterSuccessfulProcessing(ctx, txResp.TxHash, from, true, result)
}
}
- }
+ }*/
}
// compareTxEffectiveGasPrice compares newEffectiveGasPrice with tx.EffectiveGasPrice.
@@ -1022,21 +772,6 @@ func (f *finalizer) compareTxEffectiveGasPrice(ctx context.Context, tx *TxTracke
return nil
}
-// storeProcessedTx stores the processed transaction in the database.
-func (f *finalizer) storeProcessedTx(ctx context.Context, txToStore transactionToStore) {
- if txToStore.response != nil {
- log.Infof("storeProcessedTx: storing processed txToStore: %s", txToStore.response.TxHash.String())
- } else {
- log.Info("storeProcessedTx: storing processed txToStore")
- }
- err := f.dbManager.StoreProcessedTxAndDeleteFromPool(ctx, txToStore)
- if err != nil {
- log.Info("halting the finalizer because of a database error on storing processed transaction")
- f.halt(ctx, err)
- }
- metrics.TxProcessed(metrics.TxProcessedLabelSuccessful, 1)
-}
-
func (f *finalizer) updateWorkerAfterSuccessfulProcessing(ctx context.Context, txHash common.Hash, txFrom common.Address, isForced bool, result *state.ProcessBatchResponse) {
// Delete the transaction from the worker
if isForced {
@@ -1167,14 +902,17 @@ func (f *finalizer) processForcedBatches(ctx context.Context, lastBatchNumberInS
}
func (f *finalizer) processForcedBatch(ctx context.Context, lastBatchNumberInState uint64, stateRoot common.Hash, forcedBatch state.ForcedBatch) (uint64, common.Hash) {
+ //TODO: review this request for forced txs
executorBatchRequest := state.ProcessRequest{
- BatchNumber: lastBatchNumberInState + 1,
- OldStateRoot: stateRoot,
- GlobalExitRoot_V1: forcedBatch.GlobalExitRoot,
- Transactions: forcedBatch.RawTxsData,
- Coinbase: f.sequencerAddress,
- Timestamp_V1: now(),
- Caller: stateMetrics.SequencerCallerLabel,
+ BatchNumber: lastBatchNumberInState + 1,
+ OldStateRoot: stateRoot,
+ L1InfoRoot_V2: forcedBatch.GlobalExitRoot,
+ Transactions: forcedBatch.RawTxsData,
+ Coinbase: f.sequencerAddress,
+ TimestampLimit_V2: uint64(forcedBatch.ForcedAt.Unix()), //TODO: review if this is the TimestampLimit we need to use
+ SkipFirstChangeL2Block_V2: false,
+ SkipWriteBlockInfoRoot_V2: false,
+ Caller: stateMetrics.SequencerCallerLabel,
}
response, err := f.dbManager.ProcessForcedBatch(forcedBatch.ForcedBatchNumber, executorBatchRequest)
@@ -1203,6 +941,7 @@ func (f *finalizer) processForcedBatch(ctx context.Context, lastBatchNumberInSta
f.handleProcessForcedTxsResponse(ctx, executorBatchRequest, response, stateRoot)
} else {
if f.streamServer != nil && f.currentGERHash != forcedBatch.GlobalExitRoot {
+ //TODO: review this datastream event
updateGer := state.DSUpdateGER{
BatchNumber: executorBatchRequest.BatchNumber,
Timestamp: executorBatchRequest.Timestamp_V1.Unix(),
@@ -1235,99 +974,21 @@ func (f *finalizer) processForcedBatch(ctx context.Context, lastBatchNumberInSta
return lastBatchNumberInState, stateRoot
}
-// openNewWIPBatch opens a new batch in the state and returns it as WipBatch
-func (f *finalizer) openNewWIPBatch(ctx context.Context, batchNum uint64, ger, stateRoot common.Hash) (*WipBatch, error) {
- dbTx, err := f.dbManager.BeginStateTransaction(ctx)
- if err != nil {
- return nil, fmt.Errorf("failed to begin state transaction to open batch, err: %w", err)
- }
-
- // open next batch
- openBatchResp, err := f.openBatch(ctx, batchNum, ger, dbTx)
- if err != nil {
- if rollbackErr := dbTx.Rollback(ctx); rollbackErr != nil {
- return nil, fmt.Errorf(
- "failed to rollback dbTx: %s. Rollback err: %w",
- rollbackErr.Error(), err,
- )
- }
- return nil, err
- }
- if err := dbTx.Commit(ctx); err != nil {
- return nil, fmt.Errorf("failed to commit database transaction for opening a batch, err: %w", err)
- }
-
- // Check if synchronizer is up-to-date
- for !f.isSynced(ctx) {
- log.Info("wait for synchronizer to sync last batch")
- time.Sleep(time.Second)
- }
-
- return &WipBatch{
- batchNumber: batchNum,
- coinbase: f.sequencerAddress,
- initialStateRoot: stateRoot,
- stateRoot: stateRoot,
- timestamp: openBatchResp.Timestamp,
- globalExitRoot: ger,
- remainingResources: getMaxRemainingResources(f.batchConstraints),
- closingReason: state.EmptyClosingReason,
- }, err
-}
-
-// closeWIPBatch closes the current batch in the state
-func (f *finalizer) closeWIPBatch(ctx context.Context) error {
- transactions, effectivePercentages, err := f.dbManager.GetTransactionsByBatchNumber(ctx, f.wipBatch.batchNumber)
- if err != nil {
- return fmt.Errorf("failed to get transactions from transactions, err: %w", err)
- }
- for i, tx := range transactions {
- log.Infof("closeWIPBatch: BatchNum: %d, Tx position: %d, txHash: %s", f.wipBatch.batchNumber, i, tx.Hash().String())
- }
- usedResources := getUsedBatchResources(f.batchConstraints, f.wipBatch.remainingResources)
- receipt := ClosingBatchParameters{
- BatchNumber: f.wipBatch.batchNumber,
- StateRoot: f.wipBatch.stateRoot,
- LocalExitRoot: f.wipBatch.localExitRoot,
- Txs: transactions,
- EffectivePercentages: effectivePercentages,
- BatchResources: usedResources,
- ClosingReason: f.wipBatch.closingReason,
- }
- return f.dbManager.CloseBatch(ctx, receipt)
-}
-
-// openBatch opens a new batch in the state
-func (f *finalizer) openBatch(ctx context.Context, num uint64, ger common.Hash, dbTx pgx.Tx) (state.ProcessingContext, error) {
- processingCtx := state.ProcessingContext{
- BatchNumber: num,
- Coinbase: f.sequencerAddress,
- Timestamp: now(),
- GlobalExitRoot: ger,
- }
- err := f.dbManager.OpenBatch(ctx, processingCtx, dbTx)
- if err != nil {
- return state.ProcessingContext{}, fmt.Errorf("failed to open new batch, err: %w", err)
- }
-
- return processingCtx, nil
-}
-
// reprocessFullBatch reprocesses a batch used as sanity check
func (f *finalizer) reprocessFullBatch(ctx context.Context, batchNum uint64, initialStateRoot common.Hash, expectedNewStateRoot common.Hash) (*state.ProcessBatchResponse, error) {
reprocessError := func(batch *state.Batch, txs []ethereumTypes.Transaction) {
f.reprocessFullBatchError.Store(true)
// Log batch detailed info
- log.Infof("reprocessFullBatch: BatchNumber: %d, OldStateRoot: %s, ExpectedNewStateRoot: %s, GER: %s", batch.BatchNumber, initialStateRoot.String(), expectedNewStateRoot.String(), batch.GlobalExitRoot.String())
+ log.Infof("[reprocessFullBatch] BatchNumber: %d, InitialStateRoot: %s, ExpectedNewStateRoot: %s, GER: %s", batch.BatchNumber, initialStateRoot.String(), expectedNewStateRoot.String(), batch.GlobalExitRoot.String())
for i, tx := range txs {
- log.Infof("reprocessFullBatch: BatchNumber: %d, Tx position %d, Tx Hash: %s", batch.BatchNumber, i, tx.Hash())
+ log.Infof("[reprocessFullBatch] BatchNumber: %d, tx position %d, tx hash: %s", batch.BatchNumber, i, tx.Hash())
}
}
batch, err := f.dbManager.GetBatchByNumber(ctx, batchNum, nil)
if err != nil {
- log.Errorf("reprocessFullBatch: failed to get batch %d, err: %v", batchNum, err)
+ log.Errorf("[reprocessFullBatch] failed to get batch %d, err: %v", batchNum, err)
f.reprocessFullBatchError.Store(true)
return nil, ErrGetBatchByNumber
}
@@ -1337,7 +998,7 @@ func (f *finalizer) reprocessFullBatch(ctx context.Context, batchNum uint64, ini
caller = stateMetrics.SequencerCallerLabel
}
- // TODO: L1InfoRoot
+ // TODO: review this request for reprocess full batch
executorBatchRequest := state.ProcessRequest{
BatchNumber: batch.BatchNumber,
GlobalExitRoot_V1: batch.GlobalExitRoot,
@@ -1351,39 +1012,33 @@ func (f *finalizer) reprocessFullBatch(ctx context.Context, batchNum uint64, ini
forkID := f.dbManager.GetForkIDByBatchNumber(batchNum)
txs, _, _, err := state.DecodeTxs(batch.BatchL2Data, forkID)
if err != nil {
- log.Errorf("reprocessFullBatch: error decoding BatchL2Data for batch %d. Error: %v", batch.BatchNumber, err)
+ log.Errorf("[reprocessFullBatch] error decoding BatchL2Data for batch %d. Error: %v", batch.BatchNumber, err)
reprocessError(batch, []ethereumTypes.Transaction{})
return nil, ErrDecodeBatchL2Data
}
- for i, tx := range txs {
- log.Infof("reprocessFullBatch: BatchNumber: %d, Tx position %d, Tx Hash: %s", batch.BatchNumber, i, tx.Hash())
- }
var result *state.ProcessBatchResponse
- if forkID < state.FORKID_ETROG {
- result, err = f.executor.ProcessBatch(ctx, executorBatchRequest, false)
- if err != nil {
- log.Errorf("reprocessFullBatch: failed to process batch %d. Error: %s", batch.BatchNumber, err)
- reprocessError(batch, txs)
- return nil, ErrProcessBatch
- }
- } else {
- result, err = f.executor.ProcessBatchV2(ctx, executorBatchRequest, false)
- if err != nil {
- log.Errorf("reprocessFullBatch: failed to process batch %d. Error: %s", batch.BatchNumber, err)
- reprocessError(batch, txs)
- return nil, ErrProcessBatch
- }
+ result, err = f.executor.ProcessBatchV2(ctx, executorBatchRequest, false)
+ if err != nil {
+ log.Errorf("[reprocessFullBatch] failed to process batch %d. Error: %s", batch.BatchNumber, err)
+ reprocessError(batch, txs)
+ return nil, ErrProcessBatch
+ }
+
+ if result.ExecutorError != nil {
+ log.Errorf("[reprocessFullBatch] executor error when reprocessing batch %d, error: %v", batch.BatchNumber, result.ExecutorError)
+ reprocessError(batch, txs)
+ return nil, ErrExecutorError
}
if result.IsRomOOCError {
- log.Errorf("reprocessFullBatch: failed to process batch %d because OutOfCounters", batch.BatchNumber)
+ log.Errorf("[reprocessFullBatch] failed to process batch %d because OutOfCounters", batch.BatchNumber)
reprocessError(batch, txs)
payload, err := json.Marshal(executorBatchRequest)
if err != nil {
- log.Errorf("reprocessFullBatch: error marshaling payload: %v", err)
+ log.Errorf("[reprocessFullBatch] error marshaling payload: %v", err)
} else {
event := &event.Event{
ReceivedAt: time.Now(),
@@ -1396,7 +1051,7 @@ func (f *finalizer) reprocessFullBatch(ctx context.Context, batchNum uint64, ini
}
err = f.eventLog.LogEvent(ctx, event)
if err != nil {
- log.Errorf("reprocessFullBatch: error storing payload: %v", err)
+ log.Errorf("[reprocessFullBatch] error storing payload: %v", err)
}
}
@@ -1404,18 +1059,12 @@ func (f *finalizer) reprocessFullBatch(ctx context.Context, batchNum uint64, ini
}
if result.NewStateRoot != expectedNewStateRoot {
- log.Errorf("reprocessFullBatch: new state root mismatch for batch %d, expected: %s, got: %s", batch.BatchNumber, expectedNewStateRoot.String(), result.NewStateRoot.String())
+ log.Errorf("[reprocessFullBatch] new state root mismatch for batch %d, expected: %s, got: %s", batch.BatchNumber, expectedNewStateRoot.String(), result.NewStateRoot.String())
reprocessError(batch, txs)
return nil, ErrStateRootNoMatch
}
- if result.ExecutorError != nil {
- log.Errorf("reprocessFullBatch: executor error when reprocessing batch %d, error: %v", batch.BatchNumber, result.ExecutorError)
- reprocessError(batch, txs)
- return nil, ErrExecutorError
- }
-
- log.Infof("reprocessFullBatch: reprocess successfully done for batch %d", batch.BatchNumber)
+ log.Infof("[reprocessFullBatch]: reprocess successfully done for batch %d", batch.BatchNumber)
return result, nil
}
@@ -1423,18 +1072,18 @@ func (f *finalizer) reprocessFullBatch(ctx context.Context, batchNum uint64, ini
func (f *finalizer) isDeadlineEncountered() bool {
// Forced batch deadline
if f.nextForcedBatchDeadline != 0 && now().Unix() >= f.nextForcedBatchDeadline {
- log.Infof("closing batch: %d, forced batch deadline encountered.", f.wipBatch.batchNumber)
+ log.Infof("closing batch %d, forced batch deadline encountered.", f.wipBatch.batchNumber)
return true
}
// Global Exit Root deadline
if f.nextGERDeadline != 0 && now().Unix() >= f.nextGERDeadline {
- log.Infof("closing batch: %d, GER deadline encountered.", f.wipBatch.batchNumber)
+ log.Infof("closing batch %d, GER deadline encountered.", f.wipBatch.batchNumber)
f.wipBatch.closingReason = state.GlobalExitRootDeadlineClosingReason
return true
}
// Timestamp resolution deadline
if !f.wipBatch.isEmpty() && f.wipBatch.timestamp.Add(f.cfg.TimestampResolution.Duration).Before(time.Now()) {
- log.Infof("closing batch: %d, because of timestamp resolution.", f.wipBatch.batchNumber)
+ log.Infof("closing batch %d, because of timestamp resolution.", f.wipBatch.batchNumber)
f.wipBatch.closingReason = state.TimeoutResolutionDeadlineClosingReason
return true
}
@@ -1460,7 +1109,7 @@ func (f *finalizer) checkRemainingResources(result *state.ProcessBatchResponse,
err := f.wipBatch.remainingResources.Sub(usedResources)
if err != nil {
- log.Infof("current transaction exceeds the batch limit, updating metadata for tx in worker and continuing")
+ log.Infof("current transaction exceeds the remaining batch resources, updating metadata for tx in worker and continuing")
start := time.Now()
f.worker.UpdateTxZKCounters(result.BlockResponses[0].TransactionResponses[0].TxHash, tx.From, usedResources.ZKCounters)
metrics.WorkerProcessingTime(time.Since(start))
@@ -1470,8 +1119,8 @@ func (f *finalizer) checkRemainingResources(result *state.ProcessBatchResponse,
return nil
}
-// isBatchResourcesFull checks if one of resources of the wip batch has reached the max value
-func (f *finalizer) isBatchResourcesFull() bool {
+// isBatchResourcesExhausted checks if one of resources of the wip batch has reached the max value
+func (f *finalizer) isBatchResourcesExhausted() bool {
resources := f.wipBatch.remainingResources
zkCounters := resources.ZKCounters
result := false
@@ -1503,13 +1152,46 @@ func (f *finalizer) isBatchResourcesFull() bool {
}
if result {
- log.Infof("closing batch: %d, because it reached %s threshold limit", f.wipBatch.batchNumber, resourceDesc)
+ log.Infof("closing batch %d, because it reached %s limit", f.wipBatch.batchNumber, resourceDesc)
f.wipBatch.closingReason = state.BatchAlmostFullClosingReason
}
return result
}
+// getConstraintThresholdUint64 returns the threshold for the given input
+func (f *finalizer) getConstraintThresholdUint64(input uint64) uint64 {
+ return input * uint64(f.cfg.ResourcePercentageToCloseBatch) / 100 //nolint:gomnd
+}
+
+// getConstraintThresholdUint32 returns the threshold for the given input
+func (f *finalizer) getConstraintThresholdUint32(input uint32) uint32 {
+ return uint32(input*f.cfg.ResourcePercentageToCloseBatch) / 100 //nolint:gomnd
+}
+
+// halt halts the finalizer
+func (f *finalizer) halt(ctx context.Context, err error) {
+ event := &event.Event{
+ ReceivedAt: time.Now(),
+ Source: event.Source_Node,
+ Component: event.Component_Sequencer,
+ Level: event.Level_Critical,
+ EventID: event.EventID_FinalizerHalt,
+ Description: fmt.Sprintf("finalizer halted due to error: %s", err),
+ }
+
+ eventErr := f.eventLog.LogEvent(ctx, event)
+ if eventErr != nil {
+ log.Errorf("error storing finalizer halt event: %v", eventErr)
+ }
+
+ for {
+ log.Errorf("fatal error: %s", err)
+ log.Error("halting the finalizer")
+ time.Sleep(5 * time.Second) //nolint:gomnd
+ }
+}
+
// getUsedBatchResources returns the max resources that can be used in a batch
func getUsedBatchResources(constraints state.BatchConstraintsCfg, remainingResources state.BatchResources) state.BatchResources {
return state.BatchResources{
@@ -1543,13 +1225,3 @@ func getMaxRemainingResources(constraints state.BatchConstraintsCfg) state.Batch
Bytes: constraints.MaxBatchBytesSize,
}
}
-
-// getConstraintThresholdUint64 returns the threshold for the given input
-func (f *finalizer) getConstraintThresholdUint64(input uint64) uint64 {
- return input * uint64(f.cfg.ResourcePercentageToCloseBatch) / 100 //nolint:gomnd
-}
-
-// getConstraintThresholdUint32 returns the threshold for the given input
-func (f *finalizer) getConstraintThresholdUint32(input uint32) uint32 {
- return uint32(input*f.cfg.ResourcePercentageToCloseBatch) / 100 //nolint:gomnd
-}
diff --git a/sequencer/finalizer_test.go b/sequencer/finalizer_test.go
index d15fff755c..406999ecdf 100644
--- a/sequencer/finalizer_test.go
+++ b/sequencer/finalizer_test.go
@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"math/big"
- "strings"
"sync"
"testing"
"time"
@@ -17,13 +16,10 @@ import (
"github.com/0xPolygonHermez/zkevm-node/pool"
"github.com/0xPolygonHermez/zkevm-node/state"
stateMetrics "github.com/0xPolygonHermez/zkevm-node/state/metrics"
- "github.com/0xPolygonHermez/zkevm-node/state/runtime"
"github.com/0xPolygonHermez/zkevm-node/state/runtime/executor"
"github.com/0xPolygonHermez/zkevm-node/test/constants"
- "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
@@ -38,6 +34,7 @@ var (
nilErr error
dbManagerMock = new(DbManagerMock)
executorMock = new(StateMock)
+ ethermanMock = new(EthermanMock)
workerMock = new(WorkerMock)
dbTxMock = new(DbTxMock)
bc = state.BatchConstraintsCfg{
@@ -134,7 +131,7 @@ func TestNewFinalizer(t *testing.T) {
dbManagerMock.On("GetLastSentFlushID", context.Background()).Return(uint64(0), nil)
// arrange and act
- f = newFinalizer(cfg, poolCfg, workerMock, dbManagerMock, executorMock, seqAddr, isSynced, closingSignalCh, bc, eventLog, nil)
+ f = newFinalizer(cfg, poolCfg, workerMock, dbManagerMock, executorMock, ethermanMock, seqAddr, isSynced, closingSignalCh, bc, eventLog, nil)
// assert
assert.NotNil(t, f)
@@ -147,7 +144,7 @@ func TestNewFinalizer(t *testing.T) {
assert.Equal(t, f.batchConstraints, bc)
}
-func TestFinalizer_handleProcessTransactionResponse(t *testing.T) {
+/*func TestFinalizer_handleProcessTransactionResponse(t *testing.T) {
f = setupFinalizer(true)
ctx = context.Background()
txTracker := &TxTracker{
@@ -317,14 +314,14 @@ func TestFinalizer_handleProcessTransactionResponse(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
storedTxs := make([]transactionToStore, 0)
- f.pendingTransactionsToStore = make(chan transactionToStore)
+ f.pendingL2BlocksToStore = make(chan transactionToStore)
if tc.expectedStoredTx.batchResponse != nil {
done = make(chan bool) // init a new done channel
go func() {
- for tx := range f.pendingTransactionsToStore {
+ for tx := range f.pendingL2BlocksToStore {
storedTxs = append(storedTxs, tx)
- f.pendingTransactionsToStoreWG.Done()
+ f.pendingL2BlocksToStoreWG.Done()
}
done <- true // signal that the goroutine is done
}()
@@ -361,9 +358,9 @@ func TestFinalizer_handleProcessTransactionResponse(t *testing.T) {
}
if tc.expectedStoredTx.batchResponse != nil {
- close(f.pendingTransactionsToStore) // close the channel
- <-done // wait for the goroutine to finish
- f.pendingTransactionsToStoreWG.Wait()
+ close(f.pendingL2BlocksToStore) // close the channel
+ <-done // wait for the goroutine to finish
+ f.pendingL2BlocksToStoreWG.Wait()
require.Len(t, storedTxs, 1)
actualTx := storedTxs[0] //nolint:gosec
assertEqualTransactionToStore(t, tc.expectedStoredTx, actualTx)
@@ -375,19 +372,19 @@ func TestFinalizer_handleProcessTransactionResponse(t *testing.T) {
dbManagerMock.AssertExpectations(t)
})
}
-}
+}*/
-func assertEqualTransactionToStore(t *testing.T, expectedTx, actualTx transactionToStore) {
- require.Equal(t, expectedTx.from, actualTx.from)
- require.Equal(t, expectedTx.hash, actualTx.hash)
- require.Equal(t, expectedTx.response, actualTx.response)
- require.Equal(t, expectedTx.batchNumber, actualTx.batchNumber)
- require.Equal(t, expectedTx.timestamp, actualTx.timestamp)
- require.Equal(t, expectedTx.coinbase, actualTx.coinbase)
- require.Equal(t, expectedTx.oldStateRoot, actualTx.oldStateRoot)
- require.Equal(t, expectedTx.isForcedBatch, actualTx.isForcedBatch)
- require.Equal(t, expectedTx.flushId, actualTx.flushId)
-}
+/*func assertEqualTransactionToStore(t *testing.T, expectedTx, actualTx transactionToStore) {
+ require.Equal(t, expectedTx.from, actualTx.from)
+ require.Equal(t, expectedTx.hash, actualTx.hash)
+ require.Equal(t, expectedTx.response, actualTx.response)
+ require.Equal(t, expectedTx.batchNumber, actualTx.batchNumber)
+ require.Equal(t, expectedTx.timestamp, actualTx.timestamp)
+ require.Equal(t, expectedTx.coinbase, actualTx.coinbase)
+ require.Equal(t, expectedTx.oldStateRoot, actualTx.oldStateRoot)
+ require.Equal(t, expectedTx.isForcedBatch, actualTx.isForcedBatch)
+ require.Equal(t, expectedTx.flushId, actualTx.flushId)
+}*/
func TestFinalizer_newWIPBatch(t *testing.T) {
// arrange
@@ -407,7 +404,7 @@ func TestFinalizer_newWIPBatch(t *testing.T) {
txs := []types.Transaction{*tx}
require.NoError(t, err)
newBatchNum := f.wipBatch.batchNumber + 1
- expectedNewWipBatch := &WipBatch{
+ expectedNewWipBatch := &Batch{
batchNumber: newBatchNum,
coinbase: f.sequencerAddress,
initialStateRoot: newHash,
@@ -459,7 +456,7 @@ func TestFinalizer_newWIPBatch(t *testing.T) {
closeBatchParams ClosingBatchParameters
stateRootAndLERErr error
openBatchErr error
- expectedWip *WipBatch
+ expectedWip *Batch
reprocessFullBatchResponse *state.ProcessBatchResponse
expectedErr error
reprocessBatchErr error
@@ -614,7 +611,7 @@ func TestFinalizer_newWIPBatch(t *testing.T) {
}
}
-func TestFinalizer_processForcedBatches(t *testing.T) {
+/*func TestFinalizer_processForcedBatches(t *testing.T) {
var err error
f = setupFinalizer(false)
now = testNow
@@ -781,13 +778,13 @@ func TestFinalizer_processForcedBatches(t *testing.T) {
var newStateRoot common.Hash
stateRoot := oldHash
storedTxs := make([]transactionToStore, 0)
- f.pendingTransactionsToStore = make(chan transactionToStore)
+ f.pendingL2BlocksToStore = make(chan transactionToStore)
if tc.expectedStoredTx != nil && len(tc.expectedStoredTx) > 0 {
done = make(chan bool) // init a new done channel
go func() {
- for tx := range f.pendingTransactionsToStore {
+ for tx := range f.pendingL2BlocksToStore {
storedTxs = append(storedTxs, tx)
- f.pendingTransactionsToStoreWG.Done()
+ f.pendingL2BlocksToStoreWG.Done()
}
done <- true // signal that the goroutine is done
}()
@@ -849,9 +846,9 @@ func TestFinalizer_processForcedBatches(t *testing.T) {
assert.EqualError(t, err, tc.expectedErr.Error())
} else {
if tc.expectedStoredTx != nil && len(tc.expectedStoredTx) > 0 {
- close(f.pendingTransactionsToStore) // ensure the channel is closed
- <-done // wait for the goroutine to finish
- f.pendingTransactionsToStoreWG.Wait()
+ close(f.pendingL2BlocksToStore) // ensure the channel is closed
+ <-done // wait for the goroutine to finish
+ f.pendingL2BlocksToStoreWG.Wait()
for i := range tc.expectedStoredTx {
require.Equal(t, tc.expectedStoredTx[i], storedTxs[i])
}
@@ -865,7 +862,7 @@ func TestFinalizer_processForcedBatches(t *testing.T) {
}
})
}
-}
+}*/
func TestFinalizer_openWIPBatch(t *testing.T) {
// arrange
@@ -875,7 +872,7 @@ func TestFinalizer_openWIPBatch(t *testing.T) {
now = time.Now
}()
batchNum := f.wipBatch.batchNumber + 1
- expectedWipBatch := &WipBatch{
+ expectedWipBatch := &Batch{
batchNumber: batchNum,
coinbase: f.sequencerAddress,
initialStateRoot: oldHash,
@@ -890,7 +887,7 @@ func TestFinalizer_openWIPBatch(t *testing.T) {
beginTxErr error
commitErr error
rollbackErr error
- expectedWip *WipBatch
+ expectedWip *Batch
expectedErr error
}{
{
@@ -1011,65 +1008,6 @@ func TestFinalizer_closeBatch(t *testing.T) {
}
}
-func TestFinalizer_openBatch(t *testing.T) {
- // arrange
- f = setupFinalizer(true)
- now = testNow
- defer func() {
- now = time.Now
- }()
- batchNum := f.wipBatch.batchNumber + 1
- testCases := []struct {
- name string
- batchNum uint64
- managerErr error
- expectedCtx state.ProcessingContext
- expectedErr error
- }{
- {
- name: "Success",
- batchNum: batchNum,
- managerErr: nil,
- expectedCtx: state.ProcessingContext{
- BatchNumber: batchNum,
- Coinbase: f.sequencerAddress,
- Timestamp: now(),
- GlobalExitRoot: oldHash,
- },
- expectedErr: nil,
- },
- {
- name: "Error Manager",
- batchNum: batchNum,
- managerErr: testErr,
- expectedCtx: state.ProcessingContext{},
- expectedErr: openBatchError,
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- // arrange
- dbManagerMock.Mock.On("OpenBatch", mock.Anything, mock.Anything, mock.Anything).Return(tc.managerErr).Once()
-
- // act
- actualCtx, err := f.openBatch(ctx, tc.batchNum, oldHash, nil)
-
- // assert
- if tc.expectedErr != nil {
- assert.Error(t, err)
- assert.EqualError(t, err, tc.expectedErr.Error())
- assert.ErrorIs(t, err, tc.managerErr)
- assert.Empty(t, actualCtx)
- } else {
- assert.NoError(t, err)
- assert.Equal(t, tc.expectedCtx, actualCtx)
- }
- dbManagerMock.AssertExpectations(t)
- })
- }
-}
-
func TestFinalizer_isDeadlineEncountered(t *testing.T) {
// arrange
f = setupFinalizer(true)
@@ -1301,7 +1239,7 @@ func TestFinalizer_handleTransactionError(t *testing.T) {
}
}
-func Test_processTransaction(t *testing.T) {
+/*func Test_processTransaction(t *testing.T) {
f = setupFinalizer(true)
gasUsed := uint64(100000)
txTracker := &TxTracker{
@@ -1415,13 +1353,13 @@ func Test_processTransaction(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
storedTxs := make([]transactionToStore, 0)
- f.pendingTransactionsToStore = make(chan transactionToStore, 1)
+ f.pendingL2BlocksToStore = make(chan transactionToStore, 1)
if tc.expectedStoredTx.batchResponse != nil {
done = make(chan bool) // init a new done channel
go func() {
- for tx := range f.pendingTransactionsToStore {
+ for tx := range f.pendingL2BlocksToStore {
storedTxs = append(storedTxs, tx)
- f.pendingTransactionsToStoreWG.Done()
+ f.pendingL2BlocksToStoreWG.Done()
}
done <- true // signal that the goroutine is done
}()
@@ -1449,9 +1387,9 @@ func Test_processTransaction(t *testing.T) {
errWg, err := f.processTransaction(tc.ctx, tc.tx, true)
if tc.expectedStoredTx.batchResponse != nil {
- close(f.pendingTransactionsToStore) // ensure the channel is closed
- <-done // wait for the goroutine to finish
- f.pendingTransactionsToStoreWG.Wait()
+ close(f.pendingL2BlocksToStore) // ensure the channel is closed
+ <-done // wait for the goroutine to finish
+ f.pendingL2BlocksToStoreWG.Wait()
// require.Equal(t, tc.expectedStoredTx, storedTxs[0])
}
if tc.expectedErr != nil {
@@ -1467,9 +1405,9 @@ func Test_processTransaction(t *testing.T) {
dbManagerMock.AssertExpectations(t)
})
}
-}
+}*/
-func Test_handleForcedTxsProcessResp(t *testing.T) {
+/*func Test_handleForcedTxsProcessResp(t *testing.T) {
var chainID = new(big.Int).SetInt64(400)
var pvtKey = "0x28b2b0318721be8c8339199172cd7cc8f5e273800a35616ec893083a4b32c02e"
RawTxsData1 := make([]byte, 0, 2)
@@ -1660,13 +1598,13 @@ func Test_handleForcedTxsProcessResp(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
storedTxs := make([]transactionToStore, 0)
- f.pendingTransactionsToStore = make(chan transactionToStore)
+ f.pendingL2BlocksToStore = make(chan transactionToStore)
// Mock storeProcessedTx to store txs into the storedTxs slice
go func() {
- for tx := range f.pendingTransactionsToStore {
+ for tx := range f.pendingL2BlocksToStore {
storedTxs = append(storedTxs, tx)
- f.pendingTransactionsToStoreWG.Done()
+ f.pendingL2BlocksToStoreWG.Done()
}
}()
@@ -1676,7 +1614,7 @@ func Test_handleForcedTxsProcessResp(t *testing.T) {
f.handleProcessForcedTxsResponse(ctx, tc.request, tc.result, tc.oldStateRoot)
- f.pendingTransactionsToStoreWG.Wait()
+ f.pendingL2BlocksToStoreWG.Wait()
require.Nil(t, err)
require.Equal(t, len(tc.expectedStoredTxs), len(storedTxs))
for i := 0; i < len(tc.expectedStoredTxs); i++ {
@@ -1686,9 +1624,9 @@ func Test_handleForcedTxsProcessResp(t *testing.T) {
}
})
}
-}
+}*/
-func TestFinalizer_storeProcessedTx(t *testing.T) {
+/*func TestFinalizer_storeProcessedTx(t *testing.T) {
f = setupFinalizer(false)
testCases := []struct {
name string
@@ -1756,7 +1694,7 @@ func TestFinalizer_storeProcessedTx(t *testing.T) {
dbManagerMock.AssertExpectations(t)
})
}
-}
+}*/
func TestFinalizer_updateWorkerAfterSuccessfulProcessing(t *testing.T) {
testCases := []struct {
@@ -2176,7 +2114,7 @@ func TestFinalizer_isBatchAlmostFull(t *testing.T) {
f.wipBatch.remainingResources = tc.modifyResourceFunc(maxRemainingResource)
// act
- result := f.isBatchResourcesFull()
+ result := f.isBatchResourcesExhausted()
// assert
assert.Equal(t, tc.expectedResult, result)
@@ -2333,7 +2271,7 @@ func Test_sortForcedBatches(t *testing.T) {
}
func setupFinalizer(withWipBatch bool) *finalizer {
- wipBatch := new(WipBatch)
+ wipBatch := new(Batch)
dbManagerMock = new(DbManagerMock)
executorMock = new(StateMock)
workerMock = new(WorkerMock)
@@ -2343,7 +2281,7 @@ func setupFinalizer(withWipBatch bool) *finalizer {
if err != nil {
panic(err)
}
- wipBatch = &WipBatch{
+ wipBatch = &Batch{
batchNumber: 1,
coinbase: seqAddr,
initialStateRoot: oldHash,
@@ -2372,21 +2310,23 @@ func setupFinalizer(withWipBatch bool) *finalizer {
batchConstraints: bc,
currentGERHash: common.Hash{},
// closing signals
- nextGER: common.Hash{},
- nextGERDeadline: 0,
- nextGERMux: new(sync.RWMutex),
- nextForcedBatches: make([]state.ForcedBatch, 0),
- nextForcedBatchDeadline: 0,
- nextForcedBatchesMux: new(sync.RWMutex),
- handlingL2Reorg: false,
- effectiveGasPrice: pool.NewEffectiveGasPrice(poolCfg.EffectiveGasPrice, poolCfg.DefaultMinGasPriceAllowed),
- eventLog: eventLog,
- pendingTransactionsToStore: make(chan transactionToStore, bc.MaxTxsPerBatch*pendingTxsBufferSizeMultiplier),
- pendingTransactionsToStoreWG: new(sync.WaitGroup),
- storedFlushID: 0,
- storedFlushIDCond: sync.NewCond(new(sync.Mutex)),
- proverID: "",
- lastPendingFlushID: 0,
- pendingFlushIDCond: sync.NewCond(new(sync.Mutex)),
+ nextGER: common.Hash{},
+ nextGERDeadline: 0,
+ nextGERMux: new(sync.Mutex),
+ nextForcedBatches: make([]state.ForcedBatch, 0),
+ nextForcedBatchDeadline: 0,
+ nextForcedBatchesMux: new(sync.Mutex),
+ handlingL2Reorg: false,
+ effectiveGasPrice: pool.NewEffectiveGasPrice(poolCfg.EffectiveGasPrice, poolCfg.DefaultMinGasPriceAllowed),
+ eventLog: eventLog,
+ pendingL2BlocksToProcess: make(chan *L2Block, pendingL2BlocksBufferSize), //TODO: review buffer size
+ pendingL2BlocksToProcessWG: new(sync.WaitGroup),
+ pendingL2BlocksToStore: make(chan *L2Block, pendingL2BlocksBufferSize), //TODO: review buffer size
+ pendingL2BlocksToStoreWG: new(sync.WaitGroup),
+ storedFlushID: 0,
+ storedFlushIDCond: sync.NewCond(new(sync.Mutex)),
+ proverID: "",
+ lastPendingFlushID: 0,
+ pendingFlushIDCond: sync.NewCond(new(sync.Mutex)),
}
}
diff --git a/sequencer/interfaces.go b/sequencer/interfaces.go
index 74ddba9e63..b9c25ae87f 100644
--- a/sequencer/interfaces.go
+++ b/sequencer/interfaces.go
@@ -71,17 +71,18 @@ type stateInterface interface {
GetLastBlock(ctx context.Context, dbTx pgx.Tx) (*state.Block, error)
GetLatestGlobalExitRoot(ctx context.Context, maxBlockNumber uint64, dbTx pgx.Tx) (state.GlobalExitRoot, time.Time, error)
GetLastL2BlockHeader(ctx context.Context, dbTx pgx.Tx) (*state.L2Header, error)
- UpdateBatchL2Data(ctx context.Context, batchNumber uint64, batchL2Data []byte, dbTx pgx.Tx) error
- UpdateBatchL2DataAndLER(ctx context.Context, batchNumber uint64, batchL2Data []byte, localExitRoot common.Hash, dbTx pgx.Tx) error
+ UpdateBatch(ctx context.Context, batchNumber uint64, batchL2Data []byte, localExitRoot common.Hash, dbTx pgx.Tx) error
ProcessSequencerBatch(ctx context.Context, batchNumber uint64, batchL2Data []byte, caller metrics.CallerLabel, dbTx pgx.Tx) (*state.ProcessBatchResponse, error)
GetForcedBatchesSince(ctx context.Context, forcedBatchNumber, maxBlockNumber uint64, dbTx pgx.Tx) ([]*state.ForcedBatch, error)
GetLastTrustedForcedBatchNumber(ctx context.Context, dbTx pgx.Tx) (uint64, error)
GetLatestVirtualBatchTimestamp(ctx context.Context, dbTx pgx.Tx) (time.Time, error)
CountReorgs(ctx context.Context, dbTx pgx.Tx) (uint64, error)
GetLatestGer(ctx context.Context, maxBlockNumber uint64) (state.GlobalExitRoot, time.Time, error)
+ GetLatestL1InfoRoot(ctx context.Context, maxBlockNumber uint64) (state.L1InfoTreeExitRootStorageEntry, error)
FlushMerkleTree(ctx context.Context) error
GetStoredFlushID(ctx context.Context) (uint64, string, error)
GetForkIDByBatchNumber(batchNumber uint64) uint64
+ AddL2Block(ctx context.Context, batchNumber uint64, l2Block *state.L2Block, receipts []*types.Receipt, txsEGPData []state.StoreTxEGPData, dbTx pgx.Tx) error
GetDSGenesisBlock(ctx context.Context, dbTx pgx.Tx) (*state.DSL2Block, error)
GetDSBatches(ctx context.Context, firstBatchNumber, lastBatchNumber uint64, readWIPBatch bool, dbTx pgx.Tx) ([]*state.DSBatch, error)
GetDSL2Blocks(ctx context.Context, firstBatchNumber, lastBatchNumber uint64, dbTx pgx.Tx) ([]*state.DSL2Block, error)
@@ -89,7 +90,7 @@ type stateInterface interface {
}
type workerInterface interface {
- GetBestFittingTx(resources state.BatchResources) *TxTracker
+ GetBestFittingTx(resources state.BatchResources) (*TxTracker, error)
UpdateAfterSingleSuccessfulTxExecution(from common.Address, touchedAddresses map[common.Address]*state.InfoReadWrite) []*TxTracker
UpdateTxZKCounters(txHash common.Hash, from common.Address, ZKCounters state.ZKCounters)
AddTxTracker(ctx context.Context, txTracker *TxTracker) (replacedTx *TxTracker, dropReason error)
@@ -112,7 +113,7 @@ type dbManagerInterface interface {
GetLastBatchNumber(ctx context.Context) (uint64, error)
DeleteTransactionFromPool(ctx context.Context, txHash common.Hash) error
CloseBatch(ctx context.Context, params ClosingBatchParameters) error
- GetWIPBatch(ctx context.Context) (*WipBatch, error)
+ GetWIPBatch(ctx context.Context) (*Batch, error)
GetTransactionsByBatchNumber(ctx context.Context, batchNumber uint64) (txs []types.Transaction, effectivePercentages []uint8, err error)
GetLastBatch(ctx context.Context) (*state.Batch, error)
GetLastNBatches(ctx context.Context, numBatches uint) ([]*state.Batch, error)
@@ -120,12 +121,15 @@ type dbManagerInterface interface {
GetBatchByNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.Batch, error)
IsBatchClosed(ctx context.Context, batchNum uint64) (bool, error)
GetLatestGer(ctx context.Context, maxBlockNumber uint64) (state.GlobalExitRoot, time.Time, error)
+ GetLatestL1InfoRoot(ctx context.Context, maxBlockNumber uint64) (state.L1InfoTreeExitRootStorageEntry, error)
ProcessForcedBatch(ForcedBatchNumber uint64, request state.ProcessRequest) (*state.ProcessBatchResponse, error)
GetForcedBatchesSince(ctx context.Context, forcedBatchNumber, maxBlockNumber uint64, dbTx pgx.Tx) ([]*state.ForcedBatch, error)
GetLastL2BlockHeader(ctx context.Context, dbTx pgx.Tx) (*state.L2Header, error)
GetLastBlock(ctx context.Context, dbTx pgx.Tx) (*state.Block, error)
+ GetLastL2Block(ctx context.Context, dbTx pgx.Tx) (*state.L2Block, error)
GetLastTrustedForcedBatchNumber(ctx context.Context, dbTx pgx.Tx) (uint64, error)
GetBalanceByStateRoot(ctx context.Context, address common.Address, root common.Hash) (*big.Int, error)
+ UpdateBatch(ctx context.Context, batchNumber uint64, batchL2Data []byte, localExitRoot common.Hash, dbTx pgx.Tx) error
UpdateTxStatus(ctx context.Context, hash common.Hash, newStatus pool.TxStatus, isWIP bool, reason *string) error
GetLatestVirtualBatchTimestamp(ctx context.Context, dbTx pgx.Tx) (time.Time, error)
CountReorgs(ctx context.Context, dbTx pgx.Tx) (uint64, error)
@@ -134,7 +138,9 @@ type dbManagerInterface interface {
GetDefaultMinGasPriceAllowed() uint64
GetL1AndL2GasPrice() (uint64, uint64)
GetStoredFlushID(ctx context.Context) (uint64, string, error)
- StoreProcessedTxAndDeleteFromPool(ctx context.Context, tx transactionToStore) error
+ StoreL2Block(ctx context.Context, batchNumber uint64, l2Block *state.ProcessBlockResponse, txsEGPLog []*state.EffectiveGasPriceLog, dbTx pgx.Tx) error
GetForcedBatch(ctx context.Context, forcedBatchNumber uint64, dbTx pgx.Tx) (*state.ForcedBatch, error)
GetForkIDByBatchNumber(batchNumber uint64) uint64
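+ // BuildChangeL2Block encodes a changeL2Block entry from the given delta timestamp and L1 info tree index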
+ BuildChangeL2Block(deltaTimestamp uint32, l1InfoTreeIndex uint32) []byte
+ DSSendL2Block(l2Block *L2Block) error
}
diff --git a/sequencer/l2block.go b/sequencer/l2block.go
new file mode 100644
index 0000000000..59a7720525
--- /dev/null
+++ b/sequencer/l2block.go
@@ -0,0 +1,433 @@
+package sequencer
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/0xPolygonHermez/zkevm-node/hex"
+ "github.com/0xPolygonHermez/zkevm-node/log"
+ "github.com/0xPolygonHermez/zkevm-node/pool"
+ "github.com/0xPolygonHermez/zkevm-node/state"
+ stateMetrics "github.com/0xPolygonHermez/zkevm-node/state/metrics"
+ "github.com/ethereum/go-ethereum/common"
+)
+
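+// changeL2BlockMark is the single-byte marker used to tag a changeL2Block entry in the encoded batch L2 data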
+var changeL2BlockMark = []byte{0x0B}
+
+// L2Block represents a wip or processed L2 block
+type L2Block struct {
+ timestamp time.Time
+ deltaTimestamp uint32
+ initialStateRoot common.Hash
+ initialAccInputHash common.Hash
+ batchNumber uint64
+ forcedBatch bool
+ coinbase common.Address
+ stateRoot common.Hash
+ l1InfoTreeExitRoot state.L1InfoTreeExitRootStorageEntry
+ transactions []*TxTracker
+ batchResponse *state.ProcessBatchResponse
+}
+
+func (b *L2Block) isEmpty() bool {
+ return len(b.transactions) == 0
+}
+
+// addTx adds a tx to the L2 block
+func (b *L2Block) addTx(tx *TxTracker) {
+ b.transactions = append(b.transactions, tx)
+}
+
+// initWIPL2Block inits the wip L2 block
+func (f *finalizer) initWIPL2Block(ctx context.Context) {
+ f.wipL2Block = &L2Block{}
+
+ // Wait for the L1InfoTree to be updated for the first time
+ f.lastL1InfoTreeCond.L.Lock()
+ for !f.lastL1InfoTreeValid {
+ log.Infof("waiting for L1InfoTree to be updated")
+ f.lastL1InfoTreeCond.Wait()
+ }
+ f.lastL1InfoTreeCond.L.Unlock()
+
+ f.lastL1InfoTreeMux.Lock()
+ f.wipL2Block.l1InfoTreeExitRoot = f.lastL1InfoTree
+ f.lastL1InfoTreeMux.Unlock()
+ log.Infof("L1Infotree updated. L1InfoTreeIndex: %d", f.wipL2Block.l1InfoTreeExitRoot.L1InfoTreeIndex)
+
+ lastL2Block, err := f.dbManager.GetLastL2Block(ctx, nil)
+ if err != nil {
+ log.Fatalf("failed to get last L2 block number. Error: %w", err)
+ }
+
+ f.openNewWIPL2Block(ctx, &lastL2Block.ReceivedAt)
+}
+
+// addPendingL2BlockToProcess adds a pending L2 block that is closed and ready to be processed by the executor
+func (f *finalizer) addPendingL2BlockToProcess(ctx context.Context, l2Block *L2Block) {
+ f.pendingL2BlocksToProcessWG.Add(1)
+
+ for _, tx := range l2Block.transactions {
+ f.worker.AddPendingTxToStore(tx.Hash, tx.From)
+ }
+
+ select {
+ case f.pendingL2BlocksToProcess <- l2Block:
+ case <-ctx.Done():
+ // If the context is cancelled before we can send to the channel, we must decrement the WaitGroup count and
+ // delete the pending txs added to the worker
+ f.pendingL2BlocksToProcessWG.Done()
+ for _, tx := range l2Block.transactions {
+ f.worker.DeletePendingTxToStore(tx.Hash, tx.From)
+ }
+ }
+}
+
+// addPendingL2BlockToStore adds an L2 block that is ready to be stored in the state DB once its flushId has been stored by the executor
+func (f *finalizer) addPendingL2BlockToStore(ctx context.Context, l2Block *L2Block) {
+ f.pendingL2BlocksToStoreWG.Add(1)
+
+ for _, tx := range l2Block.transactions {
+ f.worker.AddPendingTxToStore(tx.Hash, tx.From)
+ }
+
+ select {
+ case f.pendingL2BlocksToStore <- l2Block:
+ case <-ctx.Done():
+ // If the context is cancelled before we can send to the channel, we must decrement the WaitGroup count and
+ // delete the pending txs added to the worker
+ f.pendingL2BlocksToStoreWG.Done()
+ for _, tx := range l2Block.transactions {
+ f.worker.DeletePendingTxToStore(tx.Hash, tx.From)
+ }
+ }
+}
+
+// processPendingL2Blocks processes (in the executor) the L2 blocks pending to be processed
+func (f *finalizer) processPendingL2Blocks(ctx context.Context) {
+ for {
+ select {
+ case l2Block, ok := <-f.pendingL2BlocksToProcess:
+ if !ok {
+ // Channel is closed
+ return
+ }
+
+ log.Debugf("processing L2 block. Batch: %d, txs %d", l2Block.batchNumber, len(l2Block.transactions))
+ batchResponse, err := f.processL2Block(ctx, l2Block)
+ if err != nil {
+ f.halt(ctx, fmt.Errorf("error processing L2 block. Error: %s", err))
+ }
+
+ if len(batchResponse.BlockResponses) == 0 {
+ f.halt(ctx, fmt.Errorf("error processing L2 block. Error: BlockResponses returned by the executor is empty"))
+ }
+
+ blockResponse := batchResponse.BlockResponses[0]
+ log.Infof("L2 block %d processed. Batch: %d, txs: %d/%d, blockHash: %s, infoRoot: %s",
+ blockResponse.BlockNumber, l2Block.batchNumber, len(l2Block.transactions), len(blockResponse.TransactionResponses),
+ blockResponse.BlockHash, blockResponse.BlockInfoRoot.String())
+
+ l2Block.batchResponse = batchResponse
+
+ f.addPendingL2BlockToStore(ctx, l2Block)
+
+ f.pendingL2BlocksToProcessWG.Done()
+ case <-ctx.Done():
+ // The context was cancelled from outside, Wait for all goroutines to finish, cleanup and exit
+ f.pendingL2BlocksToProcessWG.Wait()
+ return
+ default:
+ time.Sleep(100 * time.Millisecond) //nolint:gomnd
+ }
+ }
+}
+
+// storePendingL2Blocks stores the pending L2 blocks in the database
+func (f *finalizer) storePendingL2Blocks(ctx context.Context) {
+ for {
+ select {
+ case l2Block, ok := <-f.pendingL2BlocksToStore:
+ if !ok {
+ // Channel is closed
+ return
+ }
+
+ // If the L2 block has txs, wait until f.storedFlushID >= l2BlockToStore.flushId (this flushId is from the last tx in the L2 block)
+ if len(l2Block.transactions) > 0 {
+ lastFlushId := l2Block.transactions[len(l2Block.transactions)-1].FlushId
+ f.storedFlushIDCond.L.Lock()
+ for f.storedFlushID < lastFlushId {
+ f.storedFlushIDCond.Wait()
+ // check if context is done after waking up
+ if ctx.Err() != nil {
+ f.storedFlushIDCond.L.Unlock()
+ return
+ }
+ }
+ f.storedFlushIDCond.L.Unlock()
+ }
+
+ // At this point f.storedFlushID >= the block's last flushId, so the L2 block can be stored
+ blockResponse := l2Block.batchResponse.BlockResponses[0]
+ log.Debugf("storing L2 block %d. Batch: %d, txs: %d/%d, blockHash: %s, infoRoot: %s",
+ blockResponse.BlockNumber, l2Block.batchNumber, len(l2Block.transactions), len(blockResponse.TransactionResponses),
+ blockResponse.BlockHash, blockResponse.BlockInfoRoot.String())
+
+ err := f.storeL2Block(ctx, l2Block)
+ if err != nil {
+ //TODO: this doesn't halt the finalizer, review how to do it
+ f.halt(ctx, fmt.Errorf("error storing L2 block %d. Error: %s", l2Block.batchResponse.BlockResponses[0].BlockNumber, err))
+ }
+
+ log.Infof("L2 block %d stored. Batch: %d, txs: %d/%d, blockHash: %s, infoRoot: %s",
+ blockResponse.BlockNumber, l2Block.batchNumber, len(l2Block.transactions), len(blockResponse.TransactionResponses),
+ blockResponse.BlockHash, blockResponse.BlockInfoRoot.String())
+
+ for _, tx := range l2Block.transactions {
+ // Delete the tx from the pending list in the worker (addrQueue)
+ f.worker.DeletePendingTxToStore(tx.Hash, tx.From)
+ }
+
+ f.pendingL2BlocksToStoreWG.Done()
+ case <-ctx.Done():
+ // The context was cancelled from outside, Wait for all goroutines to finish, cleanup and exit
+ f.pendingL2BlocksToStoreWG.Wait()
+ return
+ default:
+ time.Sleep(100 * time.Millisecond) //nolint:gomnd
+ }
+ }
+}
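The two pending-block channels above share one accounting rule: the producer calls wg.Add(1) before attempting the send (and undoes it if the context is cancelled first), and the consumer calls wg.Done() only after the item has been fully handled, so a shutdown can Wait() for in-flight blocks. A stripped-down sketch of that rule, with illustrative names that are not part of this codebase:

package sketch

import (
	"context"
	"sync"
)

// enqueue increments the WaitGroup before the send so that in-flight items are
// always accounted for; if the context wins the race, the count is rolled back.
func enqueue(ctx context.Context, wg *sync.WaitGroup, ch chan<- int, item int) {
	wg.Add(1)
	select {
	case ch <- item:
	case <-ctx.Done():
		wg.Done() // the item was never enqueued, undo the accounting
	}
}

// drain handles items until the context is cancelled, then waits for the
// accounting to reach zero before returning.
func drain(ctx context.Context, wg *sync.WaitGroup, ch <-chan int) {
	for {
		select {
		case item := <-ch:
			_ = item // handle the item here
			wg.Done()
		case <-ctx.Done():
			wg.Wait()
			return
		}
	}
}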
+
+// processL2Block processes an L2 block using the executor and returns the batch response
+func (f *finalizer) processL2Block(ctx context.Context, l2Block *L2Block) (*state.ProcessBatchResponse, error) {
+ processL2BLockError := func() {
+ // Log batch detailed info
+ log.Infof("[processL2Block] BatchNumber: %d, InitialStateRoot: %s, ExpectedNewStateRoot: %s", l2Block.batchNumber, l2Block.initialStateRoot.String(), l2Block.stateRoot.String())
+ for i, tx := range l2Block.transactions {
+ log.Infof("[processL2Block] BatchNumber: %d, tx position %d, tx hash: %s", l2Block.batchNumber, i, tx.HashStr)
+ }
+ }
+
+ log.Debugf("[processL2Block] BatchNumber: %d, Txs: %d, InitialStateRoot: %s, ExpectedNewStateRoot: %s", l2Block.batchNumber, len(l2Block.transactions), l2Block.initialStateRoot.String(), l2Block.stateRoot.String())
+
+ batchL2Data := []byte{}
+
+ // Add changeL2Block to batchL2Data
+ changeL2BlockBytes := f.dbManager.BuildChangeL2Block(l2Block.deltaTimestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex)
+ batchL2Data = append(batchL2Data, changeL2BlockBytes...)
+
+ // Add transactions data to batchL2Data
+ for _, tx := range l2Block.transactions {
+ ep, err := f.effectiveGasPrice.CalculateEffectiveGasPricePercentage(tx.GasPrice, tx.EffectiveGasPrice) //TODO: store effectivePercentage in TxTracker
+ if err != nil {
+ log.Errorf("[processL2Block] error calculating effective gas price percentage for tx %s. Error: %s", tx.HashStr, err)
+ return nil, err
+ }
+
+ //TODO: Create function to add epHex to batchL2Data as it's used in several places
+ epHex, err := hex.DecodeHex(fmt.Sprintf("%x", ep))
+ if err != nil {
+ log.Errorf("[processL2Block] error decoding hex value for effective gas price percentage for tx %s. Error: %s", tx.HashStr, err)
+ return nil, err
+ }
+
+ txData := append(tx.RawTx, epHex...)
+
+ batchL2Data = append(batchL2Data, txData...)
+ }
+
+ // TODO: review this request
+ executorBatchRequest := state.ProcessRequest{
+ BatchNumber: l2Block.batchNumber,
+ OldStateRoot: l2Block.initialStateRoot,
+ OldAccInputHash: l2Block.initialAccInputHash,
+ Coinbase: l2Block.coinbase,
+ L1InfoRoot_V2: mockL1InfoRoot,
+ TimestampLimit_V2: uint64(l2Block.timestamp.Unix()),
+ Transactions: batchL2Data,
+ SkipFirstChangeL2Block_V2: false,
+ SkipWriteBlockInfoRoot_V2: false,
+ Caller: stateMetrics.SequencerCallerLabel,
+ }
+
+ var (
+ err error
+ result *state.ProcessBatchResponse
+ )
+
+ result, err = f.executor.ProcessBatchV2(ctx, executorBatchRequest, true)
+ if err != nil {
+ processL2BLockError()
+ return nil, err
+ }
+
+ //TODO: check this error first?
+ if result.ExecutorError != nil {
+ processL2BLockError()
+ return nil, ErrExecutorError
+ }
+
+ if result.IsRomOOCError {
+ processL2BLockError()
+ return nil, ErrProcessBatchOOC
+ }
+
+ if result.NewStateRoot != l2Block.stateRoot {
+ log.Errorf("[processL2Block] new state root mismatch for L2 block %d in batch %d, expected: %s, got: %s",
+ result.BlockResponses[0].BlockNumber, l2Block.batchNumber, l2Block.stateRoot.String(), result.NewStateRoot.String())
+ processL2BLockError()
+ return nil, ErrStateRootNoMatch
+ }
+
+ //TODO: check that result.BlockResponses is not empty
+
+ return result, nil
+}
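The per-transaction payload assembled above is the raw tx bytes followed by the effective gas price percentage, which is intended to occupy a single byte (0-255). How the project's hex.DecodeHex handles odd-length strings is not shown here, but a sketch using only the standard library makes the intended encoding explicit; names are illustrative and not part of this codebase:

package sketch

import (
	"encoding/hex"
	"fmt"
)

// encodeEffectivePercentage returns the one-byte encoding of an effective gas
// price percentage. "%02x" guarantees an even-length hex string, which the
// standard decoder requires ("%x" of 5 would yield "5" and fail to decode).
func encodeEffectivePercentage(ep uint8) ([]byte, error) {
	return hex.DecodeString(fmt.Sprintf("%02x", ep))
}

// appendTxData mirrors the batchL2Data layout used above: raw tx bytes
// immediately followed by the percentage byte.
func appendTxData(batchL2Data, rawTx []byte, ep uint8) ([]byte, error) {
	epByte, err := encodeEffectivePercentage(ep)
	if err != nil {
		return nil, err
	}
	batchL2Data = append(batchL2Data, rawTx...)
	return append(batchL2Data, epByte...), nil
}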
+
+// storeL2Block stores the L2 block in the state and updates the related batch and transactions
+func (f *finalizer) storeL2Block(ctx context.Context, l2Block *L2Block) error {
+ //log.Infof("storeL2Block: storing processed txToStore: %s", txToStore.response.TxHash.String())
+
+ blockResponse := l2Block.batchResponse.BlockResponses[0]
+ forkID := f.dbManager.GetForkIDByBatchNumber(l2Block.batchNumber)
+
+ dbTx, err := f.dbManager.BeginStateTransaction(ctx)
+ if err != nil {
+ return fmt.Errorf("[storeL2Block] error creating db transaction. Error: %w", err)
+ }
+
+ txsEGPLog := []*state.EffectiveGasPriceLog{}
+ for _, tx := range l2Block.transactions {
+ txsEGPLog = append(txsEGPLog, &tx.EGPLog)
+ }
+
+ // Store L2 block in the state
+ err = f.dbManager.StoreL2Block(ctx, l2Block.batchNumber, l2Block.batchResponse.BlockResponses[0], txsEGPLog, dbTx)
+ if err != nil {
+ return fmt.Errorf("[storeL2Block] database error on storing L2 block. Error: %w", err)
+ }
+
+ // If the L2 block belongs to a regular batch (not forced) then we need to update the BatchL2Data.
+ // In this case we also need to update the status of the L2 block txs in the pool
+ // TODO: review this
+ if !l2Block.forcedBatch {
+ batch, err := f.dbManager.GetBatchByNumber(ctx, l2Block.batchNumber, dbTx)
+ if err != nil {
+ err2 := dbTx.Rollback(ctx)
+ if err2 != nil {
+ log.Errorf("[storeL2Block] failed to rollback dbTx when getting batch that gave err: %s. Rollback err: %s", err, err2)
+ }
+ return fmt.Errorf("[storeL2Block] error when getting batch %d from the state. Error: %s", l2Block.batchNumber, err)
+ }
+
+ // Add changeL2Block to batch.BatchL2Data
+ changeL2BlockBytes := f.dbManager.BuildChangeL2Block(l2Block.deltaTimestamp, l2Block.l1InfoTreeExitRoot.L1InfoTreeIndex)
+ batch.BatchL2Data = append(batch.BatchL2Data, changeL2BlockBytes...)
+
+ // Add transactions data to batch.BatchL2Data
+ for _, txResponse := range blockResponse.TransactionResponses {
+ txData, err := state.EncodeTransaction(txResponse.Tx, uint8(txResponse.EffectivePercentage), forkID)
+ if err != nil {
+ return err
+ }
+ batch.BatchL2Data = append(batch.BatchL2Data, txData...)
+ }
+
+ err = f.dbManager.UpdateBatch(ctx, l2Block.batchNumber, batch.BatchL2Data, l2Block.batchResponse.NewLocalExitRoot, dbTx)
+ if err != nil {
+ err2 := dbTx.Rollback(ctx)
+ if err2 != nil {
+ log.Errorf("[storeL2Block] failed to rollback dbTx when getting batch that gave err: %s. Rollback err: %s", err, err2)
+ }
+ return err
+ }
+
+ for _, txResponse := range blockResponse.TransactionResponses {
+ // Change Tx status to selected
+ err = f.dbManager.UpdateTxStatus(ctx, txResponse.TxHash, pool.TxStatusSelected, false, nil)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ err = dbTx.Commit(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Send L2 block to data streamer
+ err = f.dbManager.DSSendL2Block(l2Block)
+ if err != nil {
+ return fmt.Errorf("[storeL2Block] error sending L2 block %d to data streamer", blockResponse.BlockNumber)
+ }
+
+ return nil
+}
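A few of the early returns above (for example the EncodeTransaction and UpdateTxStatus error paths) exit without rolling back the open dbTx. A common way to make leaked transactions impossible is to defer a rollback that becomes a no-op once Commit succeeds; a minimal sketch of that shape, assuming a pgx-style transaction and a Begin function in the spirit of dbManager.BeginStateTransaction:

package sketch

import (
	"context"

	"github.com/jackc/pgx/v4"
)

// withStateTx wraps fn in a state transaction. The deferred Rollback is a
// no-op (it returns pgx.ErrTxClosed) once Commit has succeeded, so every
// early-return error path inside fn still releases the transaction.
func withStateTx(ctx context.Context, begin func(context.Context) (pgx.Tx, error), fn func(pgx.Tx) error) error {
	dbTx, err := begin(ctx)
	if err != nil {
		return err
	}
	defer dbTx.Rollback(ctx) //nolint:errcheck

	if err := fn(dbTx); err != nil {
		return err
	}
	return dbTx.Commit(ctx)
}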
+
+// finalizeL2Block closes the current L2 block and opens a new one
+func (f *finalizer) finalizeL2Block(ctx context.Context) {
+ log.Debugf("finalizing L2 block")
+
+ f.closeWIPL2Block(ctx)
+
+ f.openNewWIPL2Block(ctx, nil)
+}
+
+func (f *finalizer) closeWIPL2Block(ctx context.Context) {
+ // If the L2 block is empty (no txs), we need to process it to update the state root before closing it
+ if f.wipL2Block.isEmpty() {
+ log.Debug("processing L2 block because it is empty")
+ if _, err := f.processTransaction(ctx, nil, true); err != nil {
+ f.halt(ctx, fmt.Errorf("failed to process empty L2 block. Error: %s ", err))
+ }
+ }
+
+ // Update the L2 block state root to the WIP batch state root
+ f.wipL2Block.stateRoot = f.wipBatch.stateRoot
+
+ f.addPendingL2BlockToProcess(ctx, f.wipL2Block)
+}
+
+func (f *finalizer) openNewWIPL2Block(ctx context.Context, prevTimestamp *time.Time) {
+ //TODO: better to use f.wipBatch.remainingResources.Sub() instead of subtracting directly
+ // Subtract the bytes needed to store the changeL2Block of the new L2 block into the WIP batch
+ f.wipBatch.remainingResources.Bytes = f.wipBatch.remainingResources.Bytes - changeL2BlockSize
+ // Subtract the poseidon and arithmetic counters needed to calculate the InfoRoot when the L2 block is closed
+ f.wipBatch.remainingResources.ZKCounters.UsedPoseidonHashes = f.wipBatch.remainingResources.ZKCounters.UsedPoseidonHashes - 256 //TODO: config param
+ f.wipBatch.remainingResources.ZKCounters.UsedArithmetics = f.wipBatch.remainingResources.ZKCounters.UsedArithmetics - 1 //TODO: config param
+ // After doing the subtractions, check that the batch size limit has not been reached
+ if f.isBatchResourcesExhausted() {
+ // If we have reached the limit then close the wip batch and create a new one
+ f.finalizeBatch(ctx)
+ }
+
+ // Initialize wipL2Block to a new L2 block
+ newL2Block := &L2Block{}
+
+ newL2Block.timestamp = now()
+ if prevTimestamp != nil {
+ newL2Block.deltaTimestamp = uint32(newL2Block.timestamp.Sub(*prevTimestamp).Truncate(time.Second).Seconds())
+ } else {
+ newL2Block.deltaTimestamp = uint32(newL2Block.timestamp.Sub(f.wipL2Block.timestamp).Truncate(time.Second).Seconds())
+ }
+
+ newL2Block.batchNumber = f.wipBatch.batchNumber
+ newL2Block.forcedBatch = false
+ newL2Block.initialStateRoot = f.wipBatch.stateRoot
+ newL2Block.stateRoot = f.wipBatch.stateRoot
+ newL2Block.initialAccInputHash = f.wipBatch.accInputHash
+ newL2Block.coinbase = f.wipBatch.coinbase
+ newL2Block.transactions = []*TxTracker{}
+
+ f.lastL1InfoTreeMux.Lock()
+ newL2Block.l1InfoTreeExitRoot = f.lastL1InfoTree
+ f.lastL1InfoTreeMux.Unlock()
+
+ f.wipL2Block = newL2Block
+
+ log.Debugf("new WIP L2 block created. Batch: %d, initialStateRoot: %s, timestamp: %d", f.wipL2Block.batchNumber, f.wipL2Block.initialStateRoot, f.wipL2Block.timestamp.Unix())
+}
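If the remaining-resource counters decremented in openNewWIPL2Block are unsigned (as the ZK counter types appear to be), subtracting the changeL2Block reservation directly can wrap around when the remaining budget is already smaller than the reservation, which would defeat the exhaustion check that follows. A defensive sketch of the subtraction; field and constant names are illustrative, not the project's:

package sketch

import "errors"

var errResourceUnderflow = errors.New("remaining batch resource is below the requested reservation")

// reserve subtracts amount from remaining, refusing to wrap around on underflow
// so the caller can close the batch instead of continuing with a bogus huge value.
func reserve(remaining, amount uint64) (uint64, error) {
	if remaining < amount {
		return remaining, errResourceUnderflow
	}
	return remaining - amount, nil
}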
diff --git a/sequencer/mock_db_manager.go b/sequencer/mock_db_manager.go
index c3bf968379..dd4a894686 100644
--- a/sequencer/mock_db_manager.go
+++ b/sequencer/mock_db_manager.go
@@ -52,6 +52,22 @@ func (_m *DbManagerMock) BeginStateTransaction(ctx context.Context) (pgx.Tx, err
return r0, r1
}
+// BuildChangeL2Block provides a mock function with given fields: deltaTimestamp, l1InfoTreeIndex
+func (_m *DbManagerMock) BuildChangeL2Block(deltaTimestamp uint32, l1InfoTreeIndex uint32) []byte {
+ ret := _m.Called(deltaTimestamp, l1InfoTreeIndex)
+
+ var r0 []byte
+ if rf, ok := ret.Get(0).(func(uint32, uint32) []byte); ok {
+ r0 = rf(deltaTimestamp, l1InfoTreeIndex)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]byte)
+ }
+ }
+
+ return r0
+}
+
// CloseBatch provides a mock function with given fields: ctx, params
func (_m *DbManagerMock) CloseBatch(ctx context.Context, params ClosingBatchParameters) error {
ret := _m.Called(ctx, params)
@@ -104,6 +120,20 @@ func (_m *DbManagerMock) CreateFirstBatch(ctx context.Context, sequencerAddress
return r0
}
+// DSSendL2Block provides a mock function with given fields: l2Block
+func (_m *DbManagerMock) DSSendL2Block(l2Block *L2Block) error {
+ ret := _m.Called(l2Block)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(*L2Block) error); ok {
+ r0 = rf(l2Block)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
// DeleteTransactionFromPool provides a mock function with given fields: ctx, txHash
func (_m *DbManagerMock) DeleteTransactionFromPool(ctx context.Context, txHash common.Hash) error {
ret := _m.Called(ctx, txHash)
@@ -414,6 +444,32 @@ func (_m *DbManagerMock) GetLastClosedBatch(ctx context.Context) (*state.Batch,
return r0, r1
}
+// GetLastL2Block provides a mock function with given fields: ctx, dbTx
+func (_m *DbManagerMock) GetLastL2Block(ctx context.Context, dbTx pgx.Tx) (*state.L2Block, error) {
+ ret := _m.Called(ctx, dbTx)
+
+ var r0 *state.L2Block
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) (*state.L2Block, error)); ok {
+ return rf(ctx, dbTx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) *state.L2Block); ok {
+ r0 = rf(ctx, dbTx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*state.L2Block)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, pgx.Tx) error); ok {
+ r1 = rf(ctx, dbTx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
// GetLastL2BlockHeader provides a mock function with given fields: ctx, dbTx
func (_m *DbManagerMock) GetLastL2BlockHeader(ctx context.Context, dbTx pgx.Tx) (*state.L2Header, error) {
ret := _m.Called(ctx, dbTx)
@@ -521,6 +577,30 @@ func (_m *DbManagerMock) GetLatestGer(ctx context.Context, maxBlockNumber uint64
return r0, r1, r2
}
+// GetLatestL1InfoRoot provides a mock function with given fields: ctx, maxBlockNumber
+func (_m *DbManagerMock) GetLatestL1InfoRoot(ctx context.Context, maxBlockNumber uint64) (state.L1InfoTreeExitRootStorageEntry, error) {
+ ret := _m.Called(ctx, maxBlockNumber)
+
+ var r0 state.L1InfoTreeExitRootStorageEntry
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint64) (state.L1InfoTreeExitRootStorageEntry, error)); ok {
+ return rf(ctx, maxBlockNumber)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint64) state.L1InfoTreeExitRootStorageEntry); ok {
+ r0 = rf(ctx, maxBlockNumber)
+ } else {
+ r0 = ret.Get(0).(state.L1InfoTreeExitRootStorageEntry)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok {
+ r1 = rf(ctx, maxBlockNumber)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
// GetLatestVirtualBatchTimestamp provides a mock function with given fields: ctx, dbTx
func (_m *DbManagerMock) GetLatestVirtualBatchTimestamp(ctx context.Context, dbTx pgx.Tx) (time.Time, error) {
ret := _m.Called(ctx, dbTx)
@@ -612,19 +692,19 @@ func (_m *DbManagerMock) GetTransactionsByBatchNumber(ctx context.Context, batch
}
// GetWIPBatch provides a mock function with given fields: ctx
-func (_m *DbManagerMock) GetWIPBatch(ctx context.Context) (*WipBatch, error) {
+func (_m *DbManagerMock) GetWIPBatch(ctx context.Context) (*Batch, error) {
ret := _m.Called(ctx)
- var r0 *WipBatch
+ var r0 *Batch
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context) (*WipBatch, error)); ok {
+ if rf, ok := ret.Get(0).(func(context.Context) (*Batch, error)); ok {
return rf(ctx)
}
- if rf, ok := ret.Get(0).(func(context.Context) *WipBatch); ok {
+ if rf, ok := ret.Get(0).(func(context.Context) *Batch); ok {
r0 = rf(ctx)
} else {
if ret.Get(0) != nil {
- r0 = ret.Get(0).(*WipBatch)
+ r0 = ret.Get(0).(*Batch)
}
}
@@ -701,13 +781,27 @@ func (_m *DbManagerMock) ProcessForcedBatch(ForcedBatchNumber uint64, request st
return r0, r1
}
-// StoreProcessedTxAndDeleteFromPool provides a mock function with given fields: ctx, tx
-func (_m *DbManagerMock) StoreProcessedTxAndDeleteFromPool(ctx context.Context, tx transactionToStore) error {
- ret := _m.Called(ctx, tx)
+// StoreL2Block provides a mock function with given fields: ctx, batchNumber, l2Block, txsEGPLog, dbTx
+func (_m *DbManagerMock) StoreL2Block(ctx context.Context, batchNumber uint64, l2Block *state.ProcessBlockResponse, txsEGPLog []*state.EffectiveGasPriceLog, dbTx pgx.Tx) error {
+ ret := _m.Called(ctx, batchNumber, l2Block, txsEGPLog, dbTx)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, *state.ProcessBlockResponse, []*state.EffectiveGasPriceLog, pgx.Tx) error); ok {
+ r0 = rf(ctx, batchNumber, l2Block, txsEGPLog, dbTx)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// UpdateBatch provides a mock function with given fields: ctx, batchNumber, batchL2Data, localExitRoot, dbTx
+func (_m *DbManagerMock) UpdateBatch(ctx context.Context, batchNumber uint64, batchL2Data []byte, localExitRoot common.Hash, dbTx pgx.Tx) error {
+ ret := _m.Called(ctx, batchNumber, batchL2Data, localExitRoot, dbTx)
var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, transactionToStore) error); ok {
- r0 = rf(ctx, tx)
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, []byte, common.Hash, pgx.Tx) error); ok {
+ r0 = rf(ctx, batchNumber, batchL2Data, localExitRoot, dbTx)
} else {
r0 = ret.Error(0)
}
diff --git a/sequencer/mock_etherman.go b/sequencer/mock_etherman.go
index f122c597e1..efd511ee58 100644
--- a/sequencer/mock_etherman.go
+++ b/sequencer/mock_etherman.go
@@ -81,30 +81,6 @@ func (_m *EthermanMock) EstimateGasSequenceBatches(sender common.Address, sequen
return r0, r1
}
-// GetLastBatchTimestamp provides a mock function with given fields:
-func (_m *EthermanMock) GetLastBatchTimestamp() (uint64, error) {
- ret := _m.Called()
-
- var r0 uint64
- var r1 error
- if rf, ok := ret.Get(0).(func() (uint64, error)); ok {
- return rf()
- }
- if rf, ok := ret.Get(0).(func() uint64); ok {
- r0 = rf()
- } else {
- r0 = ret.Get(0).(uint64)
- }
-
- if rf, ok := ret.Get(1).(func() error); ok {
- r1 = rf()
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
-}
-
// GetLatestBatchNumber provides a mock function with given fields:
func (_m *EthermanMock) GetLatestBatchNumber() (uint64, error) {
ret := _m.Called()
diff --git a/sequencer/mock_state.go b/sequencer/mock_state.go
index 426d87dc27..58615eaf00 100644
--- a/sequencer/mock_state.go
+++ b/sequencer/mock_state.go
@@ -28,6 +28,20 @@ type StateMock struct {
mock.Mock
}
+// AddL2Block provides a mock function with given fields: ctx, batchNumber, l2Block, receipts, txsEGPData, dbTx
+func (_m *StateMock) AddL2Block(ctx context.Context, batchNumber uint64, l2Block *state.L2Block, receipts []*types.Receipt, txsEGPData []state.StoreTxEGPData, dbTx pgx.Tx) error {
+ ret := _m.Called(ctx, batchNumber, l2Block, receipts, txsEGPData, dbTx)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, *state.L2Block, []*types.Receipt, []state.StoreTxEGPData, pgx.Tx) error); ok {
+ r0 = rf(ctx, batchNumber, l2Block, receipts, txsEGPData, dbTx)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
// Begin provides a mock function with given fields: ctx
func (_m *StateMock) Begin(ctx context.Context) (pgx.Tx, error) {
ret := _m.Called(ctx)
@@ -696,6 +710,30 @@ func (_m *StateMock) GetLatestGlobalExitRoot(ctx context.Context, maxBlockNumber
return r0, r1, r2
}
+// GetLatestL1InfoRoot provides a mock function with given fields: ctx, maxBlockNumber
+func (_m *StateMock) GetLatestL1InfoRoot(ctx context.Context, maxBlockNumber uint64) (state.L1InfoTreeExitRootStorageEntry, error) {
+ ret := _m.Called(ctx, maxBlockNumber)
+
+ var r0 state.L1InfoTreeExitRootStorageEntry
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint64) (state.L1InfoTreeExitRootStorageEntry, error)); ok {
+ return rf(ctx, maxBlockNumber)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint64) state.L1InfoTreeExitRootStorageEntry); ok {
+ r0 = rf(ctx, maxBlockNumber)
+ } else {
+ r0 = ret.Get(0).(state.L1InfoTreeExitRootStorageEntry)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok {
+ r1 = rf(ctx, maxBlockNumber)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
// GetLatestVirtualBatchTimestamp provides a mock function with given fields: ctx, dbTx
func (_m *StateMock) GetLatestVirtualBatchTimestamp(ctx context.Context, dbTx pgx.Tx) (time.Time, error) {
ret := _m.Called(ctx, dbTx)
@@ -1004,22 +1042,8 @@ func (_m *StateMock) StoreTransaction(ctx context.Context, batchNumber uint64, p
return r0, r1
}
-// UpdateBatchL2Data provides a mock function with given fields: ctx, batchNumber, batchL2Data, dbTx
-func (_m *StateMock) UpdateBatchL2Data(ctx context.Context, batchNumber uint64, batchL2Data []byte, dbTx pgx.Tx) error {
- ret := _m.Called(ctx, batchNumber, batchL2Data, dbTx)
-
- var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, uint64, []byte, pgx.Tx) error); ok {
- r0 = rf(ctx, batchNumber, batchL2Data, dbTx)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
-}
-
-// UpdateBatchL2DataAndLER provides a mock function with given fields: ctx, batchNumber, batchL2Data, localExitRoot, dbTx
-func (_m *StateMock) UpdateBatchL2DataAndLER(ctx context.Context, batchNumber uint64, batchL2Data []byte, localExitRoot common.Hash, dbTx pgx.Tx) error {
+// UpdateBatch provides a mock function with given fields: ctx, batchNumber, batchL2Data, localExitRoot, dbTx
+func (_m *StateMock) UpdateBatch(ctx context.Context, batchNumber uint64, batchL2Data []byte, localExitRoot common.Hash, dbTx pgx.Tx) error {
ret := _m.Called(ctx, batchNumber, batchL2Data, localExitRoot, dbTx)
var r0 error
diff --git a/sequencer/mock_worker.go b/sequencer/mock_worker.go
index e14b48565c..d01e92ecc9 100644
--- a/sequencer/mock_worker.go
+++ b/sequencer/mock_worker.go
@@ -72,10 +72,14 @@ func (_m *WorkerMock) DeleteTx(txHash common.Hash, from common.Address) {
}
// GetBestFittingTx provides a mock function with given fields: resources
-func (_m *WorkerMock) GetBestFittingTx(resources state.BatchResources) *TxTracker {
+func (_m *WorkerMock) GetBestFittingTx(resources state.BatchResources) (*TxTracker, error) {
ret := _m.Called(resources)
var r0 *TxTracker
+ var r1 error
+ if rf, ok := ret.Get(0).(func(state.BatchResources) (*TxTracker, error)); ok {
+ return rf(resources)
+ }
if rf, ok := ret.Get(0).(func(state.BatchResources) *TxTracker); ok {
r0 = rf(resources)
} else {
@@ -84,7 +88,13 @@ func (_m *WorkerMock) GetBestFittingTx(resources state.BatchResources) *TxTracke
}
}
- return r0
+ if rf, ok := ret.Get(1).(func(state.BatchResources) error); ok {
+ r1 = rf(resources)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
}
// HandleL2Reorg provides a mock function with given fields: txHashes
diff --git a/sequencer/sequencer.go b/sequencer/sequencer.go
index af15bea776..8cedc7bbbb 100644
--- a/sequencer/sequencer.go
+++ b/sequencer/sequencer.go
@@ -35,9 +35,10 @@ type L2ReorgEvent struct {
// ClosingSignalCh is a struct that contains all the channels that are used to receive batch closing signals
type ClosingSignalCh struct {
- ForcedBatchCh chan state.ForcedBatch
- GERCh chan common.Hash
- L2ReorgCh chan L2ReorgEvent
+ ForcedBatchCh chan state.ForcedBatch
+ GERCh chan common.Hash
+ L1InfoTreeExitRootCh chan state.L1InfoTreeExitRootStorageEntry
+ L2ReorgCh chan L2ReorgEvent
}
// New init sequencer
@@ -70,9 +71,10 @@ func (s *Sequencer) Start(ctx context.Context) {
metrics.Register()
closingSignalCh := ClosingSignalCh{
- ForcedBatchCh: make(chan state.ForcedBatch),
- GERCh: make(chan common.Hash),
- L2ReorgCh: make(chan L2ReorgEvent),
+ ForcedBatchCh: make(chan state.ForcedBatch),
+ GERCh: make(chan common.Hash),
+ L1InfoTreeExitRootCh: make(chan state.L1InfoTreeExitRootStorageEntry),
+ L2ReorgCh: make(chan L2ReorgEvent),
}
err := s.pool.MarkWIPTxsAsPending(ctx)
@@ -106,7 +108,7 @@ func (s *Sequencer) Start(ctx context.Context) {
streamServer = dbManager.streamServer
}
- finalizer := newFinalizer(s.cfg.Finalizer, s.poolCfg, worker, dbManager, s.state, s.address, s.isSynced, closingSignalCh, s.batchCfg.Constraints, s.eventLog, streamServer)
+ finalizer := newFinalizer(s.cfg.Finalizer, s.poolCfg, worker, dbManager, s.state, s.etherman, s.address, s.isSynced, closingSignalCh, s.batchCfg.Constraints, s.eventLog, streamServer)
go finalizer.Start(ctx)
closingSignalsManager := newClosingSignalsManager(ctx, finalizer.dbManager, closingSignalCh, finalizer.cfg, s.etherman)
@@ -183,7 +185,10 @@ func waitTick(ctx context.Context, ticker *time.Ticker) {
}
func (s *Sequencer) isSynced(ctx context.Context) bool {
- lastSyncedBatchNum, err := s.state.GetLastVirtualBatchNum(ctx, nil)
+ //TODO: uncomment this
+ return true
+
+ /*lastSyncedBatchNum, err := s.state.GetLastVirtualBatchNum(ctx, nil)
if err != nil && err != state.ErrNotFound {
log.Errorf("failed to get last isSynced batch, err: %v", err)
return false
@@ -206,5 +211,5 @@ func (s *Sequencer) isSynced(ctx context.Context) bool {
return false
}
- return true
+ return true*/
}
diff --git a/sequencer/txtracker.go b/sequencer/txtracker.go
index 49efba791f..6e8a68627d 100644
--- a/sequencer/txtracker.go
+++ b/sequencer/txtracker.go
@@ -29,6 +29,7 @@ type TxTracker struct {
EGPLog state.EffectiveGasPriceLog
L1GasPrice uint64
L2GasPrice uint64
+ FlushId uint64
}
// newTxTracker creates and inti a TxTracker
diff --git a/sequencer/worker.go b/sequencer/worker.go
index fc3c2b8c1e..a50f086c91 100644
--- a/sequencer/worker.go
+++ b/sequencer/worker.go
@@ -53,7 +53,7 @@ func (w *Worker) AddTxTracker(ctx context.Context, tx *TxTracker) (replacedTx *T
// Make sure the transaction's batch resources are within the constraints.
if !w.batchConstraints.IsWithinConstraints(tx.BatchResources.ZKCounters) {
- log.Errorf("OutOfCounters Error (Node level) for tx: %s", tx.Hash.String())
+ log.Errorf("outOfCounters Error (Node level) for tx: %s", tx.Hash.String())
w.workerMutex.Unlock()
return nil, pool.ErrOutOfCounters
}
@@ -65,19 +65,19 @@ func (w *Worker) AddTxTracker(ctx context.Context, tx *TxTracker) (replacedTx *T
root, err := w.state.GetLastStateRoot(ctx, nil)
if err != nil {
- dropReason = fmt.Errorf("AddTx GetLastStateRoot error: %v", err)
+ dropReason = fmt.Errorf("[AddTxTracker] GetLastStateRoot error: %v", err)
log.Error(dropReason)
return nil, dropReason
}
nonce, err := w.state.GetNonceByStateRoot(ctx, tx.From, root)
if err != nil {
- dropReason = fmt.Errorf("AddTx GetNonceByStateRoot error: %v", err)
+ dropReason = fmt.Errorf("[AddTxTracker] GetNonceByStateRoot error: %v", err)
log.Error(dropReason)
return nil, dropReason
}
balance, err := w.state.GetBalanceByStateRoot(ctx, tx.From, root)
if err != nil {
- dropReason = fmt.Errorf("AddTx GetBalanceByStateRoot error: %v", err)
+ dropReason = fmt.Errorf("[AddTxTracker] GetBalanceByStateRoot error: %v", err)
log.Error(dropReason)
return nil, dropReason
}
@@ -88,31 +88,31 @@ func (w *Worker) AddTxTracker(ctx context.Context, tx *TxTracker) (replacedTx *T
w.workerMutex.Lock()
w.pool[tx.FromStr] = addr
- log.Infof("AddTx new addrQueue created for addr(%s) nonce(%d) balance(%s)", tx.FromStr, nonce.Uint64(), balance.String())
+ log.Debugf("new addrQueue created for addr(%s) nonce(%d) balance(%s)", tx.FromStr, nonce.Uint64(), balance.String())
}
// Add the txTracker to Addr and get the newReadyTx and prevReadyTx
- log.Infof("AddTx new tx(%s) nonce(%d) gasPrice(%d) to addrQueue(%s) nonce(%d) balance(%d)", tx.HashStr, tx.Nonce, tx.GasPrice, addr.fromStr, addr.currentNonce, addr.currentBalance)
+ log.Infof("added new tx(%s) nonce(%d) gasPrice(%d) to addrQueue(%s) nonce(%d) balance(%d)", tx.HashStr, tx.Nonce, tx.GasPrice, addr.fromStr, addr.currentNonce, addr.currentBalance)
var newReadyTx, prevReadyTx, repTx *TxTracker
newReadyTx, prevReadyTx, repTx, dropReason = addr.addTx(tx)
if dropReason != nil {
- log.Infof("AddTx tx(%s) dropped from addrQueue(%s), reason: %s", tx.HashStr, tx.FromStr, dropReason.Error())
+ log.Infof("dropped tx(%s) from addrQueue(%s), reason: %s", tx.HashStr, tx.FromStr, dropReason.Error())
w.workerMutex.Unlock()
return repTx, dropReason
}
// Update the txSortedList (if needed)
if prevReadyTx != nil {
- log.Infof("AddTx prevReadyTx(%s) nonce(%d) gasPrice(%d) addr(%s) deleted from TxSortedList", prevReadyTx.HashStr, prevReadyTx.Nonce, prevReadyTx.GasPrice, tx.FromStr)
+ log.Debugf("[AddTxTracker] prevReadyTx(%s) nonce(%d) gasPrice(%d) addr(%s) deleted from TxSortedList", prevReadyTx.HashStr, prevReadyTx.Nonce, prevReadyTx.GasPrice, tx.FromStr)
w.txSortedList.delete(prevReadyTx)
}
if newReadyTx != nil {
- log.Infof("AddTx newReadyTx(%s) nonce(%d) gasPrice(%d) addr(%s) added to TxSortedList", newReadyTx.HashStr, newReadyTx.Nonce, newReadyTx.GasPrice, tx.FromStr)
+ log.Debugf("[AddTxTracker] newReadyTx(%s) nonce(%d) gasPrice(%d) addr(%s) added to TxSortedList", newReadyTx.HashStr, newReadyTx.Nonce, newReadyTx.GasPrice, tx.FromStr)
w.txSortedList.add(newReadyTx)
}
if repTx != nil {
- log.Infof("AddTx replacedTx(%s) nonce(%d) gasPrice(%d) addr(%s) has been replaced", repTx.HashStr, repTx.Nonce, repTx.GasPrice, tx.FromStr)
+ log.Debugf("[AddTxTracker] replacedTx(%s) nonce(%d) gasPrice(%d) addr(%s) has been replaced", repTx.HashStr, repTx.Nonce, repTx.GasPrice, tx.FromStr)
}
w.workerMutex.Unlock()
@@ -127,11 +127,11 @@ func (w *Worker) applyAddressUpdate(from common.Address, fromNonce *uint64, from
// Update the TxSortedList (if needed)
if prevReadyTx != nil {
- log.Infof("applyAddressUpdate prevReadyTx(%s) nonce(%d) gasPrice(%d) deleted from TxSortedList", prevReadyTx.Hash.String(), prevReadyTx.Nonce, prevReadyTx.GasPrice)
+ log.Debugf("[applyAddressUpdate] prevReadyTx(%s) nonce(%d) gasPrice(%d) deleted from TxSortedList", prevReadyTx.Hash.String(), prevReadyTx.Nonce, prevReadyTx.GasPrice)
w.txSortedList.delete(prevReadyTx)
}
if newReadyTx != nil {
- log.Infof("applyAddressUpdate newReadyTx(%s) nonce(%d) gasPrice(%d) added to TxSortedList", newReadyTx.Hash.String(), newReadyTx.Nonce, newReadyTx.GasPrice)
+ log.Debugf("[applyAddressUpdate] newReadyTx(%s) nonce(%d) gasPrice(%d) added to TxSortedList", newReadyTx.Hash.String(), newReadyTx.Nonce, newReadyTx.GasPrice)
w.txSortedList.add(newReadyTx)
}
@@ -146,7 +146,7 @@ func (w *Worker) UpdateAfterSingleSuccessfulTxExecution(from common.Address, tou
w.workerMutex.Lock()
defer w.workerMutex.Unlock()
if len(touchedAddresses) == 0 {
- log.Warnf("UpdateAfterSingleSuccessfulTxExecution touchedAddresses is nil or empty")
+ log.Warnf("[UpdateAfterSingleSuccessfulTxExecution] touchedAddresses is nil or empty")
}
txsToDelete := make([]*TxTracker, 0)
touchedFrom, found := touchedAddresses[from]
@@ -154,7 +154,7 @@ func (w *Worker) UpdateAfterSingleSuccessfulTxExecution(from common.Address, tou
fromNonce, fromBalance := touchedFrom.Nonce, touchedFrom.Balance
_, _, txsToDelete = w.applyAddressUpdate(from, fromNonce, fromBalance)
} else {
- log.Warnf("UpdateAfterSingleSuccessfulTxExecution from(%s) not found in touchedAddresses", from.String())
+ log.Warnf("[updateAfterSingleSuccessfulTxExecution] from(%s) not found in touchedAddresses", from.String())
}
for addr, addressInfo := range touchedAddresses {
@@ -170,7 +170,7 @@ func (w *Worker) UpdateAfterSingleSuccessfulTxExecution(from common.Address, tou
func (w *Worker) MoveTxToNotReady(txHash common.Hash, from common.Address, actualNonce *uint64, actualBalance *big.Int) []*TxTracker {
w.workerMutex.Lock()
defer w.workerMutex.Unlock()
- log.Infof("MoveTxToNotReady tx(%s) from(%s) actualNonce(%d) actualBalance(%s)", txHash.String(), from.String(), actualNonce, actualBalance.String())
+ log.Debugf("[MoveTxToNotReady] tx(%s) from(%s) actualNonce(%d) actualBalance(%s)", txHash.String(), from.String(), actualNonce, actualBalance.String())
addrQueue, found := w.pool[from.String()]
if found {
@@ -180,7 +180,7 @@ func (w *Worker) MoveTxToNotReady(txHash common.Hash, from common.Address, actua
if addrQueue.readyTx != nil {
readyHashStr = addrQueue.readyTx.HashStr
}
- log.Warnf("MoveTxToNotReady txHash(%s) is not the readyTx(%s)", txHash.String(), readyHashStr)
+ log.Warnf("[MoveTxToNotReady] txHash(%s) is not the readyTx(%s)", txHash.String(), readyHashStr)
}
}
_, _, txsToDelete := w.applyAddressUpdate(from, actualNonce, actualBalance)
@@ -197,11 +197,11 @@ func (w *Worker) DeleteTx(txHash common.Hash, addr common.Address) {
if found {
deletedReadyTx := addrQueue.deleteTx(txHash)
if deletedReadyTx != nil {
- log.Infof("DeleteTx tx(%s) deleted from TxSortedList", deletedReadyTx.Hash.String())
+ log.Debugf("[DeleteTx] tx(%s) deleted from TxSortedList", deletedReadyTx.Hash.String())
w.txSortedList.delete(deletedReadyTx)
}
} else {
- log.Warnf("DeleteTx addrQueue(%s) not found", addr.String())
+ log.Warnf("[DeleteTx] addrQueue(%s) not found", addr.String())
}
}
@@ -214,7 +214,7 @@ func (w *Worker) DeleteForcedTx(txHash common.Hash, addr common.Address) {
if found {
addrQueue.deleteForcedTx(txHash)
} else {
- log.Warnf("DeleteForcedTx addrQueue(%s) not found", addr.String())
+ log.Warnf("[DeleteForcedTx] addrQueue(%s) not found", addr.String())
}
}
@@ -223,23 +223,23 @@ func (w *Worker) UpdateTxZKCounters(txHash common.Hash, addr common.Address, cou
w.workerMutex.Lock()
defer w.workerMutex.Unlock()
- log.Infof("UpdateTxZKCounters tx(%s) addr(%s)", txHash.String(), addr.String())
- log.Debugf("UpdateTxZKCounters counters.CumulativeGasUsed: %d", counters.GasUsed)
- log.Debugf("UpdateTxZKCounters counters.UsedKeccakHashes: %d", counters.UsedKeccakHashes)
- log.Debugf("UpdateTxZKCounters counters.UsedPoseidonHashes: %d", counters.UsedPoseidonHashes)
- log.Debugf("UpdateTxZKCounters counters.UsedPoseidonPaddings: %d", counters.UsedPoseidonPaddings)
- log.Debugf("UpdateTxZKCounters counters.UsedMemAligns: %d", counters.UsedMemAligns)
- log.Debugf("UpdateTxZKCounters counters.UsedArithmetics: %d", counters.UsedArithmetics)
- log.Debugf("UpdateTxZKCounters counters.UsedBinaries: %d", counters.UsedBinaries)
- log.Debugf("UpdateTxZKCounters counters.UsedSteps: %d", counters.UsedSteps)
- log.Debugf("UpdateTxZKCounters counters.UsedSha256Hashes_V2: %d", counters.UsedSha256Hashes_V2)
+ log.Infof("update ZK counters for tx(%s) addr(%s)", txHash.String(), addr.String())
+ log.Debugf("[UpdateTxZKCounters] counters.CumulativeGasUsed: %d", counters.GasUsed)
+ log.Debugf("[UpdateTxZKCounters] counters.UsedKeccakHashes: %d", counters.UsedKeccakHashes)
+ log.Debugf("[UpdateTxZKCounters] counters.UsedPoseidonHashes: %d", counters.UsedPoseidonHashes)
+ log.Debugf("[UpdateTxZKCounters] counters.UsedPoseidonPaddings: %d", counters.UsedPoseidonPaddings)
+ log.Debugf("[UpdateTxZKCounters] counters.UsedMemAligns: %d", counters.UsedMemAligns)
+ log.Debugf("[UpdateTxZKCounters] counters.UsedArithmetics: %d", counters.UsedArithmetics)
+ log.Debugf("[UpdateTxZKCounters] counters.UsedBinaries: %d", counters.UsedBinaries)
+ log.Debugf("[UpdateTxZKCounters] counters.UsedSteps: %d", counters.UsedSteps)
+ log.Debugf("[UpdateTxZKCounters] counters.UsedSha256Hashes_V2: %d", counters.UsedSha256Hashes_V2)
addrQueue, found := w.pool[addr.String()]
if found {
addrQueue.UpdateTxZKCounters(txHash, counters)
} else {
- log.Warnf("UpdateTxZKCounters addrQueue(%s) not found", addr.String())
+ log.Warnf("[UpdateTxZKCounters] addrQueue(%s) not found", addr.String())
}
}
@@ -253,7 +253,7 @@ func (w *Worker) AddPendingTxToStore(txHash common.Hash, addr common.Address) {
if found {
addrQueue.addPendingTxToStore(txHash)
} else {
- log.Warnf("AddPendingTxToStore addrQueue(%s) not found", addr.String())
+ log.Warnf("[AddPendingTxToStore] addrQueue(%s) not found", addr.String())
}
}
@@ -267,7 +267,7 @@ func (w *Worker) AddForcedTx(txHash common.Hash, addr common.Address) {
if found {
addrQueue.addForcedTx(txHash)
} else {
- log.Warnf("AddForcedTx addrQueue(%s) not found", addr.String())
+ log.Warnf("[AddForcedTx] addrQueue(%s) not found", addr.String())
}
}
@@ -281,15 +281,19 @@ func (w *Worker) DeletePendingTxToStore(txHash common.Hash, addr common.Address)
if found {
addrQueue.deletePendingTxToStore(txHash)
} else {
- log.Warnf("DeletePendingTxToStore addrQueue(%s) not found", addr.String())
+ log.Warnf("[DeletePendingTxToStore] addrQueue(%s) not found", addr.String())
}
}
// GetBestFittingTx gets the most efficient tx that fits in the available batch resources
-func (w *Worker) GetBestFittingTx(resources state.BatchResources) *TxTracker {
+func (w *Worker) GetBestFittingTx(resources state.BatchResources) (*TxTracker, error) {
w.workerMutex.Lock()
defer w.workerMutex.Unlock()
+ if w.txSortedList.len() == 0 {
+ return nil, ErrTransactionsListEmpty
+ }
+
var (
tx *TxTracker
foundMutex sync.RWMutex
@@ -334,10 +338,11 @@ func (w *Worker) GetBestFittingTx(resources state.BatchResources) *TxTracker {
wg.Wait()
if foundAt != -1 {
- log.Infof("GetBestFittingTx found tx(%s) at index(%d) with gasPrice(%d)", tx.Hash.String(), foundAt, tx.GasPrice)
+ log.Debugf("[GetBestFittingTx] found tx(%s) at index(%d) with gasPrice(%d)", tx.Hash.String(), foundAt, tx.GasPrice)
+ return tx, nil
+ } else {
+ return nil, ErrNoFittingTransaction
}
-
- return tx
}
// ExpireTransactions deletes old txs
@@ -347,7 +352,7 @@ func (w *Worker) ExpireTransactions(maxTime time.Duration) []*TxTracker {
var txs []*TxTracker
- log.Info("ExpireTransactions start. addrQueue len: ", len(w.pool))
+ log.Debug("expire transactions started. addrQueue len: ", len(w.pool))
for _, addrQueue := range w.pool {
subTxs, prevReadyTx := addrQueue.ExpireTransactions(maxTime)
txs = append(txs, subTxs...)
@@ -360,7 +365,7 @@ func (w *Worker) ExpireTransactions(maxTime time.Duration) []*TxTracker {
delete(w.pool, addrQueue.fromStr)
}
}
- log.Info("ExpireTransactions end. addrQueue len: ", len(w.pool), " deleteCount: ", len(txs))
+ log.Debug("expire transactions ended. addrQueue len: ", len(w.pool), " deleteCount: ", len(txs))
return txs
}
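With the signature change above, callers of GetBestFittingTx can now tell an empty or non-fitting list apart from a real failure instead of testing only for a nil tracker. A sketch of caller-side handling; the types and sentinel errors below are stand-ins for the sequencer's own (ErrTransactionsListEmpty and ErrNoFittingTransaction), not the project's definitions:

package sketch

import (
	"errors"
	"time"
)

type txTracker struct{}
type batchResources struct{}

var (
	errTransactionsListEmpty = errors.New("transactions list is empty")
	errNoFittingTransaction  = errors.New("no transaction fits the remaining batch resources")
)

type txPicker interface {
	GetBestFittingTx(resources batchResources) (*txTracker, error)
}

// nextTx distinguishes "nothing selectable right now" (back off and retry)
// from unexpected failures (let the caller log and decide whether to halt).
func nextTx(w txPicker, resources batchResources) (*txTracker, bool, error) {
	tx, err := w.GetBestFittingTx(resources)
	switch {
	case err == nil:
		return tx, true, nil
	case errors.Is(err, errTransactionsListEmpty), errors.Is(err, errNoFittingTransaction):
		time.Sleep(100 * time.Millisecond) // nothing fits; throttle before the next attempt
		return nil, false, nil
	default:
		return nil, false, err
	}
}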
diff --git a/sequencer/worker_test.go b/sequencer/worker_test.go
index 101e09ff80..4e844e21ab 100644
--- a/sequencer/worker_test.go
+++ b/sequencer/worker_test.go
@@ -261,7 +261,7 @@ func TestWorkerGetBestTx(t *testing.T) {
ct := 0
for {
- tx := worker.GetBestFittingTx(rc)
+ tx, _ := worker.GetBestFittingTx(rc)
if tx != nil {
if ct >= len(expectedGetBestTx) {
t.Fatalf("Error getting more best tx than expected. Expected=%d, Actual=%d", len(expectedGetBestTx), ct+1)
diff --git a/sequencesender/sequencesender.go b/sequencesender/sequencesender.go
index 46795e2b28..af867cfe12 100644
--- a/sequencesender/sequencesender.go
+++ b/sequencesender/sequencesender.go
@@ -162,17 +162,19 @@ func (s *SequenceSender) getSequencesToSend(ctx context.Context) ([]types.Sequen
}
seq := types.Sequence{
- GlobalExitRoot: batch.GlobalExitRoot,
- Timestamp: batch.Timestamp.Unix(),
+ GlobalExitRoot: batch.GlobalExitRoot, //TODO: set empty for regular batches
+ Timestamp: batch.Timestamp.Unix(), //TODO: set empty for regular batches
BatchL2Data: batch.BatchL2Data,
BatchNumber: batch.BatchNumber,
}
if batch.ForcedBatchNum != nil {
+ //TODO: Assign GER, timestamp(forcedAt) and l1block.parentHash to seq
forcedBatch, err := s.state.GetForcedBatch(ctx, *batch.ForcedBatchNum, nil)
if err != nil {
return nil, err
}
+
seq.ForcedBatchTimestamp = forcedBatch.ForcedAt.Unix()
}
diff --git a/state/batchV2.go b/state/batchV2.go
index 6d1025902b..cecc601706 100644
--- a/state/batchV2.go
+++ b/state/batchV2.go
@@ -41,7 +41,8 @@ func (s *State) ProcessBatchV2(ctx context.Context, request ProcessRequest, upda
updateMT = cTrue
}
- forkID := s.GetForkIDByBatchNumber(request.BatchNumber)
+ //forkID := s.GetForkIDByBatchNumber(request.BatchNumber)
+ forkID := uint64(7)
// Create Batch
var processBatchRequest = &executor.ProcessBatchRequestV2{
@@ -62,7 +63,7 @@ func (s *State) ProcessBatchV2(ctx context.Context, request ProcessRequest, upda
processBatchRequest.SkipFirstChangeL2Block = cTrue
}
- if request.SkipWriteBlockInfoRoot {
+ if request.SkipWriteBlockInfoRoot_V2 {
processBatchRequest.SkipWriteBlockInfoRoot = cTrue
}
@@ -235,7 +236,7 @@ func (s *State) sendBatchRequestToExecutorV2(ctx context.Context, processBatchRe
if caller != metrics.DiscardCallerLabel {
metrics.ExecutorProcessingTime(string(caller), elapsed)
}
- log.Infof("Batch: %d took %v to be processed by the executor ", processBatchRequest.OldBatchNum+1, elapsed)
+ log.Infof("batch %d took %v to be processed by the executor ", processBatchRequest.OldBatchNum+1, elapsed)
return res, err
}
diff --git a/state/helper.go b/state/helper.go
index c49ec388fb..749fe4981d 100644
--- a/state/helper.go
+++ b/state/helper.go
@@ -276,7 +276,7 @@ func DecodeTx(encodedTx string) (*types.Transaction, error) {
return tx, nil
}
-func generateReceipt(blockNumber *big.Int, processedTx *ProcessTransactionResponse) *types.Receipt {
+func GenerateReceipt(blockNumber *big.Int, processedTx *ProcessTransactionResponse) *types.Receipt {
receipt := &types.Receipt{
Type: uint8(processedTx.Type),
PostState: processedTx.StateRoot.Bytes(),
diff --git a/state/interfaces.go b/state/interfaces.go
index ef27e4cb04..49a357dc77 100644
--- a/state/interfaces.go
+++ b/state/interfaces.go
@@ -109,7 +109,7 @@ type storage interface {
GetLastClosedBatch(ctx context.Context, dbTx pgx.Tx) (*Batch, error)
GetLastClosedBatchNumber(ctx context.Context, dbTx pgx.Tx) (uint64, error)
UpdateBatchL2Data(ctx context.Context, batchNumber uint64, batchL2Data []byte, dbTx pgx.Tx) error
- UpdateBatchL2DataAndLER(ctx context.Context, batchNumber uint64, batchL2Data []byte, localExitRoot common.Hash, dbTx pgx.Tx) error
+ UpdateBatch(ctx context.Context, batchNumber uint64, batchL2Data []byte, localExitRoot common.Hash, dbTx pgx.Tx) error
AddAccumulatedInputHash(ctx context.Context, batchNum uint64, accInputHash common.Hash, dbTx pgx.Tx) error
GetLastTrustedForcedBatchNumber(ctx context.Context, dbTx pgx.Tx) (uint64, error)
AddTrustedReorg(ctx context.Context, reorg *TrustedReorg, dbTx pgx.Tx) error
@@ -130,6 +130,7 @@ type storage interface {
GetLogsByBlockNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) ([]*types.Log, error)
AddL1InfoRootToExitRoot(ctx context.Context, exitRoot *L1InfoTreeExitRootStorageEntry, dbTx pgx.Tx) error
GetAllL1InfoRootEntries(ctx context.Context, dbTx pgx.Tx) ([]L1InfoTreeExitRootStorageEntry, error)
+ GetLatestL1InfoRoot(ctx context.Context, maxBlockNumber uint64) (L1InfoTreeExitRootStorageEntry, error)
UpdateForkIDIntervalsInMemory(intervals []ForkIDInterval)
AddForkIDInterval(ctx context.Context, newForkID ForkIDInterval, dbTx pgx.Tx) error
GetForkIDByBlockNumber(blockNumber uint64) uint64
diff --git a/state/pgstatestorage/batch.go b/state/pgstatestorage/batch.go
index 5f41e647eb..ffabc35da1 100644
--- a/state/pgstatestorage/batch.go
+++ b/state/pgstatestorage/batch.go
@@ -782,8 +782,8 @@ func (p *PostgresStorage) UpdateBatchL2Data(ctx context.Context, batchNumber uin
return err
}
-// UpdateBatchL2DataAndLER updates data tx data in a batch and the local exit root
-func (p *PostgresStorage) UpdateBatchL2DataAndLER(ctx context.Context, batchNumber uint64, batchL2Data []byte, localExitRoot common.Hash, dbTx pgx.Tx) error {
+// UpdateBatch updates the BatchL2Data and local exit root of a batch
+func (p *PostgresStorage) UpdateBatch(ctx context.Context, batchNumber uint64, batchL2Data []byte, localExitRoot common.Hash, dbTx pgx.Tx) error {
const updateL2DataSQL = "UPDATE state.batch SET raw_txs_data = $2, local_exit_root = $3 WHERE batch_num = $1"
e := p.getExecQuerier(dbTx)
diff --git a/state/pgstatestorage/l1infotree.go b/state/pgstatestorage/l1infotree.go
index ef35db933a..07d0d6d6bf 100644
--- a/state/pgstatestorage/l1infotree.go
+++ b/state/pgstatestorage/l1infotree.go
@@ -2,6 +2,7 @@ package pgstatestorage
import (
"context"
+ "errors"
"github.com/0xPolygonHermez/zkevm-node/state"
"github.com/jackc/pgx/v4"
@@ -46,6 +47,25 @@ func (p *PostgresStorage) GetAllL1InfoRootEntries(ctx context.Context, dbTx pgx.
return entries, nil
}
+// GetLatestL1InfoRoot returns the latest L1InfoTree exit root entry whose block number is <= maxBlockNumber
+func (p *PostgresStorage) GetLatestL1InfoRoot(ctx context.Context, maxBlockNumber uint64) (state.L1InfoTreeExitRootStorageEntry, error) {
+ const getL1InfoRootSQL = `SELECT block_num, timestamp, mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, l1_info_tree_index
+ FROM state.exit_root
+ WHERE l1_info_tree_index IS NOT NULL AND block_num <= $1
+ ORDER BY l1_info_tree_index DESC`
+
+ entry := state.L1InfoTreeExitRootStorageEntry{}
+
+ e := p.getExecQuerier(nil)
+ err := e.QueryRow(ctx, getL1InfoRootSQL, maxBlockNumber).Scan(&entry.BlockNumber, &entry.Timestamp, &entry.MainnetExitRoot, &entry.RollupExitRoot, &entry.GlobalExitRoot.GlobalExitRoot,
+ &entry.PreviousBlockHash, &entry.L1InfoTreeRoot, &entry.L1InfoTreeIndex)
+
+ if !errors.Is(err, pgx.ErrNoRows) {
+ return entry, err
+ }
+
+ return entry, nil
+}
func (p *PostgresStorage) GetLatestIndex(ctx context.Context, dbTx pgx.Tx) (uint32, error) {
const getLatestIndexSQL = `SELECT max(l1_info_tree_index) as l1_info_tree_index FROM state.exit_root
WHERE l1_info_tree_index IS NOT NULL`
@@ -59,4 +79,4 @@ func (p *PostgresStorage) GetLatestIndex(ctx context.Context, dbTx pgx.Tx) (uint
return 0, state.ErrNotFound
}
return *l1InfoTreeIndex, nil
-}
\ No newline at end of file
+}
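Since GetLatestL1InfoRoot uses QueryRow, only the first row of the ordered result is ever scanned, so the query above is correct as written; an explicit LIMIT 1 can nonetheless let PostgreSQL stop producing rows once the newest entry is found. A hedged variant of just the statement (same columns and filter, one extra clause):

package sketch

// Same query with an explicit LIMIT 1; behaviour is unchanged because QueryRow
// only scans the first row, but the limit makes the intent explicit and can
// allow the planner to stop early.
const getLatestL1InfoRootSQL = `SELECT block_num, timestamp, mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, l1_info_tree_index
	FROM state.exit_root
	WHERE l1_info_tree_index IS NOT NULL AND block_num <= $1
	ORDER BY l1_info_tree_index DESC
	LIMIT 1`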
diff --git a/state/pgstatestorage/l2block.go b/state/pgstatestorage/l2block.go
index 31a975ae30..b33d7d4ba0 100644
--- a/state/pgstatestorage/l2block.go
+++ b/state/pgstatestorage/l2block.go
@@ -151,6 +151,7 @@ func (p *PostgresStorage) GetL2BlockTransactionCountByNumber(ctx context.Context
// AddL2Block adds a new L2 block to the State Store
func (p *PostgresStorage) AddL2Block(ctx context.Context, batchNumber uint64, l2Block *state.L2Block, receipts []*types.Receipt, txsEGPData []state.StoreTxEGPData, dbTx pgx.Tx) error {
+ //TODO: Optimize this function using only one SQL statement (with multiple values) to insert all the txs, receipts and logs
log.Debugf("[AddL2Block] adding l2 block: %v", l2Block.NumberU64())
start := time.Now()
diff --git a/state/transaction.go b/state/transaction.go
index 12ebb6fa5b..a707ee1936 100644
--- a/state/transaction.go
+++ b/state/transaction.go
@@ -232,7 +232,7 @@ func (s *State) StoreTransactions(ctx context.Context, batchNumber uint64, proce
})
transactions := []*types.Transaction{&processedTx.Tx}
- receipt := generateReceipt(header.Number, processedTx)
+ receipt := GenerateReceipt(header.Number, processedTx)
if !CheckLogOrder(receipt.Logs) {
return fmt.Errorf("error: logs received from executor are not in order")
}
@@ -550,7 +550,7 @@ func (s *State) StoreTransaction(ctx context.Context, batchNumber uint64, proces
})
transactions := []*types.Transaction{&processedTx.Tx}
- receipt := generateReceipt(header.Number, processedTx)
+ receipt := GenerateReceipt(header.Number, processedTx)
receipts := []*types.Receipt{receipt}
// Create l2Block to be able to calculate its hash
diff --git a/state/types.go b/state/types.go
index 3954d31429..2308619dc2 100644
--- a/state/types.go
+++ b/state/types.go
@@ -27,7 +27,7 @@ type ProcessRequest struct {
TimestampLimit_V2 uint64
Caller metrics.CallerLabel
SkipFirstChangeL2Block_V2 bool
- SkipWriteBlockInfoRoot bool
+ SkipWriteBlockInfoRoot_V2 bool
ForkID uint64
}
diff --git a/test/config/debug.node.config.toml b/test/config/debug.node.config.toml
index 1e76729e0f..24bf49c05c 100644
--- a/test/config/debug.node.config.toml
+++ b/test/config/debug.node.config.toml
@@ -92,11 +92,14 @@ MaxTxLifetime = "3h"
SleepDuration = "100ms"
ResourcePercentageToCloseBatch = 10
GERFinalityNumberOfBlocks = 0
+ ForcedBatchesFinalityNumberOfBlocks = 0
+ L1InfoRootFinalityNumberOfBlocks = 0
ClosingSignalsManagerWaitForCheckingL1Timeout = "10s"
ClosingSignalsManagerWaitForCheckingGER = "10s"
ClosingSignalsManagerWaitForCheckingForcedBatches = "10s"
- ForcedBatchesFinalityNumberOfBlocks = 0
+ WaitForCheckingL1InfoRoot = "10s"
TimestampResolution = "10s"
+ L2BlockTime = "3s"
StopSequencerOnBatchNum = 0
[Sequencer.DBManager]
PoolRetrievalInterval = "500ms"
diff --git a/test/config/test.node.config.toml b/test/config/test.node.config.toml
index 481b9abe62..f05077cc5d 100644
--- a/test/config/test.node.config.toml
+++ b/test/config/test.node.config.toml
@@ -107,11 +107,14 @@ MaxTxLifetime = "3h"
SleepDuration = "100ms"
ResourcePercentageToCloseBatch = 10
GERFinalityNumberOfBlocks = 0
+ ForcedBatchesFinalityNumberOfBlocks = 0
+ L1InfoRootFinalityNumberOfBlocks = 0
ClosingSignalsManagerWaitForCheckingL1Timeout = "10s"
ClosingSignalsManagerWaitForCheckingGER = "10s"
ClosingSignalsManagerWaitForCheckingForcedBatches = "10s"
- ForcedBatchesFinalityNumberOfBlocks = 0
+ WaitForCheckingL1InfoRoot = "10s"
TimestampResolution = "10s"
+ L2BlockTime = "3s"
StopSequencerOnBatchNum = 0
[Sequencer.DBManager]
PoolRetrievalInterval = "500ms"