diff --git a/.github/scripts/cpu_monitor.sh b/.github/scripts/cpu_monitor.sh
new file mode 100755
index 00000000000..563fb952273
--- /dev/null
+++ b/.github/scripts/cpu_monitor.sh
@@ -0,0 +1,136 @@
+#!/bin/bash
+
+# Configuration
+THRESHOLD=80
+MEASUREMENTS_FILE="/tmp/cpu_measurements.txt"
+MONITOR_INTERVAL=5 # seconds
+PROCESS_NAME="cdk-erigon"
+DETAILED_LOG="/tmp/cpu_detailed.log"
+
+# Function to get CPU usage for all matching processes
+get_process_cpu() {
+    # Clear previous detailed log
+    > "$DETAILED_LOG"
+
+    # Get PIDs of cdk-erigon processes
+    pids=$(pgrep -f "[c]dk-erigon")
+
+    if [ -n "$pids" ]; then
+        # Use top in batch mode for each PID to get current CPU usage
+        for pid in $pids; do
+            # Get process command
+            if [[ "$OSTYPE" == "darwin"* ]]; then
+                cmd=$(ps -p $pid -o command=)
+                cpu=$(top -l 1 -pid $pid | tail -1 | awk '{print $3}')
+            else
+                cmd=$(ps -p $pid -o cmd=)
+                cpu=$(top -b -n 1 -p $pid | tail -1 | awk '{print $9}')
+            fi
+            # Get current CPU usage
+            echo "$pid $cpu $cmd" >> "$DETAILED_LOG"
+        done
+    fi
+
+    # Sum total CPU usage
+    total_cpu=$(awk '{sum += $2} END {printf "%.1f", sum}' "$DETAILED_LOG")
+
+    # Return 0 if no process found
+    if [ -z "$total_cpu" ]; then
+        echo "0.0"
+    else
+        echo "$total_cpu"
+    fi
+}
+
+# Function to show current process details
+show_process_details() {
+    if [ -s "$DETAILED_LOG" ]; then
+        echo "Individual process details:"
+        printf "%-10s %-8s %-s\n" "PID" "CPU%" "Command"
+        echo "----------------------------------------"
+        while read -r line; do
+            pid=$(echo "$line" | awk '{print $1}')
+            cpu=$(echo "$line" | awk '{print $2}')
+            cmd=$(echo "$line" | cut -d' ' -f3-)
+            printf "%-10s %-8.1f %-s\n" "$pid" "$cpu" "$cmd"
+        done < "$DETAILED_LOG"
+        echo "----------------------------------------"
+    else
+        echo "No $PROCESS_NAME processes found"
+    fi
+}
+
+# Function to analyze CPU measurements
+analyze_cpu() {
+    if [ -f "$MEASUREMENTS_FILE" ]; then
+        # Calculate statistics
+        avg_cpu=$(awk '{ sum += $1 } END { print sum/NR }' "$MEASUREMENTS_FILE")
+        avg_cpu_rounded=$(printf "%.1f" "$avg_cpu")
+        max_cpu=$(awk 'BEGIN{max=0} {if($1>max) max=$1} END{print max}' "$MEASUREMENTS_FILE")
+        measurement_count=$(wc -l < "$MEASUREMENTS_FILE")
+
+        echo ""
+        echo "=== CPU Usage Analysis for all $PROCESS_NAME processes ==="
+        echo "Number of measurements: $measurement_count"
+        echo "Average Combined CPU Usage: $avg_cpu_rounded%"
+        echo "Peak Combined CPU Usage: $max_cpu%"
+        echo "Threshold: $THRESHOLD%"
+
+        # Get final process details for the report
+        echo ""
+        echo "Final process state:"
+        show_process_details
+
+        # Compare with threshold
+        if [ "$(echo "$avg_cpu > $THRESHOLD" | bc -l)" -eq 1 ]; then
+            echo ""
+            echo "ERROR: Average CPU usage ($avg_cpu_rounded%) exceeded threshold of $THRESHOLD%"
+            cleanup_and_exit 1
+        else
+            echo ""
+            echo "SUCCESS: CPU usage ($avg_cpu_rounded%) is within threshold of $THRESHOLD%"
+            cleanup_and_exit 0
+        fi
+    else
+        echo "ERROR: No CPU measurements found at $MEASUREMENTS_FILE"
+        cleanup_and_exit 1
+    fi
+}
+
+# Function to clean up and exit
+cleanup_and_exit() {
+    exit_code=$1
+    rm -f "$DETAILED_LOG"
+    exit $exit_code
+}
+
+# Function to handle interruption
+handle_interrupt() {
+    echo ""
+    echo "Monitoring interrupted. Analyzing collected data..."
+    analyze_cpu
+}
+
+# Set up trap for various signals
+trap handle_interrupt TERM INT
+
+# Clear measurements file
+> "$MEASUREMENTS_FILE"
+> "$DETAILED_LOG"
+
+echo "Starting CPU monitoring for all '$PROCESS_NAME' processes"
+echo "Storing measurements in $MEASUREMENTS_FILE"
+echo "Monitoring interval: ${MONITOR_INTERVAL}s"
+echo "Press Ctrl+C to stop monitoring and see analysis"
+echo ""
+
+# Start monitoring loop
+while true; do
+    # Get CPU usage for all matching processes
+    cpu_usage=$(get_process_cpu)
+    echo "$cpu_usage" >> "$MEASUREMENTS_FILE"
+    echo "$(date '+%Y-%m-%d %H:%M:%S') - Combined CPU Usage: $cpu_usage%"
+    show_process_details
+    echo ""
+    sleep "$MONITOR_INTERVAL"
+done
\ No newline at end of file
diff --git a/.github/workflows/ci_zkevm.yml b/.github/workflows/ci_zkevm.yml
index c2ea48397fe..f906bf93847 100644
--- a/.github/workflows/ci_zkevm.yml
+++ b/.github/workflows/ci_zkevm.yml
@@ -116,6 +116,23 @@ jobs:
         run: |
           kurtosis run --enclave cdk-v1 --image-download always . '{"args": {"data_availability_mode": "${{ matrix.da-mode }}", "cdk_erigon_node_image": "cdk-erigon:local"}}'
 
+      - name: Run process with CPU monitoring
+        working-directory: ./cdk-erigon
+        run: |
+          # Start monitoring in background
+          bash ./.github/scripts/cpu_monitor.sh &
+          monitor_pid=$!
+
+          # Wait for 30 seconds
+          sleep 30
+
+          # Stop monitoring and get analysis
+          kill -TERM $monitor_pid
+          wait $monitor_pid || {
+            echo "CPU usage exceeded threshold!"
+            exit 1
+          }
+
       - name: Monitor verified batches
         working-directory: ./kurtosis-cdk
         shell: bash
diff --git a/.gitignore b/.gitignore
index c7c9cc3c978..afb082b0f5e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -113,6 +113,7 @@ vendor
 
 # X Layer
 data/
+test/sqlite
 start.sh
 xlayerconfig-mainnet.yaml
 xlayerconfig-testnet.yaml
diff --git a/Makefile b/Makefile
index 7db93f0e97a..69ed8bb762a 100644
--- a/Makefile
+++ b/Makefile
@@ -194,6 +194,9 @@ lint:
 	@./erigon-lib/tools/golangci_lint.sh
 	@./erigon-lib/tools/mod_tidy_check.sh
 
+cpu_monitor:
+	@.github/scripts/cpu_monitor.sh
+
 ## clean: cleans the go cache, build dir, libmdbx db dir
 clean:
 	go clean -cache
diff --git a/README.md b/README.md
index 4e58a99cadb..4fc265aecdd 100644
--- a/README.md
+++ b/README.md
@@ -208,6 +208,15 @@ Useful config entries:
 - `zkevm.sync-limit`: This will ensure the network only syncs to a given block height.
 - `debug.timers`: This will enable debug timers in the logs to help with performance tuning. Recording timings of witness generation, etc. at INFO level.
+
+Metrics and pprof configuration flags:
+
+- `metrics`: Enables or disables the metrics collection. Set to true to enable.
+- `metrics.addr`: The address on which the metrics server will listen. Default is "0.0.0.0".
+- `metrics.port`: The port on which the metrics server will listen. Default is 6060.
+- `pprof`: Enables or disables the pprof profiling. Set to true to enable.
+- `pprof.addr`: The address on which the pprof server will listen. Default is "0.0.0.0".
+- `pprof.port`: The port on which the pprof server will listen. Default is 6061.
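As a minimal sketch of how these settings might be written in the node's yaml config file (key names taken from the flag list above, values are the stated defaults; the exact config file in use is an assumption):

```yaml
# Sketch only: enable metrics and pprof with the documented default addresses/ports.
metrics: true
metrics.addr: "0.0.0.0"
metrics.port: 6060
pprof: true
pprof.addr: "0.0.0.0"
pprof.port: 6061
```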
+ *** diff --git a/cmd/rpcdaemon/cli/config_zkevm.go b/cmd/rpcdaemon/cli/config_zkevm.go index 050dc1643c9..59e26d6c5bb 100644 --- a/cmd/rpcdaemon/cli/config_zkevm.go +++ b/cmd/rpcdaemon/cli/config_zkevm.go @@ -2,11 +2,12 @@ package cli import ( "fmt" - "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" + + "github.com/ledgerwatch/erigon/zk/datastream/server" "github.com/ledgerwatch/log/v3" ) -func StartDataStream(server *datastreamer.StreamServer) error { +func StartDataStream(server server.StreamServer) error { if server == nil { // no stream server to start, we might not have the right flags set to create one return nil diff --git a/cmd/rpcdaemon/health/check_time.go b/cmd/rpcdaemon/health/check_time.go index ffdfde24bde..43ea4af63cb 100644 --- a/cmd/rpcdaemon/health/check_time.go +++ b/cmd/rpcdaemon/health/check_time.go @@ -5,6 +5,8 @@ import ( "fmt" "net/http" + "github.com/ledgerwatch/erigon-lib/common/hexutil" + "github.com/ledgerwatch/erigon/rpc" ) @@ -20,13 +22,13 @@ func checkTime( if err != nil { return err } - timestamp := 0 + timestamp := uint64(0) if ts, ok := i["timestamp"]; ok { - if cs, ok := ts.(uint64); ok { - timestamp = int(cs) + if cs, ok := ts.(hexutil.Uint64); ok { + timestamp = cs.Uint64() } } - if timestamp < seconds { + if timestamp < uint64(seconds) { return fmt.Errorf("%w: got ts: %d, need: %d", errTimestampTooOld, timestamp, seconds) } diff --git a/cmd/rpcdaemon/health/health_test.go b/cmd/rpcdaemon/health/health_test.go index 419c7b9912b..079bedb3165 100644 --- a/cmd/rpcdaemon/health/health_test.go +++ b/cmd/rpcdaemon/health/health_test.go @@ -245,7 +245,7 @@ func TestProcessHealthcheckIfNeeded_HeadersTests(t *testing.T) { netApiResponse: hexutil.Uint(1), netApiError: nil, ethApiBlockResult: map[string]interface{}{ - "timestamp": uint64(time.Now().Add(-10 * time.Second).Unix()), + "timestamp": hexutil.Uint64(time.Now().Add(-10 * time.Second).Unix()), }, ethApiBlockError: nil, ethApiSyncingResult: false, @@ -264,7 +264,7 @@ func TestProcessHealthcheckIfNeeded_HeadersTests(t *testing.T) { netApiResponse: hexutil.Uint(1), netApiError: nil, ethApiBlockResult: map[string]interface{}{ - "timestamp": uint64(time.Now().Add(-1 * time.Hour).Unix()), + "timestamp": hexutil.Uint64(time.Now().Add(-1 * time.Hour).Unix()), }, ethApiBlockError: nil, ethApiSyncingResult: false, @@ -283,7 +283,7 @@ func TestProcessHealthcheckIfNeeded_HeadersTests(t *testing.T) { netApiResponse: hexutil.Uint(1), netApiError: nil, ethApiBlockResult: map[string]interface{}{ - "timestamp": uint64(time.Now().Add(1 * time.Hour).Unix()), + "timestamp": hexutil.Uint64(time.Now().Add(1 * time.Hour).Unix()), }, ethApiBlockError: nil, ethApiSyncingResult: false, @@ -319,7 +319,7 @@ func TestProcessHealthcheckIfNeeded_HeadersTests(t *testing.T) { netApiResponse: hexutil.Uint(10), netApiError: nil, ethApiBlockResult: map[string]interface{}{ - "timestamp": uint64(time.Now().Add(1 * time.Second).Unix()), + "timestamp": hexutil.Uint64(time.Now().Add(1 * time.Second).Unix()), }, ethApiBlockError: nil, ethApiSyncingResult: false, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 6b74caaeaa7..e99fb7cdbfb 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -758,6 +758,16 @@ var ( Usage: "Seal the batch immediately when detecting a counter overflow", Value: false, } + MockWitnessGeneration = cli.BoolFlag{ + Name: "zkevm.mock-witness-generation", + Usage: "Mock the witness generation", + Value: false, + } + WitnessContractInclusion = cli.StringFlag{ + Name: 
"zkevm.witness-contract-inclusion", + Usage: "Contracts that will have all of their storage added to the witness every time", + Value: "", + } ACLPrintHistory = cli.IntFlag{ Name: "acl.print-history", Usage: "Number of entries to print from the ACL history on node start up", diff --git a/core/state/intra_block_state.go b/core/state/intra_block_state.go index 5e3e5877269..b082b364351 100644 --- a/core/state/intra_block_state.go +++ b/core/state/intra_block_state.go @@ -430,6 +430,11 @@ func (sdb *IntraBlockState) SeenAccount(addr libcommon.Address) bool { return ok } +func (sdb *IntraBlockState) IsDirtyJournal(addr libcommon.Address) bool { + _, ok := sdb.journal.dirties[addr] + return ok +} + func (sdb *IntraBlockState) HasLiveState(addr libcommon.Address, key *libcommon.Hash) bool { if stateObject := sdb.stateObjects[addr]; stateObject != nil { if _, ok := stateObject.originStorage[*key]; ok { diff --git a/core/state/trie_db.go b/core/state/trie_db.go index fb23f799cf8..3a13013b83e 100644 --- a/core/state/trie_db.go +++ b/core/state/trie_db.go @@ -885,7 +885,7 @@ func (tds *TrieDbState) GetTrieHash() common.Hash { return tds.t.Hash() } -func (tds *TrieDbState) ResolveSMTRetainList() (*trie.RetainList, error) { +func (tds *TrieDbState) ResolveSMTRetainList(inclusion map[libcommon.Address][]libcommon.Hash) (*trie.RetainList, error) { // Aggregating the current buffer, if any if tds.currentBuffer != nil { if tds.aggregateBuffer == nil { @@ -967,6 +967,16 @@ func (tds *TrieDbState) ResolveSMTRetainList() (*trie.RetainList, error) { keys = append(keys, smtPath) } + for address, slots := range inclusion { + for _, slot := range slots { + smtPath, err := getSMTPath(address.String(), slot.String()) + if err != nil { + return nil, err + } + keys = append(keys, smtPath) + } + } + rl := trie.NewRetainList(0) for _, key := range keys { diff --git a/core/vm/contracts_zkevm.go b/core/vm/contracts_zkevm.go index 037f2bc7c42..895e1162820 100644 --- a/core/vm/contracts_zkevm.go +++ b/core/vm/contracts_zkevm.go @@ -395,18 +395,26 @@ func (c *bigModExp_zkevm) Run(input []byte) ([]byte, error) { baseLen = new(big.Int).SetBytes(getData(input, 0, 32)).Uint64() expLen = new(big.Int).SetBytes(getData(input, 32, 32)).Uint64() modLen = new(big.Int).SetBytes(getData(input, 64, 32)).Uint64() + base = big.NewInt(0) + exp = big.NewInt(0) + mod = big.NewInt(0) ) - if len(input) > 96 { - input = input[96:] - } else { - input = input[:0] + + if len(input) >= 96 + int(baseLen) { + base = new(big.Int).SetBytes(getData(input, 96, uint64(baseLen))) + } + if len(input) >= 96 + int(baseLen) + int(expLen) { + exp = new(big.Int).SetBytes(getData(input, 96 + uint64(baseLen), uint64(expLen))) + } + if len(input) >= 96 + int(baseLen) + int(expLen) + int(modLen) { + mod = new(big.Int).SetBytes(getData(input, 96 + uint64(baseLen) + uint64(expLen), uint64(modLen))) + } + if len(input) < 96 + int(baseLen) + int(expLen) + int(modLen) { + input = common.LeftPadBytes(input, 96 + int(baseLen) + int(expLen) + int(modLen)) } // Retrieve the operands and execute the exponentiation var ( - base = new(big.Int).SetBytes(getData(input, 0, baseLen)) - exp = new(big.Int).SetBytes(getData(input, baseLen, expLen)) - mod = new(big.Int).SetBytes(getData(input, baseLen+expLen, modLen)) v []byte baseBitLen = base.BitLen() expBitLen = exp.BitLen() diff --git a/core/vm/evmtypes/evmtypes.go b/core/vm/evmtypes/evmtypes.go index 4f8570f84b0..dcde6ab4e86 100644 --- a/core/vm/evmtypes/evmtypes.go +++ b/core/vm/evmtypes/evmtypes.go @@ -84,6 +84,7 @@ type 
IntraBlockState interface { SetState(common.Address, *common.Hash, uint256.Int) HasLiveAccount(addr common.Address) bool SeenAccount(addr common.Address) bool + IsDirtyJournal(addr common.Address) bool HasLiveState(addr common.Address, key *common.Hash) bool GetTransientState(addr common.Address, key common.Hash) uint256.Int diff --git a/core/vm/instructions_zkevm_test.go b/core/vm/instructions_zkevm_test.go index c2398b68c95..a63038c321b 100644 --- a/core/vm/instructions_zkevm_test.go +++ b/core/vm/instructions_zkevm_test.go @@ -211,3 +211,5 @@ func (ibs TestIntraBlockState) Prepare(rules *chain.Rules, sender, coinbase comm func (ibs TestIntraBlockState) Selfdestruct6780(common.Address) {} func (ibs TestIntraBlockState) SetDisableBalanceInc(disable bool) {} + +func (ibs TestIntraBlockState) IsDirtyJournal(addr common.Address) bool { return false } diff --git a/docs/endpoints/endpoints.md b/docs/endpoints/endpoints.md index 20f27be1a59..e8756c070a7 100644 --- a/docs/endpoints/endpoints.md +++ b/docs/endpoints/endpoints.md @@ -200,6 +200,8 @@ If the endpoint is not in the list below, it means this specific endpoint is not - zkevm_getL2BlockInfoTree - zkevm_getLatestGlobalExitRoot - zkevm_getProverInput +- zkevm_getRollupAddress +- zkevm_getRollupManagerAddress - zkevm_getVersionHistory - zkevm_getWitness - zkevm_isBlockConsolidated diff --git a/erigon-lib/kv/mdbx/kv_mdbx.go b/erigon-lib/kv/mdbx/kv_mdbx.go index 8d5bb64e7b8..9b26cf3182c 100644 --- a/erigon-lib/kv/mdbx/kv_mdbx.go +++ b/erigon-lib/kv/mdbx/kv_mdbx.go @@ -59,7 +59,8 @@ type MdbxOpts struct { // must be in the range from 12.5% (almost empty) to 50% (half empty) // which corresponds to the range from 8192 and to 32768 in units respectively log log.Logger - roTxsLimiter *semaphore.Weighted + readTxLimiter *semaphore.Weighted + writeTxLimiter *semaphore.Weighted bucketsCfg TableCfgFunc path string syncPeriod time.Duration @@ -109,7 +110,7 @@ func (opts MdbxOpts) DirtySpace(s uint64) MdbxOpts { } func (opts MdbxOpts) RoTxsLimiter(l *semaphore.Weighted) MdbxOpts { - opts.roTxsLimiter = l + opts.readTxLimiter = l return opts } @@ -386,20 +387,26 @@ func (opts MdbxOpts) Open(ctx context.Context) (kv.RwDB, error) { // return nil, err //} - if opts.roTxsLimiter == nil { + if opts.readTxLimiter == nil { targetSemCount := int64(runtime.GOMAXPROCS(-1) * 16) - opts.roTxsLimiter = semaphore.NewWeighted(targetSemCount) // 1 less than max to allow unlocking to happen + opts.readTxLimiter = semaphore.NewWeighted(targetSemCount) // 1 less than max to allow unlocking to happen + } + + if opts.writeTxLimiter == nil { + targetSemCount := int64(runtime.GOMAXPROCS(-1)) - 1 + opts.writeTxLimiter = semaphore.NewWeighted(targetSemCount) // 1 less than max to allow unlocking to happen } txsCountMutex := &sync.Mutex{} db := &MdbxKV{ - opts: opts, - env: env, - log: opts.log, - buckets: kv.TableCfg{}, - txSize: dirtyPagesLimit * opts.pageSize, - roTxsLimiter: opts.roTxsLimiter, + opts: opts, + env: env, + log: opts.log, + buckets: kv.TableCfg{}, + txSize: dirtyPagesLimit * opts.pageSize, + readTxLimiter: opts.readTxLimiter, + writeTxLimiter: opts.writeTxLimiter, txsCountMutex: txsCountMutex, txsAllDoneOnCloseCond: sync.NewCond(txsCountMutex), @@ -468,14 +475,15 @@ func (opts MdbxOpts) MustOpen() kv.RwDB { } type MdbxKV struct { - log log.Logger - env *mdbx.Env - buckets kv.TableCfg - roTxsLimiter *semaphore.Weighted // does limit amount of concurrent Ro transactions - in most casess runtime.NumCPU() is good value for this channel capacity - this channel can 
be shared with other components (like Decompressor) - opts MdbxOpts - txSize uint64 - closed atomic.Bool - path string + log log.Logger + env *mdbx.Env + buckets kv.TableCfg + readTxLimiter *semaphore.Weighted // does limit amount of concurrent Ro transactions - in most casess runtime.NumCPU() is good value for this channel capacity - this channel can be shared with other components (like Decompressor) + writeTxLimiter *semaphore.Weighted + opts MdbxOpts + txSize uint64 + closed atomic.Bool + path string txsCount uint txsCountMutex *sync.Mutex @@ -748,7 +756,7 @@ func (db *MdbxKV) BeginRo(ctx context.Context) (txn kv.Tx, err error) { } // will return nil err if context is cancelled (may appear to acquire the semaphore) - if semErr := db.roTxsLimiter.Acquire(ctx, 1); semErr != nil { + if semErr := db.readTxLimiter.Acquire(ctx, 1); semErr != nil { db.trackTxEnd() return nil, fmt.Errorf("mdbx.MdbxKV.BeginRo: roTxsLimiter error %w", semErr) } @@ -757,7 +765,7 @@ func (db *MdbxKV) BeginRo(ctx context.Context) (txn kv.Tx, err error) { if txn == nil { // on error, or if there is whatever reason that we don't return a tx, // we need to free up the limiter slot, otherwise it could lead to deadlocks - db.roTxsLimiter.Release(1) + db.readTxLimiter.Release(1) db.trackTxEnd() } }() @@ -784,17 +792,34 @@ func (db *MdbxKV) BeginRwNosync(ctx context.Context) (kv.RwTx, error) { } func (db *MdbxKV) beginRw(ctx context.Context, flags uint) (txn kv.RwTx, err error) { + if db.closed.Load() { + return nil, fmt.Errorf("db closed") + } + select { case <-ctx.Done(): return nil, ctx.Err() default: } + // will return nil err if context is cancelled (may appear to acquire the semaphore) + if semErr := db.writeTxLimiter.Acquire(ctx, 1); semErr != nil { + return nil, semErr + } + if !db.trackTxBegin() { return nil, fmt.Errorf("db closed") } runtime.LockOSThread() + defer func() { + if txn == nil { + // on error, or if there is whatever reason that we don't return a tx, + // we need to free up the limiter slot, otherwise it could lead to deadlocks + db.writeTxLimiter.Release(1) + runtime.UnlockOSThread() + } + }() tx, err := db.env.BeginTxn(nil, flags) if err != nil { runtime.UnlockOSThread() // unlock only in case of error. 
normal flow is "defer .Rollback()" @@ -1048,8 +1073,9 @@ func (tx *MdbxTx) Commit() error { tx.tx = nil tx.db.trackTxEnd() if tx.readOnly { - tx.db.roTxsLimiter.Release(1) + tx.db.readTxLimiter.Release(1) } else { + tx.db.writeTxLimiter.Release(1) runtime.UnlockOSThread() } tx.db.leakDetector.Del(tx.id) @@ -1099,8 +1125,9 @@ func (tx *MdbxTx) Rollback() { tx.tx = nil tx.db.trackTxEnd() if tx.readOnly { - tx.db.roTxsLimiter.Release(1) + tx.db.readTxLimiter.Release(1) } else { + tx.db.writeTxLimiter.Release(1) runtime.UnlockOSThread() } tx.db.leakDetector.Del(tx.id) diff --git a/erigon-lib/kv/mdbx/kv_mdbx_test.go b/erigon-lib/kv/mdbx/kv_mdbx_test.go index d30d8a5624d..c31a1379404 100644 --- a/erigon-lib/kv/mdbx/kv_mdbx_test.go +++ b/erigon-lib/kv/mdbx/kv_mdbx_test.go @@ -31,6 +31,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/order" + "sync" ) func BaseCaseDB(t *testing.T) kv.RwDB { @@ -1087,3 +1088,41 @@ func TestDB_BatchTime(t *testing.T) { t.Fatal(err) } } + +func TestDeadlock(t *testing.T) { + path := t.TempDir() + logger := log.New() + table := "Table" + db := NewMDBX(logger).InMem(path).WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.TableCfg{ + table: kv.TableCfgItem{Flags: kv.DupSort}, + kv.Sequence: kv.TableCfgItem{}, + } + }).MapSize(128 * datasize.MB).MustOpen() + t.Cleanup(db.Close) + + wg := sync.WaitGroup{} + for i := 0; i < 300_000; i++ { + wg.Add(1) + go func(idx int) { + ctx := context.Background() + // create a write transaction every X requests + if idx%5 == 0 { + tx, err := db.BeginRw(ctx) + if err != nil { + t.Fatal(err) + } + defer tx.Rollback() + } else { + tx, err := db.BeginRo(ctx) + if err != nil { + t.Fatal(err) + } + defer tx.Rollback() + } + wg.Done() + }(i) + } + + wg.Wait() +} diff --git a/eth/backend.go b/eth/backend.go index c6d9db5a8c8..194ad435f26 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -130,6 +130,7 @@ import ( "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/ledgerwatch/erigon/zk/contracts" "github.com/ledgerwatch/erigon/zk/datastream/client" + "github.com/ledgerwatch/erigon/zk/datastream/server" "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/ledgerwatch/erigon/zk/l1_cache" "github.com/ledgerwatch/erigon/zk/l1infotree" @@ -143,6 +144,8 @@ import ( "github.com/ledgerwatch/erigon/zkevm/etherman" ) +var dataStreamServerFactory = server.NewZkEVMDataStreamServerFactory() + // Config contains the configuration options of the ETH protocol. // Deprecated: use ethconfig.Config instead. 
type Config = ethconfig.Config @@ -220,7 +223,7 @@ type Ethereum struct { logger log.Logger // zk - dataStream *datastreamer.StreamServer + streamServer server.StreamServer l1Syncer *syncer.L1Syncer etherManClients []*etherman.Client l1Cache *l1_cache.L1Cache @@ -978,8 +981,9 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger Level: "warn", Outputs: nil, } + // todo [zkevm] read the stream version from config and figure out what system id is used for - backend.dataStream, err = datastreamer.NewServer(uint16(httpCfg.DataStreamPort), uint8(backend.config.DatastreamVersion), 1, datastreamer.StreamType(1), file, httpCfg.DataStreamWriteTimeout, httpCfg.DataStreamInactivityTimeout, httpCfg.DataStreamInactivityCheckInterval, logConfig) + backend.streamServer, err = dataStreamServerFactory.CreateStreamServer(uint16(httpCfg.DataStreamPort), uint8(backend.config.DatastreamVersion), 1, datastreamer.StreamType(1), file, httpCfg.DataStreamWriteTimeout, httpCfg.DataStreamInactivityTimeout, httpCfg.DataStreamInactivityCheckInterval, logConfig) if err != nil { return nil, err } @@ -987,7 +991,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger // recovery here now, if the stream got into a bad state we want to be able to delete the file and have // the stream re-populated from scratch. So we check the stream for the latest header and if it is // 0 we can just set the datastream progress to 0 also which will force a re-population of the stream - latestHeader := backend.dataStream.GetHeader() + latestHeader := backend.streamServer.GetHeader() if latestHeader.TotalEntries == 0 { log.Info("[dataStream] setting the stream progress to 0") backend.preStartTasks.WarmUpDataStream = true @@ -1106,6 +1110,11 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger l1InfoTreeUpdater := l1infotree.NewUpdater(cfg.Zk, l1InfoTreeSyncer) + var dataStreamServer server.DataStreamServer + if backend.streamServer != nil { + dataStreamServer = dataStreamServerFactory.CreateDataStreamServer(backend.streamServer, backend.chainConfig.ChainID.Uint64()) + } + if isSequencer { // if we are sequencing transactions, we do the sequencing loop... 
witnessGenerator := witness.NewGenerator( @@ -1116,6 +1125,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger backend.chainConfig, backend.config.Zk, backend.engine, + backend.config.WitnessContractInclusion, ) var legacyExecutors []*legacy_executor_verifier.Executor = make([]*legacy_executor_verifier.Executor, 0, len(cfg.ExecutorUrls)) @@ -1135,10 +1145,9 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger verifier := legacy_executor_verifier.NewLegacyExecutorVerifier( *cfg.Zk, legacyExecutors, - backend.chainConfig, backend.chainDB, witnessGenerator, - backend.dataStream, + dataStreamServer, ) if cfg.Zk.Limbo { @@ -1173,7 +1182,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger backend.agg, backend.forkValidator, backend.engine, - backend.dataStream, + dataStreamServer, backend.l1Syncer, seqVerSyncer, l1BlockSyncer, @@ -1215,7 +1224,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger backend.engine, backend.l1Syncer, streamClient, - backend.dataStream, + dataStreamServer, l1InfoTreeUpdater, ) @@ -1336,8 +1345,12 @@ func (s *Ethereum) Init(stack *node.Node, config *ethconfig.Config, chainConfig // apiList := jsonrpc.APIList(chainKv, borDb, ethRpcClient, txPoolRpcClient, miningRpcClient, ff, stateCache, blockReader, backend.agg, httpRpcCfg, backend.engine, config, backend.l1Syncer) // authApiList := jsonrpc.AuthAPIList(chainKv, ethRpcClient, txPoolRpcClient, miningRpcClient, ff, stateCache, blockReader, backend.agg, httpRpcCfg, backend.engine, config) + var dataStreamServer server.DataStreamServer + if s.streamServer != nil { + dataStreamServer = dataStreamServerFactory.CreateDataStreamServer(s.streamServer, config.Zk.L2ChainId) + } var gpCache *jsonrpc.GasPriceCache - s.apiList, gpCache = jsonrpc.APIList(chainKv, ethRpcClient, txPoolRpcClient, s.txPool2, miningRpcClient, ff, stateCache, blockReader, s.agg, &httpRpcCfg, s.engine, config, s.l1Syncer, s.logger, s.dataStream) + s.apiList, gpCache = jsonrpc.APIList(chainKv, ethRpcClient, txPoolRpcClient, s.txPool2, miningRpcClient, ff, stateCache, blockReader, s.agg, &httpRpcCfg, s.engine, config, s.l1Syncer, s.logger, dataStreamServer) // For X Layer if s.txPool2 != nil && gpCache != nil { @@ -1380,7 +1393,7 @@ func (s *Ethereum) Init(stack *node.Node, config *ethconfig.Config, chainConfig } go func() { - if err := cli.StartDataStream(s.dataStream); err != nil { + if err := cli.StartDataStream(s.streamServer); err != nil { log.Error(err.Error()) return } @@ -1403,8 +1416,9 @@ func (s *Ethereum) PreStart() error { // we don't know when the server has actually started as it doesn't expose a signal that is has spun up // so here we loop and take a brief pause waiting for it to be ready attempts := 0 + dataStreamServer := dataStreamServerFactory.CreateDataStreamServer(s.streamServer, s.chainConfig.ChainID.Uint64()) for { - _, err = zkStages.CatchupDatastream(s.sentryCtx, "stream-catchup", tx, s.dataStream, s.chainConfig.ChainID.Uint64()) + _, err = zkStages.CatchupDatastream(s.sentryCtx, "stream-catchup", tx, dataStreamServer) if err != nil { if errors.Is(err, datastreamer.ErrAtomicOpNotAllowed) { attempts++ diff --git a/eth/ethconfig/config_zkevm.go b/eth/ethconfig/config_zkevm.go index 6c557ff62be..76f0c796f75 100644 --- a/eth/ethconfig/config_zkevm.go +++ b/eth/ethconfig/config_zkevm.go @@ -96,6 +96,8 @@ type Zk struct { InfoTreeUpdateInterval time.Duration BadBatches []uint64 SealBatchImmediatelyOnOverflow 
bool + MockWitnessGeneration bool + WitnessContractInclusion []common.Address } var DefaultZkConfig = Zk{ diff --git a/eth/tracers/native/zero.go b/eth/tracers/native/zero.go index 65c32c30069..3593a38eea1 100644 --- a/eth/tracers/native/zero.go +++ b/eth/tracers/native/zero.go @@ -220,7 +220,7 @@ func (t *zeroTracer) CaptureTxEnd(restGas uint64) { trace.StorageRead = nil } - if len(trace.StorageWritten) == 0 || !hasLiveAccount { + if len(trace.StorageWritten) == 0 || !hasLiveAccount || !t.env.IntraBlockState().IsDirtyJournal(addr) { trace.StorageWritten = nil } else { // A slot write could be reverted if the transaction is reverted. We will need to read the value from the statedb again to get the correct value. @@ -379,6 +379,7 @@ func (t *zeroTracer) addSLOADToAccount(addr libcommon.Address, key libcommon.Has func (t *zeroTracer) addSSTOREToAccount(addr libcommon.Address, key libcommon.Hash, value *uint256.Int) { t.tx.Traces[addr].StorageWritten[key] = value + t.tx.Traces[addr].StorageReadMap[key] = struct{}{} t.addOpCodeToAccount(addr, vm.SSTORE) } diff --git a/smt/pkg/smt/witness_test.go b/smt/pkg/smt/witness_test.go index b055f375e78..87dae548915 100644 --- a/smt/pkg/smt/witness_test.go +++ b/smt/pkg/smt/witness_test.go @@ -52,7 +52,8 @@ func prepareSMT(t *testing.T) (*smt.SMT, *trie.RetainList) { err = intraBlockState.CommitBlock(&chain.Rules{}, w) require.NoError(t, err, "error committing block") - rl, err := tds.ResolveSMTRetainList() + inclusions := make(map[libcommon.Address][]libcommon.Hash) + rl, err := tds.ResolveSMTRetainList(inclusions) require.NoError(t, err, "error resolving state trie") memdb := db.NewMemDb() diff --git a/test/Makefile b/test/Makefile index 1b2edad7fe5..888957f94fc 100644 --- a/test/Makefile +++ b/test/Makefile @@ -1,6 +1,5 @@ DOCKER_COMPOSE := docker compose -f docker-compose.yml DOCKER_STATELESS_EXECUTOR := xlayer-executor -DOCKER_STATE_DB := xlayer-state-db DOCKER_SEQ_SENDER := xlayer-seqs DOCKER_AGGREGATOR := xlayer-agg DOCKER_AGGREGATOR_DB := xlayer-agg-db @@ -20,9 +19,9 @@ DOCKER_DATA_AVAILABILITY_DB := xlayer-da-db DOCKER_POOL_DB := xlayer-pool-db DOCKER_POOL_MANAGER := xlayer-pool-manager DOCKER_SIGNER := xlayer-signer +DOCKER_DS := xlayer-ds RUN_DOCKER_STATELESS_EXECUTOR := $(DOCKER_COMPOSE) up -d $(DOCKER_STATELESS_EXECUTOR) -RUN_DOCKER_STATE_DB := $(DOCKER_COMPOSE) up -d $(DOCKER_STATE_DB) RUN_DOCKER_SEQ_SENDER := $(DOCKER_COMPOSE) up -d $(DOCKER_SEQ_SENDER) RUN_DOCKER_AGGREGATOR := $(DOCKER_COMPOSE) up -d $(DOCKER_AGGREGATOR) RUN_DOCKER_AGGREGATOR_DB := $(DOCKER_COMPOSE) up -d $(DOCKER_AGGREGATOR_DB) @@ -42,8 +41,9 @@ RUN_DOCKER_DATA_AVAILABILITY_DB := $(DOCKER_COMPOSE) up -d $(DOCKER_DATA_AVAILAB RUN_DOCKER_POOL_DB := $(DOCKER_COMPOSE) up -d $(DOCKER_POOL_DB) RUN_DOCKER_POOL_MANAGER := $(DOCKER_COMPOSE) up -d $(DOCKER_POOL_MANAGER) RUN_DOCKER_SIGNER := $(DOCKER_COMPOSE) up -d $(DOCKER_SIGNER) +RUN_DOCKER_DS := $(DOCKER_COMPOSE) up -d $(DOCKER_DS) -STOP := $(DOCKER_COMPOSE) down --remove-orphans +STOP := $(DOCKER_COMPOSE) down --remove-orphans; rm -rf sqlite .PHONY: run run: ## Runs a full node @@ -51,18 +51,18 @@ run: ## Runs a full node $(RUN_DOCKER_L1_NETWORK) $(RUN_DOCKER_DATA_AVAILABILITY_DB) $(RUN_DOCKER_POOL_DB) - $(RUN_DOCKER_STATE_DB) $(RUN_DOCKER_AGGREGATOR_DB) sleep 3 $(RUN_DOCKER_DATA_AVAILABILITY) $(RUN_DOCKER_APPROVE) + $(RUN_DOCKER_STATELESS_EXECUTOR) # app services - $(RUN_DOCKER_STATELESS_EXECUTOR) sleep 3 $(RUN_DOCKER_SEQ) $(RUN_DOCKER_PROVER) sleep 10 + $(RUN_DOCKER_DS) $(RUN_DOCKER_SIGNER) $(RUN_DOCKER_SEQ_SENDER) 
$(RUN_DOCKER_AGGREGATOR) @@ -75,7 +75,6 @@ all: ## Runs a full node $(RUN_DOCKER_L1_NETWORK) $(RUN_DOCKER_DATA_AVAILABILITY_DB) $(RUN_DOCKER_POOL_DB) - $(RUN_DOCKER_STATE_DB) $(RUN_DOCKER_AGGREGATOR_DB) $(RUN_DOCKER_BRIDGE_DB) $(RUN_DOCKER_BRIDGE_REDIS) @@ -83,13 +82,15 @@ all: ## Runs a full node $(RUN_DOCKER_BRIDGE_COIN_KAFKA) sleep 3 $(RUN_DOCKER_DATA_AVAILABILITY) + $(RUN_DOCKER_APPROVE) + $(RUN_DOCKER_STATELESS_EXECUTOR) # app services - #sleep 3 - #$(RUN_DOCKER_STATELESS_EXECUTOR) + sleep 3 $(RUN_DOCKER_SEQ) $(RUN_DOCKER_PROVER) sleep 10 + $(RUN_DOCKER_DS) $(RUN_DOCKER_SIGNER) $(RUN_DOCKER_SEQ_SENDER) $(RUN_DOCKER_AGGREGATOR) @@ -97,7 +98,7 @@ all: ## Runs a full node $(RUN_DOCKER_RPC) # bridge services - sleep 3 + sleep 30 $(RUN_DOCKER_BRIDGE_SERVICE) sleep 3 $(RUN_DOCKER_BRIDGE_UI) @@ -109,9 +110,12 @@ stop: ## Stops all services .PHONY: min-run min-run: ## Runs a minimal node $(RUN_DOCKER_L1_NETWORK) - sleep 3 + $(RUN_DOCKER_STATELESS_EXECUTOR) + sleep 10 $(RUN_DOCKER_SEQ) - sleep 20 + sleep 10 + $(RUN_DOCKER_DS) + sleep 10 $(RUN_DOCKER_RPC) diff --git a/test/config/cdk.config.toml b/test/config/cdk.config.toml index 33d2a7c67bc..2f26c9b80d8 100644 --- a/test/config/cdk.config.toml +++ b/test/config/cdk.config.toml @@ -6,6 +6,13 @@ Environment = "development" # "production" or "development" Level = "info" Outputs = ["stderr"] +[NetworkConfig.L1] +L1ChainID = 1337 +PolAddr = "0x5FbDB2315678afecb367f032d93F642f64180aa3" +ZkEVMAddr = "0xeb173087729c88a47568AF87b17C653039377BA6" +RollupManagerAddr = "0x2d42E2899662EFf08b13eeb65b154b904C7a1c8a" +GlobalExitRootManagerAddr = "0xB8cedD4B9eF683f0887C44a6E4312dC7A6e2fcdB" + [Etherman] URL="http://xlayer-mock-l1-network:8545" ForkIDChunkSize=100 @@ -49,7 +56,7 @@ GetBatchWaitInterval = "10s" ForcedGas = 0 GasPriceMarginFactor = 1 MaxGasPriceLimit = 0 - StoragePath = "ethtxmanager.sqlite" + StoragePath = "tmp/cdk/ethtxmanager.sqlite" ReadPendingL1Txs = false SafeStatusL1NumberOfBlocks = 0 FinalizedStatusL1NumberOfBlocks = 0 @@ -124,7 +131,7 @@ SyncModeOnlyEnabled = false Outputs = ["stderr"] [Aggregator.Synchronizer.SQLDB] DriverName = "sqlite3" - DataSource = "file:/tmp/cdk/aggregator_sync_db.sqlite" + DataSource = "/tmp/cdk/aggregator_sync_db.sqlite" [Aggregator.Synchronizer.Synchronizer] SyncInterval = "10s" SyncChunkSize = 1000 @@ -153,13 +160,14 @@ SyncModeOnlyEnabled = false NumRequests = 1000 Interval = "1s" [ReorgDetectorL1] -DBPath = "/tmp/cdk/reorgdetectorl1" +DBPath = "/tmp/cdk/reorg_detector_l1.sqlite" -[ReorgDetectorL2] -DBPath = "/tmp/cdk/reorgdetectorl2" +# Only for AGGORACLE, RPC, AGGSENDER +#[ReorgDetectorL2] +#DBPath = "/tmp/cdk/reorg_detector_l2.sqlite" [L1InfoTreeSync] -DBPath = "/tmp/cdk/L1InfoTreeSync.sqlite" +DBPath = "/tmp/cdk/l1_info_tree_sync.sqlite" GlobalExitRootAddr="0xB8cedD4B9eF683f0887C44a6E4312dC7A6e2fcdB" RollupManagerAddr = "0x2d42E2899662EFf08b13eeb65b154b904C7a1c8a" SyncBlockChunkSize=10 @@ -168,123 +176,122 @@ URLRPCL1="http://xlayer-rpc:8545" WaitForNewBlocksPeriod="100ms" InitialBlock= 353 -[AggOracle] -TargetChainType="EVM" -URLRPCL1="" -BlockFinality="FinalizedBlock" -WaitPeriodNextGER="100ms" - [AggOracle.EVMSender] - GlobalExitRootL2="0xa40d5f56745a118d0906a34e69aec8c0db1cb8fa" - URLRPCL2="" - ChainIDL2=195 - GasOffset=0 - WaitPeriodMonitorTx="100ms" - SenderAddr="0x70997970c51812dc3a010c7d01b50e0d17dc79c8" - [AggOracle.EVMSender.EthTxManager] - FrequencyToMonitorTxs = "1s" - WaitTxToBeMined = "2s" - GetReceiptMaxTime = "250ms" - GetReceiptWaitInterval = "1s" - PrivateKeys = [ - {Path = 
"/app/keystore/aggoracle.keystore", Password = "testonly"}, - ] - ForcedGas = 0 - GasPriceMarginFactor = 1 - MaxGasPriceLimit = 0 - StoragePath = "/tmp/cdk/ethtxmanager-sequencesender.sqlite" - ReadPendingL1Txs = false - SafeStatusL1NumberOfBlocks = 5 - FinalizedStatusL1NumberOfBlocks = 10 - [AggOracle.EVMSender.EthTxManager.Etherman] - URL = "http://xlayer-mock-l1-network:8545" - MultiGasProvider = false - L1ChainID = 1337 - HTTPHeaders = [] +# Only for AGGORACLE, RPC, AGGSENDER +#[AggOracle] +#TargetChainType="EVM" +#URLRPCL1="" +#BlockFinality="FinalizedBlock" +#WaitPeriodNextGER="100ms" +# [AggOracle.EVMSender] +# GlobalExitRootL2="0xa40d5f56745a118d0906a34e69aec8c0db1cb8fa" +# URLRPCL2="" +# ChainIDL2=195 +# GasOffset=0 +# WaitPeriodMonitorTx="100ms" +# SenderAddr="0x70997970c51812dc3a010c7d01b50e0d17dc79c8" +# [AggOracle.EVMSender.EthTxManager] +# FrequencyToMonitorTxs = "1s" +# WaitTxToBeMined = "2s" +# GetReceiptMaxTime = "250ms" +# GetReceiptWaitInterval = "1s" +# PrivateKeys = [ +# {Path = "/app/keystore/aggoracle.keystore", Password = "testonly"}, +# ] +# ForcedGas = 0 +# GasPriceMarginFactor = 1 +# MaxGasPriceLimit = 0 +# StoragePath = "/tmp/cdk/ethtxmanager-sequencesender.sqlite" +# ReadPendingL1Txs = false +# SafeStatusL1NumberOfBlocks = 5 +# FinalizedStatusL1NumberOfBlocks = 10 +# [AggOracle.EVMSender.EthTxManager.Etherman] +# URL = "http://xlayer-mock-l1-network:8545" +# MultiGasProvider = false +# L1ChainID = 1337 +# HTTPHeaders = [] -[RPC] -Host = "0.0.0.0" -Port = 5576 -ReadTimeout = "2s" -WriteTimeout = "2s" -MaxRequestsPerIPAndSecond = 10 - -[ClaimSponsor] -DBPath = "/tmp/cdk/claimsopnsor.sqlite" -Enabled = true -SenderAddr = "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266" -BridgeAddrL2 = "0x3a277Fa4E78cc1266F32E26c467F99A8eAEfF7c3" -MaxGas = 200000 -RetryAfterErrorPeriod = "1s" -MaxRetryAttemptsAfterError = -1 -WaitTxToBeMinedPeriod = "3s" -WaitOnEmptyQueue = "3s" -GasOffset = 0 - [ClaimSponsor.EthTxManager] - FrequencyToMonitorTxs = "1s" - WaitTxToBeMined = "2s" - GetReceiptMaxTime = "250ms" - GetReceiptWaitInterval = "1s" - PrivateKeys = [ - {Path = "/app/keystore/claimsopnsor.keystore", Password = "testonly"}, - ] - ForcedGas = 0 - GasPriceMarginFactor = 1 - MaxGasPriceLimit = 0 - StoragePath = "/tmp/cdk/ethtxmanager-claimsponsor.sqlite" - ReadPendingL1Txs = false - SafeStatusL1NumberOfBlocks = 5 - FinalizedStatusL1NumberOfBlocks = 10 - [ClaimSponsor.EthTxManager.Etherman] - URL = "http://xlayer-mock-l1-network:8545" - MultiGasProvider = false - L1ChainID = 1337 - HTTPHeaders = [] +# Only for RPC +#[RPC] +#Host = "0.0.0.0" +#Port = 5576 +#ReadTimeout = "2s" +#WriteTimeout = "2s" +#MaxRequestsPerIPAndSecond = 10 -[BridgeL1Sync] -DBPath = "/tmp/cdk/bridgel1sync.sqlite" -BlockFinality = "LatestBlock" -InitialBlockNum = 0 -BridgeAddr = "0x3a277Fa4E78cc1266F32E26c467F99A8eAEfF7c3" -SyncBlockChunkSize = 100 -RetryAfterErrorPeriod = "1s" -MaxRetryAttemptsAfterError = -1 -WaitForNewBlocksPeriod = "3s" -OriginNetwork=0 +# Only for RPC +#[ClaimSponsor] +#DBPath = "/tmp/cdk/claimsopnsor.sqlite" +#Enabled = true +#SenderAddr = "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266" +#BridgeAddrL2 = "0x3a277Fa4E78cc1266F32E26c467F99A8eAEfF7c3" +#MaxGas = 200000 +#RetryAfterErrorPeriod = "1s" +#MaxRetryAttemptsAfterError = -1 +#WaitTxToBeMinedPeriod = "3s" +#WaitOnEmptyQueue = "3s" +#GasOffset = 0 +# [ClaimSponsor.EthTxManager] +# FrequencyToMonitorTxs = "1s" +# WaitTxToBeMined = "2s" +# GetReceiptMaxTime = "250ms" +# GetReceiptWaitInterval = "1s" +# PrivateKeys = [ +# {Path = 
"/app/keystore/claimsopnsor.keystore", Password = "testonly"}, +# ] +# ForcedGas = 0 +# GasPriceMarginFactor = 1 +# MaxGasPriceLimit = 0 +# StoragePath = "/tmp/cdk/ethtxmanager-claimsponsor.sqlite" +# ReadPendingL1Txs = false +# SafeStatusL1NumberOfBlocks = 5 +# FinalizedStatusL1NumberOfBlocks = 10 +# [ClaimSponsor.EthTxManager.Etherman] +# URL = "http://xlayer-mock-l1-network:8545" +# MultiGasProvider = false +# L1ChainID = 1337 +# HTTPHeaders = [] -[BridgeL2Sync] -DBPath = "/tmp/cdk/bridgel2sync.sqlite" -BlockFinality = "LatestBlock" -InitialBlockNum = 0 -BridgeAddr = "0x3a277Fa4E78cc1266F32E26c467F99A8eAEfF7c3" -SyncBlockChunkSize = 100 -RetryAfterErrorPeriod = "1s" -MaxRetryAttemptsAfterError = -1 -WaitForNewBlocksPeriod = "3s" -OriginNetwork=1 +# Only for RPC +#[BridgeL1Sync] +#DBPath = "/tmp/cdk/bridgel1sync.sqlite" +#BlockFinality = "LatestBlock" +#InitialBlockNum = 0 +#BridgeAddr = "0x3a277Fa4E78cc1266F32E26c467F99A8eAEfF7c3" +#SyncBlockChunkSize = 100 +#RetryAfterErrorPeriod = "1s" +#MaxRetryAttemptsAfterError = -1 +#WaitForNewBlocksPeriod = "3s" +#OriginNetwork=0 -[LastGERSync] -# MDBX database path -DBPath = "/tmp/cdk/lastgersync.sqlite" -BlockFinality = "LatestBlock" -InitialBlockNum = 0 -GlobalExitRootL2Addr = "0xa40d5f56745a118d0906a34e69aec8c0db1cb8fa" -RetryAfterErrorPeriod = "1s" -MaxRetryAttemptsAfterError = -1 -WaitForNewBlocksPeriod = "1s" -DownloadBufferSize = 100 +# For RPC and aggsender +#[BridgeL2Sync] +#DBPath = "/tmp/cdk/bridgel2sync.sqlite" +#BlockFinality = "LatestBlock" +#InitialBlockNum = 0 +#BridgeAddr = "0x3a277Fa4E78cc1266F32E26c467F99A8eAEfF7c3" +#SyncBlockChunkSize = 100 +#RetryAfterErrorPeriod = "1s" +#MaxRetryAttemptsAfterError = -1 +#WaitForNewBlocksPeriod = "3s" +#OriginNetwork=1 -[NetworkConfig.L1] -L1ChainID = 1337 -PolAddr = "0x5FbDB2315678afecb367f032d93F642f64180aa3" -ZkEVMAddr = "0xeb173087729c88a47568AF87b17C653039377BA6" -RollupManagerAddr = "0x2d42E2899662EFf08b13eeb65b154b904C7a1c8a" -GlobalExitRootManagerAddr = "0xB8cedD4B9eF683f0887C44a6E4312dC7A6e2fcdB" +# Only for RPC +#[LastGERSync] +## MDBX database path +#DBPath = "/tmp/cdk/lastgersync.sqlite" +#BlockFinality = "LatestBlock" +#InitialBlockNum = 0 +#GlobalExitRootL2Addr = "0xa40d5f56745a118d0906a34e69aec8c0db1cb8fa" +#RetryAfterErrorPeriod = "1s" +#MaxRetryAttemptsAfterError = -1 +#WaitForNewBlocksPeriod = "1s" +#DownloadBufferSize = 100 -[AggSender] -StoragePath = "/tmp/cdk/aggsender.sqlite" -AggLayerURL = "" -AggsenderPrivateKey = {Path = "/pk/sequencer.keystore", Password = "testonly"} -BlockGetInterval = "2s" -URLRPCL2="http://xlayer-rpc:8545" -CheckSettledInterval = "2s" -SaveCertificatesToFiles = false +#[AggSender] +#StoragePath = "/tmp/cdk/aggsender.sqlite" +#AggLayerURL = "" +#AggsenderPrivateKey = {Path = "/pk/sequencer.keystore", Password = "testonly"} +#BlockGetInterval = "2s" +#URLRPCL2="http://xlayer-rpc:8545" +#CheckSettledInterval = "2s" +#SaveCertificatesToFiles = false diff --git a/test/config/ds-config.toml b/test/config/ds-config.toml new file mode 100644 index 00000000000..44615d44bf9 --- /dev/null +++ b/test/config/ds-config.toml @@ -0,0 +1,4 @@ +Server = "xlayer-seq:6900" +Port = 7900 +File = "/home/dsrelay/datarelay.bin" +Log = "info" \ No newline at end of file diff --git a/test/config/test.erigon.rpc.config.yaml b/test/config/test.erigon.rpc.config.yaml index 326a278b71b..147e8a6680d 100644 --- a/test/config/test.erigon.rpc.config.yaml +++ b/test/config/test.erigon.rpc.config.yaml @@ -4,7 +4,7 @@ http: true private.api.addr: localhost:9091 zkevm.l2-chain-id: 
195 zkevm.l2-sequencer-rpc-url: http://xlayer-seq:8545 -zkevm.l2-datastreamer-url: xlayer-seq:6900 +zkevm.l2-datastreamer-url: xlayer-ds:7900 zkevm.l1-chain-id: 1337 zkevm.l1-rpc-url: http://xlayer-mock-l1-network:8545 diff --git a/test/docker-compose.yml b/test/docker-compose.yml index df401b83452..983b54ac5e0 100644 --- a/test/docker-compose.yml +++ b/test/docker-compose.yml @@ -6,6 +6,9 @@ services: xlayer-executor: container_name: xlayer-executor image: hermeznetwork/zkevm-prover:v9.0.0-RC1-fork.13 + platform: linux/amd64 + environment: + - EXPERIMENTAL_DOCKER_DESKTOP_FORCE_QEMU=1 ports: - 0.0.0.0:50061:50061 # MT - 0.0.0.0:50071:50071 # Executor @@ -14,32 +17,11 @@ services: command: > zkProver -c /usr/src/app/config.json - xlayer-state-db: - container_name: xlayer-state-db - image: postgres - ports: - - 5432:5432 - volumes: - - ./sql/init_prover_db.sql:/docker-entrypoint-initdb.d/init.sql - environment: - - POSTGRES_USER=state_user - - POSTGRES_PASSWORD=state_password - - POSTGRES_DB=state_db - command: - - "postgres" - - "-N" - - "500" - healthcheck: - test: ["CMD-SHELL", "pg_isready -U state_user -d state_db"] - interval: 5s - timeout: 5s - retries: 5 - start_period: 5s - xlayer-seqs: container_name: xlayer-seqs - image: zjg555543/cdk:v0.4.0-beta5 + image: zjg555543/cdk:v0.4.0-beta10 volumes: + - ./sqlite/seqs:/tmp/cdk - ./keystore/da.permit.keystore:/pk/da.permit.keystore - ./keystore/sequencer.keystore:/pk/sequencer.keystore - ./config/cdk.config.toml:/app/config.toml @@ -51,10 +33,11 @@ services: xlayer-agg: container_name: xlayer-agg - image: zjg555543/cdk:v0.4.0-beta5 + image: zjg555543/cdk:v0.4.0-beta10 ports: - 50081:50081 volumes: + - ./sqlite/agg:/tmp/cdk - ./keystore/aggregator.keystore:/pk/aggregator.keystore - ./config/cdk.config.toml:/app/config.toml - ./config/test.genesis.config.json:/app/genesis.json @@ -92,13 +75,13 @@ services: xlayer-prover: container_name: xlayer-prover image: hermeznetwork/zkevm-prover:v9.0.0-RC1-fork.13 + platform: linux/amd64 + environment: + - EXPERIMENTAL_DOCKER_DESKTOP_FORCE_QEMU=1 volumes: - ./config/test.prover.config.json:/usr/src/app/config.json command: > zkProver -c /usr/src/app/config.json - depends_on: - xlayer-state-db: - condition: service_healthy xlayer-approve: container_name: xlayer-approve @@ -352,3 +335,19 @@ services: - "/bin/sh" - "-c" - "/app/xlayer-signer http -cfg /app/config.toml" + + xlayer-ds: + container_name: xlayer-ds + build: + context: . 
+ dockerfile: Dockerfile + restart: unless-stopped + image: zjg555543/xlayer-ds:basedev-merge-upstream-v0.2.7 + ports: + - 7900:7900 + volumes: + - ./config/ds-config.toml:/app/config.toml + command: + - "/bin/sh" + - "-c" + - "/app/dsrelay --cfg /app/config.toml" diff --git a/test/e2e/smoke_test.go b/test/e2e/smoke_test.go index 66bc4b76bf4..c97887161bc 100644 --- a/test/e2e/smoke_test.go +++ b/test/e2e/smoke_test.go @@ -44,7 +44,7 @@ func TestGetBatchSealTime(t *testing.T) { batchSealTime, err = operations.GetBatchSealTime(new(big.Int).SetUint64(batchNum)) require.Equal(t, batchSealTime, uint64(0)) log.Infof("Batch number: %d, times:%v", batchNum, i) - if batchNum > 0 { + if batchNum > 1 { break } time.Sleep(1 * time.Second) diff --git a/test/readme.md b/test/readme.md index 5849484e6bc..eb07c7d6dd9 100644 --- a/test/readme.md +++ b/test/readme.md @@ -26,7 +26,7 @@ make all; http://127.0.0.1:8090/ L1 OKB Token: 0x5FbDB2315678afecb367f032d93F642f64180aa3 -L2 WETH Token: 0x5d7AF92af4FF5a35323250D6ee174C23CCBe00EF +L2 WETH Token: 0x17a2a2e444a7f3446877d1b71eaa2b2ae7533baf L2 admin: 0x8f8E2d6cF621f30e9a11309D6A56A876281Fd534 ``` diff --git a/test/sql/init_event_db.sql b/test/sql/init_event_db.sql deleted file mode 100644 index c45a6ba1c0b..00000000000 --- a/test/sql/init_event_db.sql +++ /dev/null @@ -1,14 +0,0 @@ -CREATE TYPE level_t AS ENUM ('emerg', 'alert', 'crit', 'err', 'warning', 'notice', 'info', 'debug'); - -CREATE TABLE public.event ( - id BIGSERIAL PRIMARY KEY, - received_at timestamp WITH TIME ZONE default CURRENT_TIMESTAMP, - ip_address inet, - source varchar(32) not null, - component varchar(32), - level level_t not null, - event_id varchar(32) not null, - description text, - data bytea, - json jsonb -); diff --git a/test/sql/init_prover_db.sql b/test/sql/init_prover_db.sql deleted file mode 100644 index a4f8616c35c..00000000000 --- a/test/sql/init_prover_db.sql +++ /dev/null @@ -1,15 +0,0 @@ -CREATE DATABASE prover_db; -\connect prover_db; - -CREATE SCHEMA state; - -CREATE TABLE state.nodes (hash BYTEA PRIMARY KEY, data BYTEA NOT NULL); -CREATE TABLE state.program (hash BYTEA PRIMARY KEY, data BYTEA NOT NULL); - -CREATE USER prover_user with password 'prover_pass'; -ALTER DATABASE prover_db OWNER TO prover_user; -ALTER SCHEMA state OWNER TO prover_user; -ALTER SCHEMA public OWNER TO prover_user; -ALTER TABLE state.nodes OWNER TO prover_user; -ALTER TABLE state.program OWNER TO prover_user; -ALTER USER prover_user SET SEARCH_PATH=state; diff --git a/test/sql/single_db_server.sql b/test/sql/single_db_server.sql deleted file mode 100644 index 008bbb4c056..00000000000 --- a/test/sql/single_db_server.sql +++ /dev/null @@ -1,19 +0,0 @@ -CREATE DATABASE state_db; -CREATE DATABASE pool_db; -CREATE DATABASE rpc_db; - -CREATE DATABASE prover_db; -\connect prover_db; - -CREATE SCHEMA state; - -CREATE TABLE state.nodes (hash BYTEA PRIMARY KEY, data BYTEA NOT NULL); -CREATE TABLE state.program (hash BYTEA PRIMARY KEY, data BYTEA NOT NULL); - -CREATE USER prover_user with password 'prover_pass'; -ALTER DATABASE prover_db OWNER TO prover_user; -ALTER SCHEMA state OWNER TO prover_user; -ALTER SCHEMA public OWNER TO prover_user; -ALTER TABLE state.nodes OWNER TO prover_user; -ALTER TABLE state.program OWNER TO prover_user; -ALTER USER prover_user SET SEARCH_PATH=state; diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index 035461d1083..bac9fb35279 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -328,4 +328,6 @@ var DefaultFlags = []cli.Flag{ 
&utils.ACLPrintHistory, &utils.InfoTreeUpdateInterval, &utils.SealBatchImmediatelyOnOverflow, + &utils.MockWitnessGeneration, + &utils.WitnessContractInclusion, } diff --git a/turbo/cli/flags_zkevm.go b/turbo/cli/flags_zkevm.go index 43d551137d9..06977ddeb34 100644 --- a/turbo/cli/flags_zkevm.go +++ b/turbo/cli/flags_zkevm.go @@ -131,6 +131,15 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { badBatches = append(badBatches, val) } + var witnessInclusion []libcommon.Address + for _, s := range strings.Split(ctx.String(utils.WitnessContractInclusion.Name), ",") { + if s == "" { + // if there are no entries then we can just ignore it and move on + continue + } + witnessInclusion = append(witnessInclusion, libcommon.HexToAddress(s)) + } + cfg.Zk = ðconfig.Zk{ L2ChainId: ctx.Uint64(utils.L2ChainIdFlag.Name), L2RpcUrl: ctx.String(utils.L2RpcUrlFlag.Name), @@ -210,6 +219,8 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) { ACLPrintHistory: ctx.Int(utils.ACLPrintHistory.Name), InfoTreeUpdateInterval: ctx.Duration(utils.InfoTreeUpdateInterval.Name), SealBatchImmediatelyOnOverflow: ctx.Bool(utils.SealBatchImmediatelyOnOverflow.Name), + MockWitnessGeneration: ctx.Bool(utils.MockWitnessGeneration.Name), + WitnessContractInclusion: witnessInclusion, } // For X Layer diff --git a/turbo/jsonrpc/bor_helper.go b/turbo/jsonrpc/bor_helper.go index db0ad4ea60b..b9e826c00b7 100644 --- a/turbo/jsonrpc/bor_helper.go +++ b/turbo/jsonrpc/bor_helper.go @@ -58,7 +58,7 @@ func getHeaderByNumber(ctx context.Context, number rpc.BlockNumber, api *BorImpl return block.Header(), nil } - blockNum, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) + blockNum, _, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/daemon.go b/turbo/jsonrpc/daemon.go index 84d0dbfd4ee..711bf6d5ce7 100644 --- a/turbo/jsonrpc/daemon.go +++ b/turbo/jsonrpc/daemon.go @@ -17,10 +17,9 @@ import ( "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/zk/datastream/server" "github.com/ledgerwatch/erigon/zk/sequencer" "github.com/ledgerwatch/erigon/zk/syncer" - - "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" txpool2 "github.com/ledgerwatch/erigon/zk/txpool" ) @@ -28,7 +27,7 @@ import ( func APIList(db kv.RoDB, eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, rawPool *txpool2.TxPool, mining txpool.MiningClient, filters *rpchelper.Filters, stateCache kvcache.Cache, blockReader services.FullBlockReader, agg *libstate.Aggregator, cfg *httpcfg.HttpCfg, engine consensus.EngineReader, - ethCfg *ethconfig.Config, l1Syncer *syncer.L1Syncer, logger log.Logger, datastreamServer *datastreamer.StreamServer, + ethCfg *ethconfig.Config, l1Syncer *syncer.L1Syncer, logger log.Logger, dataStreamServer server.DataStreamServer, ) (list []rpc.API, gpCache *GasPriceCache) { // non-sequencer nodes should forward on requests to the sequencer rpcUrl := "" @@ -69,7 +68,7 @@ func APIList(db kv.RoDB, eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, r otsImpl := NewOtterscanAPI(base, db, cfg.OtsMaxPageSize) gqlImpl := NewGraphQLAPI(base, db) overlayImpl := NewOverlayAPI(base, db, cfg.Gascap, cfg.OverlayGetLogsTimeout, cfg.OverlayReplayBlockTimeout, otsImpl) - zkEvmImpl := NewZkEvmAPI(ethImpl, db, cfg.ReturnDataLimit, ethCfg, l1Syncer, rpcUrl, 
datastreamServer) + zkEvmImpl := NewZkEvmAPI(ethImpl, db, cfg.ReturnDataLimit, ethCfg, l1Syncer, rpcUrl, dataStreamServer) if cfg.GraphQLEnabled { list = append(list, rpc.API{ diff --git a/turbo/jsonrpc/debug_api.go b/turbo/jsonrpc/debug_api.go index 095dc8c0ec1..1f65b7d47c4 100644 --- a/turbo/jsonrpc/debug_api.go +++ b/turbo/jsonrpc/debug_api.go @@ -375,7 +375,8 @@ func (api *PrivateDebugAPIImpl) GetRawHeader(ctx context.Context, blockNrOrHash return nil, err } defer tx.Rollback() - n, h, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + + n, h, _, err := rpchelper.GetBlockNumber_zkevm(blockNrOrHash, tx, api.filters) if err != nil { return nil, err } @@ -395,7 +396,7 @@ func (api *PrivateDebugAPIImpl) GetRawBlock(ctx context.Context, blockNrOrHash r return nil, err } defer tx.Rollback() - n, h, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + n, h, _, err := rpchelper.GetBlockNumber_zkevm(blockNrOrHash, tx, api.filters) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/erigon_block.go b/turbo/jsonrpc/erigon_block.go index f6ef01ef1cb..32a401224ef 100644 --- a/turbo/jsonrpc/erigon_block.go +++ b/turbo/jsonrpc/erigon_block.go @@ -43,7 +43,7 @@ func (api *ErigonImpl) GetHeaderByNumber(ctx context.Context, blockNumber rpc.Bl } defer tx.Rollback() - blockNum, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(blockNumber), tx, api.filters) + blockNum, _, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(blockNumber), tx, api.filters) if err != nil { return nil, err } @@ -213,7 +213,7 @@ func (api *ErigonImpl) GetBalanceChangesInBlock(ctx context.Context, blockNrOrHa return nil, err } - blockNumber, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + blockNumber, _, _, err := rpchelper.GetBlockNumber_zkevm(blockNrOrHash, tx, api.filters) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/erigon_receipts.go b/turbo/jsonrpc/erigon_receipts.go index a738e19b509..17c6cb79b91 100644 --- a/turbo/jsonrpc/erigon_receipts.go +++ b/turbo/jsonrpc/erigon_receipts.go @@ -407,7 +407,7 @@ func (api *ErigonImpl) GetBlockReceiptsByBlockHash(ctx context.Context, cannonic } } - blockNum, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithHash(cannonicalBlockHash, true), tx, api.filters) + blockNum, _, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithHash(cannonicalBlockHash, true), tx, api.filters) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/eth_api.go b/turbo/jsonrpc/eth_api.go index f80b2e18e61..b868ea24501 100644 --- a/turbo/jsonrpc/eth_api.go +++ b/turbo/jsonrpc/eth_api.go @@ -288,7 +288,7 @@ func (api *BaseAPI) pendingBlock() *types.Block { } func (api *BaseAPI) blockByRPCNumber(ctx context.Context, number rpc.BlockNumber, tx kv.Tx) (*types.Block, error) { - n, h, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) + n, h, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) if err != nil { return nil, err } @@ -299,7 +299,7 @@ func (api *BaseAPI) blockByRPCNumber(ctx context.Context, number rpc.BlockNumber } func (api *BaseAPI) headerByRPCNumber(ctx context.Context, number rpc.BlockNumber, tx kv.Tx) (*types.Header, error) { - n, h, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) + n, h, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) if err != nil { return nil, 
err } @@ -320,7 +320,7 @@ func (api *BaseAPI) checkPruneHistory(tx kv.Tx, block uint64) error { return nil } if p.History.Enabled() { - latest, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber), tx, api.filters) + latest, _, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber), tx, api.filters) if err != nil { return err } diff --git a/turbo/jsonrpc/eth_block.go b/turbo/jsonrpc/eth_block.go index d0155d6c854..2c66239b379 100644 --- a/turbo/jsonrpc/eth_block.go +++ b/turbo/jsonrpc/eth_block.go @@ -77,7 +77,7 @@ func (api *APIImpl) deprecated_CallBundle(ctx context.Context, txHashes []common } defer func(start time.Time) { log.Trace("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now()) - stateBlockNumber, hash, latest, err := rpchelper.GetBlockNumber(stateBlockNumberOrHash, tx, api.filters) + stateBlockNumber, hash, latest, err := rpchelper.GetBlockNumber_zkevm(stateBlockNumberOrHash, tx, api.filters) if err != nil { return nil, err } @@ -341,7 +341,7 @@ func (api *APIImpl) GetBlockTransactionCountByNumber(ctx context.Context, blockN return &n, nil } - blockNum, blockHash, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(blockNr), tx, api.filters) + blockNum, blockHash, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(blockNr), tx, api.filters) if err != nil { return nil, err } @@ -388,7 +388,7 @@ func (api *APIImpl) GetBlockTransactionCountByHash(ctx context.Context, blockHas } defer tx.Rollback() - blockNum, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHash{BlockHash: &blockHash}, tx, nil) + blockNum, _, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHash{BlockHash: &blockHash}, tx, nil) if err != nil { // (Compatibility) Every other node just return `null` for when the block does not exist. 
log.Debug("eth_getBlockTransactionCountByHash GetBlockNumber failed", "err", err) diff --git a/turbo/jsonrpc/eth_block_zkevm.go b/turbo/jsonrpc/eth_block_zkevm.go index d0a7d87c77f..6f82477f685 100644 --- a/turbo/jsonrpc/eth_block_zkevm.go +++ b/turbo/jsonrpc/eth_block_zkevm.go @@ -75,7 +75,7 @@ func (api *APIImpl) CallBundle(ctx context.Context, txHashes []common.Hash, stat } defer func(start time.Time) { log.Trace("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now()) - stateBlockNumber, hash, latest, err := rpchelper.GetBlockNumber(stateBlockNumberOrHash, tx, api.filters) + stateBlockNumber, hash, latest, err := rpchelper.GetBlockNumber_zkevm(stateBlockNumberOrHash, tx, api.filters) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/eth_call.go b/turbo/jsonrpc/eth_call.go index 690bb746fc3..188ff13331c 100644 --- a/turbo/jsonrpc/eth_call.go +++ b/turbo/jsonrpc/eth_call.go @@ -56,7 +56,7 @@ func (api *APIImpl) Call(ctx context.Context, args ethapi2.CallArgs, blockNrOrHa args.Gas = (*hexutil.Uint64)(&api.GasCap) } - blockNumber, hash, _, err := rpchelper.GetCanonicalBlockNumber(blockNrOrHash, tx, api.filters) // DoCall cannot be executed on non-canonical blocks + blockNumber, hash, _, err := rpchelper.GetCanonicalBlockNumber_zkevm(blockNrOrHash, tx, api.filters) // DoCall cannot be executed on non-canonical blocks if err != nil { return nil, err } @@ -92,7 +92,7 @@ func (api *APIImpl) Call(ctx context.Context, args ethapi2.CallArgs, blockNrOrHa // headerByNumberOrHash - intent to read recent headers only, tries from the lru cache before reading from the db func headerByNumberOrHash(ctx context.Context, tx kv.Tx, blockNrOrHash rpc.BlockNumberOrHash, api *APIImpl) (*types.Header, error) { - _, bNrOrHashHash, _, err := rpchelper.GetCanonicalBlockNumber(blockNrOrHash, tx, api.filters) + _, bNrOrHashHash, _, err := rpchelper.GetCanonicalBlockNumber_zkevm(blockNrOrHash, tx, api.filters) if err != nil { return nil, err } @@ -101,7 +101,7 @@ func headerByNumberOrHash(ctx context.Context, tx kv.Tx, blockNrOrHash rpc.Block return block.Header(), nil } - blockNum, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + blockNum, _, _, err := rpchelper.GetBlockNumber_zkevm(blockNrOrHash, tx, api.filters) if err != nil { return nil, err } @@ -227,7 +227,7 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs } engine := api.engine() - latestCanBlockNumber, latestCanHash, isLatest, err := rpchelper.GetCanonicalBlockNumber(bNrOrHash, dbtx, api.filters) // DoCall cannot be executed on non-canonical blocks + latestCanBlockNumber, latestCanHash, isLatest, err := rpchelper.GetCanonicalBlockNumber_zkevm(bNrOrHash, dbtx, api.filters) // DoCall cannot be executed on non-canonical blocks if err != nil { return 0, err } @@ -329,7 +329,7 @@ func (api *APIImpl) GetProof(ctx context.Context, address libcommon.Address, sto return nil, fmt.Errorf("not supported by Erigon3") } - blockNr, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + blockNr, _, _, err := rpchelper.GetBlockNumber_zkevm(blockNrOrHash, tx, api.filters) if err != nil { return nil, err } @@ -444,7 +444,7 @@ func (api *APIImpl) CreateAccessList(ctx context.Context, args ethapi2.CallArgs, } engine := api.engine() - blockNumber, hash, latest, err := rpchelper.GetCanonicalBlockNumber(bNrOrHash, tx, api.filters) // DoCall cannot be executed on non-canonical blocks + blockNumber, hash, latest, err := rpchelper.GetCanonicalBlockNumber_zkevm(bNrOrHash, tx, 
api.filters) // DoCall cannot be executed on non-canonical blocks if err != nil { return nil, err } diff --git a/turbo/jsonrpc/eth_callMany.go b/turbo/jsonrpc/eth_callMany.go index 5aa0d59bf9a..f1748f53a83 100644 --- a/turbo/jsonrpc/eth_callMany.go +++ b/turbo/jsonrpc/eth_callMany.go @@ -106,7 +106,7 @@ func (api *APIImpl) CallMany_deprecated(ctx context.Context, bundles []Bundle, s defer func(start time.Time) { log.Trace("Executing EVM callMany finished", "runtime", time.Since(start)) }(time.Now()) - blockNum, hash, _, err := rpchelper.GetBlockNumber(simulateContext.BlockNumber, tx, api.filters) + blockNum, hash, _, err := rpchelper.GetBlockNumber_zkevm(simulateContext.BlockNumber, tx, api.filters) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/eth_callMany_zkevm.go b/turbo/jsonrpc/eth_callMany_zkevm.go index d7b1c03f925..0c90a63e5f2 100644 --- a/turbo/jsonrpc/eth_callMany_zkevm.go +++ b/turbo/jsonrpc/eth_callMany_zkevm.go @@ -62,7 +62,7 @@ func (api *APIImpl) CallMany(ctx context.Context, bundles []Bundle, simulateCont defer func(start time.Time) { log.Trace("Executing EVM callMany finished", "runtime", time.Since(start)) }(time.Now()) - blockNum, hash, _, err := rpchelper.GetBlockNumber(simulateContext.BlockNumber, tx, api.filters) + blockNum, hash, _, err := rpchelper.GetBlockNumber_zkevm(simulateContext.BlockNumber, tx, api.filters) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/eth_receipts.go b/turbo/jsonrpc/eth_receipts.go index e95acd6dcab..1dd8b622468 100644 --- a/turbo/jsonrpc/eth_receipts.go +++ b/turbo/jsonrpc/eth_receipts.go @@ -118,7 +118,7 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) (t end = header.Number.Uint64() } else { // Convert the RPC block numbers into internal representations - latest, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber), tx, nil) + latest, _, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber), tx, nil) if err != nil { return nil, err } @@ -691,7 +691,7 @@ func (api *APIImpl) GetBlockReceipts(ctx context.Context, number rpc.BlockNumber } defer tx.Rollback() - blockNum, blockHash, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(*number.BlockNumber), tx, api.filters) + blockNum, blockHash, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(*number.BlockNumber), tx, api.filters) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/eth_txs.go b/turbo/jsonrpc/eth_txs.go index 530fe2352a3..7a4473ab3ae 100644 --- a/turbo/jsonrpc/eth_txs.go +++ b/turbo/jsonrpc/eth_txs.go @@ -236,7 +236,7 @@ func (api *APIImpl) GetTransactionByBlockNumberAndIndex_deprecated(ctx context.C } // https://infura.io/docs/ethereum/json-rpc/eth-getTransactionByBlockNumberAndIndex - blockNum, hash, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(blockNr), tx, api.filters) + blockNum, hash, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(blockNr), tx, api.filters) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/eth_txs_zkevm.go b/turbo/jsonrpc/eth_txs_zkevm.go index ac4cb12de00..1fc0fc26788 100644 --- a/turbo/jsonrpc/eth_txs_zkevm.go +++ b/turbo/jsonrpc/eth_txs_zkevm.go @@ -197,7 +197,7 @@ func (api *APIImpl) GetTransactionByBlockNumberAndIndex(ctx context.Context, blo } // https://infura.io/docs/ethereum/json-rpc/eth-getTransactionByBlockNumberAndIndex - blockNum, _, _, err := 
rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(blockNr), tx, api.filters) + blockNum, _, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(blockNr), tx, api.filters) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/eth_uncles.go b/turbo/jsonrpc/eth_uncles.go index f0fdeb646e2..66c3a7bcbf0 100644 --- a/turbo/jsonrpc/eth_uncles.go +++ b/turbo/jsonrpc/eth_uncles.go @@ -32,7 +32,7 @@ func (api *APIImpl) GetUncleByBlockNumberAndIndex(ctx context.Context, number rp } defer tx.Rollback() - blockNum, hash, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) + blockNum, hash, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) if err != nil { return nil, err } @@ -102,7 +102,7 @@ func (api *APIImpl) GetUncleCountByBlockNumber(ctx context.Context, number rpc.B } defer tx.Rollback() - blockNum, blockHash, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) + blockNum, blockHash, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) if err != nil { return &n, err } diff --git a/turbo/jsonrpc/graphql_api.go b/turbo/jsonrpc/graphql_api.go index 44eff638c60..e5b4034b30f 100644 --- a/turbo/jsonrpc/graphql_api.go +++ b/turbo/jsonrpc/graphql_api.go @@ -101,7 +101,7 @@ func (api *GraphQLAPIImpl) getBlockWithSenders(ctx context.Context, number rpc.B return api.pendingBlock(), nil, nil } - blockHeight, blockHash, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) + blockHeight, blockHash, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) if err != nil { return nil, nil, err } diff --git a/turbo/jsonrpc/otterscan_api.go b/turbo/jsonrpc/otterscan_api.go index 9b925903547..54b07f102bb 100644 --- a/turbo/jsonrpc/otterscan_api.go +++ b/turbo/jsonrpc/otterscan_api.go @@ -569,7 +569,7 @@ func (api *OtterscanAPIImpl) getBlockWithSenders(ctx context.Context, number rpc return api.pendingBlock(), nil, nil } - n, hash, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) + n, hash, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) if err != nil { return nil, nil, err } diff --git a/turbo/jsonrpc/otterscan_has_code.go b/turbo/jsonrpc/otterscan_has_code.go index af442e8d000..8f9bfd1fe55 100644 --- a/turbo/jsonrpc/otterscan_has_code.go +++ b/turbo/jsonrpc/otterscan_has_code.go @@ -17,7 +17,7 @@ func (api *OtterscanAPIImpl) HasCode(ctx context.Context, address common.Address } defer tx.Rollback() - blockNumber, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + blockNumber, _, _, err := rpchelper.GetBlockNumber_zkevm(blockNrOrHash, tx, api.filters) if err != nil { return false, err } diff --git a/turbo/jsonrpc/overlay_api.go b/turbo/jsonrpc/overlay_api.go index 0b6949f5b87..856452959c4 100644 --- a/turbo/jsonrpc/overlay_api.go +++ b/turbo/jsonrpc/overlay_api.go @@ -420,7 +420,7 @@ func (api *OverlayAPIImpl) replayBlock(ctx context.Context, blockNum uint64, sta overrideBlockHash = make(map[uint64]common.Hash) blockNumber := rpc.BlockNumber(blockNum) - blockNum, hash, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHash{BlockNumber: &blockNumber}, tx, api.filters) + blockNum, hash, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHash{BlockNumber: &blockNumber}, tx, api.filters) if err != nil { return 
nil, err } @@ -580,7 +580,7 @@ func getBeginEnd(ctx context.Context, tx kv.Tx, api *OverlayAPIImpl, crit filter end = num } else { // Convert the RPC block numbers into internal representations - latest, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(rpc.LatestExecutedBlockNumber), tx, nil) + latest, _, _, err := rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(rpc.LatestExecutedBlockNumber), tx, nil) if err != nil { return 0, 0, err } @@ -592,7 +592,7 @@ func getBeginEnd(ctx context.Context, tx kv.Tx, api *OverlayAPIImpl, crit filter begin = uint64(fromBlock) } else { blockNum := rpc.BlockNumber(fromBlock) - begin, _, _, err = rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(blockNum), tx, api.filters) + begin, _, _, err = rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(blockNum), tx, api.filters) if err != nil { return 0, 0, err } @@ -606,7 +606,7 @@ func getBeginEnd(ctx context.Context, tx kv.Tx, api *OverlayAPIImpl, crit filter end = uint64(toBlock) } else { blockNum := rpc.BlockNumber(toBlock) - end, _, _, err = rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(blockNum), tx, api.filters) + end, _, _, err = rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(blockNum), tx, api.filters) if err != nil { return 0, 0, err } diff --git a/turbo/jsonrpc/trace_adhoc.go b/turbo/jsonrpc/trace_adhoc.go index 99f7d259932..224127f4429 100644 --- a/turbo/jsonrpc/trace_adhoc.go +++ b/turbo/jsonrpc/trace_adhoc.go @@ -848,7 +848,7 @@ func (api *TraceAPIImpl) ReplayBlockTransactions(ctx context.Context, blockNrOrH return nil, err } - blockNumber, blockHash, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + blockNumber, blockHash, _, err := rpchelper.GetBlockNumber_zkevm(blockNrOrHash, tx, api.filters) if err != nil { return nil, err } @@ -923,7 +923,7 @@ func (api *TraceAPIImpl) Call(ctx context.Context, args TraceCallParam, traceTyp blockNrOrHash = &rpc.BlockNumberOrHash{BlockNumber: &num} } - blockNumber, hash, _, err := rpchelper.GetBlockNumber(*blockNrOrHash, tx, api.filters) + blockNumber, hash, _, err := rpchelper.GetBlockNumber_zkevm(*blockNrOrHash, tx, api.filters) if err != nil { return nil, err } @@ -1095,7 +1095,7 @@ func (api *TraceAPIImpl) CallMany(ctx context.Context, calls json.RawMessage, pa var num = rpc.LatestBlockNumber parentNrOrHash = &rpc.BlockNumberOrHash{BlockNumber: &num} } - blockNumber, hash, _, err := rpchelper.GetBlockNumber(*parentNrOrHash, dbtx, api.filters) + blockNumber, hash, _, err := rpchelper.GetBlockNumber_zkevm(*parentNrOrHash, dbtx, api.filters) if err != nil { return nil, err } @@ -1141,7 +1141,7 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type var num = rpc.LatestBlockNumber parentNrOrHash = &rpc.BlockNumberOrHash{BlockNumber: &num} } - blockNumber, hash, _, err := rpchelper.GetBlockNumber(*parentNrOrHash, dbtx, api.filters) + blockNumber, hash, _, err := rpchelper.GetBlockNumber_zkevm(*parentNrOrHash, dbtx, api.filters) if err != nil { return nil, nil, err } diff --git a/turbo/jsonrpc/trace_filtering.go b/turbo/jsonrpc/trace_filtering.go index 66dd279e23a..2cf36b3768d 100644 --- a/turbo/jsonrpc/trace_filtering.go +++ b/turbo/jsonrpc/trace_filtering.go @@ -174,7 +174,7 @@ func (api *TraceAPIImpl) Block(ctx context.Context, blockNr rpc.BlockNumber, gas return nil, err } defer tx.Rollback() - blockNum, hash, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(blockNr), tx, api.filters) + blockNum, hash, _, err := 
rpchelper.GetBlockNumber_zkevm(rpc.BlockNumberOrHashWithNumber(blockNr), tx, api.filters) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/tracing.go b/turbo/jsonrpc/tracing.go index 4372a0692b7..31f1efb9d87 100644 --- a/turbo/jsonrpc/tracing.go +++ b/turbo/jsonrpc/tracing.go @@ -62,7 +62,7 @@ func (api *PrivateDebugAPIImpl) traceBlock_deprecated(ctx context.Context, block return fmt.Errorf("invalid arguments; neither block nor hash specified") } - blockNumber, hash, _, err := rpchelper.GetCanonicalBlockNumber(blockNrOrHash, tx, api.filters) + blockNumber, hash, _, err := rpchelper.GetCanonicalBlockNumber_zkevm(blockNrOrHash, tx, api.filters) if err != nil { stream.WriteNil() return err @@ -307,7 +307,7 @@ func (api *PrivateDebugAPIImpl) TraceCall(ctx context.Context, args ethapi.CallA } engine := api.engine() - blockNumber, hash, isLatest, err := rpchelper.GetBlockNumber(blockNrOrHash, dbtx, api.filters) + blockNumber, hash, isLatest, err := rpchelper.GetBlockNumber_zkevm(blockNrOrHash, dbtx, api.filters) if err != nil { return fmt.Errorf("get block number: %v", err) } @@ -405,7 +405,7 @@ func (api *PrivateDebugAPIImpl) TraceCallMany_deprecated(ctx context.Context, bu defer func(start time.Time) { log.Trace("Tracing CallMany finished", "runtime", time.Since(start)) }(time.Now()) - blockNum, hash, _, err := rpchelper.GetBlockNumber(simulateContext.BlockNumber, tx, api.filters) + blockNum, hash, _, err := rpchelper.GetBlockNumber_zkevm(simulateContext.BlockNumber, tx, api.filters) if err != nil { stream.WriteNil() return err diff --git a/turbo/jsonrpc/tracing_zkevm.go b/turbo/jsonrpc/tracing_zkevm.go index 6ea4719f881..145e64265de 100644 --- a/turbo/jsonrpc/tracing_zkevm.go +++ b/turbo/jsonrpc/tracing_zkevm.go @@ -209,7 +209,7 @@ func (api *PrivateDebugAPIImpl) TraceCallMany(ctx context.Context, bundles []Bun defer func(start time.Time) { log.Trace("Tracing CallMany finished", "runtime", time.Since(start)) }(time.Now()) - blockNum, hash, _, err := rpchelper.GetBlockNumber(simulateContext.BlockNumber, tx, api.filters) + blockNum, hash, _, err := rpchelper.GetBlockNumber_zkevm(simulateContext.BlockNumber, tx, api.filters) if err != nil { stream.WriteNil() return err diff --git a/turbo/jsonrpc/zkevm_api.go b/turbo/jsonrpc/zkevm_api.go index 14dd0b77c83..4f3968f3d17 100644 --- a/turbo/jsonrpc/zkevm_api.go +++ b/turbo/jsonrpc/zkevm_api.go @@ -15,7 +15,6 @@ import ( "github.com/ledgerwatch/log/v3" zktypes "github.com/ledgerwatch/erigon/zk/types" - "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/common/hexutil" "github.com/ledgerwatch/erigon-lib/kv/membatchwithdb" @@ -45,6 +44,8 @@ import ( "github.com/ledgerwatch/erigon/zk/witness" "github.com/ledgerwatch/erigon/zkevm/hex" "github.com/ledgerwatch/erigon/zkevm/jsonrpc/client" + "github.com/ledgerwatch/erigon/core/systemcontracts" + "math" ) var sha3UncleHash = common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347") @@ -77,6 +78,8 @@ type ZkEvmAPI interface { GetForkById(ctx context.Context, forkId hexutil.Uint64) (res json.RawMessage, err error) GetForkIdByBatchNumber(ctx context.Context, batchNumber rpc.BlockNumber) (hexutil.Uint64, error) GetForks(ctx context.Context) (res json.RawMessage, err error) + GetRollupAddress(ctx context.Context) (res json.RawMessage, err error) + GetRollupManagerAddress(ctx context.Context) (res json.RawMessage, err error) } const getBatchWitness = "getBatchWitness" @@ -91,7 +94,7 @@ type 
ZkEvmAPIImpl struct { l1Syncer *syncer.L1Syncer l2SequencerUrl string semaphores map[string]chan struct{} - datastreamServer *server.DataStreamServer + datastreamServer server.DataStreamServer } func (api *ZkEvmAPIImpl) initializeSemaphores(functionLimits map[string]int) { @@ -112,14 +115,9 @@ func NewZkEvmAPI( zkConfig *ethconfig.Config, l1Syncer *syncer.L1Syncer, l2SequencerUrl string, - datastreamServer *datastreamer.StreamServer, + dataStreamServer server.DataStreamServer, ) *ZkEvmAPIImpl { - var streamServer *server.DataStreamServer - if datastreamServer != nil { - streamServer = server.NewDataStreamServer(datastreamServer, zkConfig.Zk.L2ChainId) - } - a := &ZkEvmAPIImpl{ ethApi: base, db: db, @@ -127,7 +125,7 @@ func NewZkEvmAPI( config: zkConfig, l1Syncer: l1Syncer, l2SequencerUrl: l2SequencerUrl, - datastreamServer: streamServer, + datastreamServer: dataStreamServer, } a.initializeSemaphores(map[string]int{ @@ -551,10 +549,13 @@ func (api *ZkEvmAPIImpl) GetBatchByNumber(ctx context.Context, rpcBatchNumber rp batch.Timestamp = types.ArgUint64(block.Time()) } - // if we don't have a datastream available to verify that a batch is actually - // closed then we fall back to existing behaviour of checking if the next batch - // has any blocks in it - if api.datastreamServer != nil { + /* + if node is a sequencer it won't have the required data stored in the db, so use the datastream + server to figure out if the batch is closed, otherwise fall back. This ensures good performance + for RPC nodes in daisy chain node which do have a datastream (previous check was testing for + presence of datastream server). + */ + if sequencer.IsSequencer() { highestClosed, err := api.datastreamServer.GetHighestClosedBatchNoCache() if err != nil { return nil, err @@ -573,7 +574,20 @@ func (api *ZkEvmAPIImpl) GetBatchByNumber(ctx context.Context, rpcBatchNumber rp return nil, err } - batch.Closed = batchNo <= latestClosedbatchNum + if batchNo <= latestClosedbatchNum { + // simple check if we have a closed batch entry higher than or equal to the one requested + batch.Closed = true + } else { + // we might be missing a batch end along the way so lets double check if we have a block + // from the next batch or not + _, foundHigher, err := hermezDb.GetLowestBlockInBatch(batchNo + 1) + if err != nil { + return nil, err + } + if foundHigher { + batch.Closed = true + } + } } // verification - if we can't find one, maybe this batch was verified along with a higher batch number @@ -718,7 +732,25 @@ func (api *ZkEvmAPIImpl) getAccInputHash(ctx context.Context, db SequenceReader, } if prevSequence == nil || batchSequence == nil { - return nil, fmt.Errorf("failed to get sequence data for batch %d", batchNum) + var missing string + if prevSequence == nil && batchSequence == nil { + missing = "previous and current batch sequences" + } else if prevSequence == nil { + missing = "previous batch sequence" + } else { + missing = "current batch sequence" + } + return nil, fmt.Errorf("failed to get %s for batch %d", missing, batchNum) + } + + // if we are asking for the injected batch or genesis return 0x0..0 + if (batchNum == 0 || batchNum == 1) && prevSequence.BatchNo == 0 { + return &common.Hash{}, nil + } + + // if prev is 0, set to 1 (injected batch) + if prevSequence.BatchNo == 0 { + prevSequence.BatchNo = 1 } // get batch range for sequence @@ -764,6 +796,7 @@ func (api *ZkEvmAPIImpl) getAccInputHash(ctx context.Context, db SequenceReader, // calculate acc input hash for i := 0; i < int(batchNum-prevSequenceBatch); i++ { 
accInputHash = accInputHashCalcFn(prevSequenceAccinputHash, i) + prevSequenceAccinputHash = *accInputHash } return @@ -986,6 +1019,7 @@ func (api *ZkEvmAPIImpl) buildGenerator(ctx context.Context, tx kv.Tx, witnessMo chainConfig, api.config.Zk, api.ethApi._engine, + api.config.WitnessContractInclusion, ) fullWitness := false @@ -1009,12 +1043,12 @@ func (api *ZkEvmAPIImpl) getBlockRangeWitness(ctx context.Context, db kv.RoDB, s return nil, fmt.Errorf("not supported by Erigon3") } - blockNr, _, _, err := rpchelper.GetCanonicalBlockNumber(startBlockNrOrHash, tx, api.ethApi.filters) // DoCall cannot be executed on non-canonical blocks + blockNr, _, _, err := rpchelper.GetCanonicalBlockNumber_zkevm(startBlockNrOrHash, tx, api.ethApi.filters) // DoCall cannot be executed on non-canonical blocks if err != nil { return nil, err } - endBlockNr, _, _, err := rpchelper.GetCanonicalBlockNumber(endBlockNrOrHash, tx, api.ethApi.filters) // DoCall cannot be executed on non-canonical blocks + endBlockNr, _, _, err := rpchelper.GetCanonicalBlockNumber_zkevm(endBlockNrOrHash, tx, api.ethApi.filters) // DoCall cannot be executed on non-canonical blocks if err != nil { return nil, err @@ -1608,7 +1642,7 @@ func (zkapi *ZkEvmAPIImpl) GetProof(ctx context.Context, address common.Address, return nil, fmt.Errorf("not supported by Erigon3") } - blockNr, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + blockNr, _, _, err := rpchelper.GetBlockNumber_zkevm(blockNrOrHash, tx, api.filters) if err != nil { return nil, err } @@ -1672,7 +1706,31 @@ func (zkapi *ZkEvmAPIImpl) GetProof(ctx context.Context, address common.Address, ibs.GetState(address, &key, value) } - rl, err := tds.ResolveSMTRetainList() + blockNumber, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + if err != nil { + return nil, err + } + + chainCfg, err := api.chainConfig(ctx, tx) + if err != nil { + return nil, err + } + + plainState := state.NewPlainState(tx, blockNumber, systemcontracts.SystemContractCodeLookup[chainCfg.ChainName]) + defer plainState.Close() + + inclusion := make(map[libcommon.Address][]libcommon.Hash) + for _, contract := range zkapi.config.WitnessContractInclusion { + err = plainState.ForEachStorage(contract, libcommon.Hash{}, func(key, secKey libcommon.Hash, value uint256.Int) bool { + inclusion[contract] = append(inclusion[contract], key) + return false + }, math.MaxInt64) + if err != nil { + return nil, err + } + } + + rl, err := tds.ResolveSMTRetainList(inclusion) if err != nil { return nil, err } @@ -1846,3 +1904,25 @@ func (api *ZkEvmAPIImpl) GetForks(ctx context.Context) (res json.RawMessage, err return forksJson, err } + +func (api *ZkEvmAPIImpl) GetRollupAddress(ctx context.Context) (res json.RawMessage, err error) { + rollupAddress := api.config.AddressZkevm + + rollupAddressJson, err := json.Marshal(rollupAddress) + if err != nil { + return nil, err + } + + return rollupAddressJson, err +} + +func (api *ZkEvmAPIImpl) GetRollupManagerAddress(ctx context.Context) (res json.RawMessage, err error) { + rollupManagerAddress := api.config.AddressRollup + + rollupManagerAddressJson, err := json.Marshal(rollupManagerAddress) + if err != nil { + return nil, err + } + + return rollupManagerAddressJson, err +} diff --git a/turbo/jsonrpc/zkevm_api_test.go b/turbo/jsonrpc/zkevm_api_test.go index 98d48ab2e24..c9cda1e73f8 100644 --- a/turbo/jsonrpc/zkevm_api_test.go +++ b/turbo/jsonrpc/zkevm_api_test.go @@ -1448,3 +1448,77 @@ func TestGetForks(t *testing.T) { assert.Equal(forks[2].Version, 
"") assert.Equal(forks[2].BlockNumber, hexutil.Uint64(3000)) } + +func TestGetRollupAddress(t *testing.T) { + assert := assert.New(t) + + ////////////// + contractBackend := backends.NewTestSimulatedBackendWithConfig(t, gspec.Alloc, gspec.Config, gspec.GasLimit) + defer contractBackend.Close() + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + contractBackend.Commit() + /////////// + + db := contractBackend.DB() + agg := contractBackend.Agg() + + baseApi := NewBaseApi(nil, stateCache, contractBackend.BlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout, contractBackend.Engine(), datadir.New(t.TempDir())) + ethImpl := NewEthAPI(baseApi, db, nil, nil, nil, 5000000, 100_000, 100_000, ðconfig.Defaults, false, 100, 100, log.New()) + var l1Syncer *syncer.L1Syncer + zkEvmImpl := NewZkEvmAPI(ethImpl, db, 100_000, ðconfig.Defaults, l1Syncer, "", nil) + + // Call the GetRollupAddress method and check that the result matches the default value. + var result common.Address + rollupAddress, err := zkEvmImpl.GetRollupAddress(ctx) + assert.NoError(err) + + err = json.Unmarshal(rollupAddress, &result) + assert.NoError(err) + assert.Equal(result, common.HexToAddress("0x0")) + + // Modify the ZkConfig and retry calling the method. + zkEvmImpl.config.AddressZkevm = common.HexToAddress("0x1") + rollupAddress, err = zkEvmImpl.GetRollupAddress(ctx) + assert.NoError(err) + + err = json.Unmarshal(rollupAddress, &result) + assert.NoError(err) + assert.Equal(result, common.HexToAddress("0x1")) +} + +func TestGetRollupManagerAddress(t *testing.T) { + assert := assert.New(t) + + ////////////// + contractBackend := backends.NewTestSimulatedBackendWithConfig(t, gspec.Alloc, gspec.Config, gspec.GasLimit) + defer contractBackend.Close() + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + contractBackend.Commit() + /////////// + + db := contractBackend.DB() + agg := contractBackend.Agg() + + baseApi := NewBaseApi(nil, stateCache, contractBackend.BlockReader(), agg, false, rpccfg.DefaultEvmCallTimeout, contractBackend.Engine(), datadir.New(t.TempDir())) + ethImpl := NewEthAPI(baseApi, db, nil, nil, nil, 5000000, 100_000, 100_000, ðconfig.Defaults, false, 100, 100, log.New()) + var l1Syncer *syncer.L1Syncer + zkEvmImpl := NewZkEvmAPI(ethImpl, db, 100_000, ðconfig.Defaults, l1Syncer, "", nil) + + // Call the GetRollupManagerAddress method and check that the result matches the default value. + var result common.Address + rollupManagerAddress, err := zkEvmImpl.GetRollupManagerAddress(ctx) + assert.NoError(err) + + err = json.Unmarshal(rollupManagerAddress, &result) + assert.NoError(err) + assert.Equal(result, common.HexToAddress("0x0")) + + // Modify the ZkConfig and retry calling the method. 
+ zkEvmImpl.config.AddressRollup = common.HexToAddress("0x1") + rollupManagerAddress, err = zkEvmImpl.GetRollupManagerAddress(ctx) + assert.NoError(err) + + err = json.Unmarshal(rollupManagerAddress, &result) + assert.NoError(err) + assert.Equal(result, common.HexToAddress("0x1")) +} diff --git a/turbo/jsonrpc/zkevm_counters.go b/turbo/jsonrpc/zkevm_counters.go index 7d28b26ad21..00106390d85 100644 --- a/turbo/jsonrpc/zkevm_counters.go +++ b/turbo/jsonrpc/zkevm_counters.go @@ -130,7 +130,7 @@ func (zkapi *ZkEvmAPIImpl) EstimateCounters(ctx context.Context, rpcTx *zkevmRPC } engine := api.engine() - latestCanBlockNumber, latestCanHash, isLatest, err := rpchelper.GetCanonicalBlockNumber(latestNumOrHash, dbtx, api.filters) // DoCall cannot be executed on non-canonical blocks + latestCanBlockNumber, latestCanHash, isLatest, err := rpchelper.GetCanonicalBlockNumber_zkevm(latestNumOrHash, dbtx, api.filters) // DoCall cannot be executed on non-canonical blocks if err != nil { return nil, err } diff --git a/turbo/rpchelper/helper.go b/turbo/rpchelper/helper.go index 4c09d49d774..7cecd4826d5 100644 --- a/turbo/rpchelper/helper.go +++ b/turbo/rpchelper/helper.go @@ -17,6 +17,7 @@ import ( borfinality "github.com/ledgerwatch/erigon/polygon/bor/finality" "github.com/ledgerwatch/erigon/polygon/bor/finality/whitelist" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/zk/sequencer" ) // unable to decode supplied params, or an invalid number of parameters @@ -37,9 +38,18 @@ func GetCanonicalBlockNumber(blockNrOrHash rpc.BlockNumberOrHash, tx kv.Tx, filt } func _GetBlockNumber(requireCanonical bool, blockNrOrHash rpc.BlockNumberOrHash, tx kv.Tx, filters *Filters) (blockNumber uint64, hash libcommon.Hash, latest bool, err error) { - finishedBlockNumber, err := stages.GetStageProgress(tx, stages.Finish) - if err != nil { - return 0, libcommon.Hash{}, false, fmt.Errorf("getting finished block number: %w", err) + var finishedBlockNumber uint64 + + if !sequencer.IsSequencer() { + finishedBlockNumber, err = stages.GetStageProgress(tx, stages.Finish) + if err != nil { + return 0, libcommon.Hash{}, false, fmt.Errorf("getting finished block number: %w", err) + } + } else { + finishedBlockNumber, err = stages.GetStageProgress(tx, stages.Execution) + if err != nil { + return 0, libcommon.Hash{}, false, fmt.Errorf("getting finished block number: %w", err) + } } var ok bool diff --git a/turbo/rpchelper/helper_zkevm.go b/turbo/rpchelper/helper_zkevm.go index 754d42c1cbd..659c23df7de 100644 --- a/turbo/rpchelper/helper_zkevm.go +++ b/turbo/rpchelper/helper_zkevm.go @@ -1,12 +1,18 @@ package rpchelper import ( + "errors" "fmt" + libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + borfinality "github.com/ledgerwatch/erigon/polygon/bor/finality" + "github.com/ledgerwatch/erigon/polygon/bor/finality/whitelist" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/zk/hermez_db" + "github.com/ledgerwatch/erigon/zk/sequencer" ) func GetBatchNumber(rpcBatchNumber rpc.BlockNumber, tx kv.Tx, filters *Filters) (batchNumber uint64, latest bool, err error) { @@ -41,3 +47,96 @@ func GetBatchNumber(rpcBatchNumber rpc.BlockNumber, tx kv.Tx, filters *Filters) return batchNumber, latest, nil } + +func GetBlockNumber_zkevm(blockNrOrHash rpc.BlockNumberOrHash, tx kv.Tx, filters *Filters) (uint64, libcommon.Hash, bool, error) { + return 
_GetBlockNumber_zkevm(blockNrOrHash.RequireCanonical, blockNrOrHash, tx, filters) +} + +func GetCanonicalBlockNumber_zkevm(blockNrOrHash rpc.BlockNumberOrHash, tx kv.Tx, filters *Filters) (uint64, libcommon.Hash, bool, error) { + return _GetBlockNumber_zkevm(true, blockNrOrHash, tx, filters) +} + +func _GetBlockNumber_zkevm(requireCanonical bool, blockNrOrHash rpc.BlockNumberOrHash, tx kv.Tx, filters *Filters) (blockNumber uint64, hash libcommon.Hash, latest bool, err error) { + blockFinalizationType := stages.Finish + if sequencer.IsSequencer() { + blockFinalizationType = stages.Execution + } + + finishedBlockNumber, err := stages.GetStageProgress(tx, blockFinalizationType) + if err != nil { + return 0, libcommon.Hash{}, false, fmt.Errorf("getting finished block number: %w", err) + } + + var ok bool + hash, ok = blockNrOrHash.Hash() + if !ok { + number := *blockNrOrHash.BlockNumber + switch number { + case rpc.LatestBlockNumber: + if blockNumber, err = GetLatestFinishedBlockNumber(tx); err != nil { + return 0, libcommon.Hash{}, false, err + } + case rpc.EarliestBlockNumber: + blockNumber = 0 + case rpc.FinalizedBlockNumber: + if whitelist.GetWhitelistingService() != nil { + num := borfinality.GetFinalizedBlockNumber(tx) + if num == 0 { + // nolint + return 0, libcommon.Hash{}, false, errors.New("No finalized block") + } + + blockNum := borfinality.CurrentFinalizedBlock(tx, num).NumberU64() + blockHash := rawdb.ReadHeaderByNumber(tx, blockNum).Hash() + return blockNum, blockHash, false, nil + } + blockNumber, err = GetFinalizedBlockNumber(tx) + if err != nil { + return 0, libcommon.Hash{}, false, err + } + case rpc.SafeBlockNumber: + // [zkevm] safe not available, returns finilized instead + // blockNumber, err = GetSafeBlockNumber(tx) + blockNumber, err = GetFinalizedBlockNumber(tx) + if err != nil { + return 0, libcommon.Hash{}, false, err + } + case rpc.PendingBlockNumber: + pendingBlock := filters.LastPendingBlock() + if pendingBlock == nil { + blockNumber = finishedBlockNumber + } else { + return pendingBlock.NumberU64(), pendingBlock.Hash(), false, nil + } + case rpc.LatestExecutedBlockNumber: + blockNumber, err = stages.GetStageProgress(tx, stages.Execution) + if err != nil { + return 0, libcommon.Hash{}, false, fmt.Errorf("getting latest executed block number: %w", err) + } + default: + blockNumber = uint64(number.Int64()) + if blockNumber > finishedBlockNumber { + return 0, libcommon.Hash{}, false, fmt.Errorf("block with number %d not found", blockNumber) + } + } + hash, err = rawdb.ReadCanonicalHash(tx, blockNumber) + if err != nil { + return 0, libcommon.Hash{}, false, err + } + } else { + number := rawdb.ReadHeaderNumber(tx, hash) + if number == nil { + return 0, libcommon.Hash{}, false, fmt.Errorf("block %x not found", hash) + } + blockNumber = *number + + ch, err := rawdb.ReadCanonicalHash(tx, blockNumber) + if err != nil { + return 0, libcommon.Hash{}, false, err + } + if requireCanonical && ch != hash { + return 0, libcommon.Hash{}, false, nonCanonocalHashError{hash} + } + } + return blockNumber, hash, blockNumber == finishedBlockNumber, nil +} diff --git a/turbo/stages/zk_stages.go b/turbo/stages/zk_stages.go index 1a796e37250..a585503c0e0 100644 --- a/turbo/stages/zk_stages.go +++ b/turbo/stages/zk_stages.go @@ -3,7 +3,6 @@ package stages import ( "context" - "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader" "github.com/ledgerwatch/erigon-lib/kv" 
"github.com/ledgerwatch/erigon-lib/state" @@ -16,11 +15,12 @@ import ( "github.com/ledgerwatch/erigon/turbo/engineapi/engine_helpers" "github.com/ledgerwatch/erigon/turbo/shards" "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" + "github.com/ledgerwatch/erigon/zk/datastream/server" + "github.com/ledgerwatch/erigon/zk/l1infotree" "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" zkStages "github.com/ledgerwatch/erigon/zk/stages" "github.com/ledgerwatch/erigon/zk/syncer" "github.com/ledgerwatch/erigon/zk/txpool" - "github.com/ledgerwatch/erigon/zk/l1infotree" ) // NewDefaultZkStages creates stages for zk syncer (RPC mode) @@ -36,7 +36,7 @@ func NewDefaultZkStages(ctx context.Context, engine consensus.Engine, l1Syncer *syncer.L1Syncer, datastreamClient zkStages.DatastreamClient, - datastreamServer *datastreamer.StreamServer, + dataStreamServer server.DataStreamServer, infoTreeUpdater *l1infotree.Updater, ) []*stagedsync.Stage { dirs := cfg.Dirs @@ -54,7 +54,7 @@ func NewDefaultZkStages(ctx context.Context, zkStages.StageL1SyncerCfg(db, l1Syncer, cfg.Zk), zkStages.StageL1InfoTreeCfg(db, cfg.Zk, infoTreeUpdater), zkStages.StageBatchesCfg(db, datastreamClient, cfg.Zk, controlServer.ChainConfig, &cfg.Miner), - zkStages.StageDataStreamCatchupCfg(datastreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion, cfg.HasExecutors()), + zkStages.StageDataStreamCatchupCfg(dataStreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion, cfg.HasExecutors()), stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter), stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, nil), stagedsync.StageExecuteBlocksCfg( @@ -99,7 +99,7 @@ func NewSequencerZkStages(ctx context.Context, agg *state.Aggregator, forkValidator *engine_helpers.ForkValidator, engine consensus.Engine, - datastreamServer *datastreamer.StreamServer, + dataStreamServer server.DataStreamServer, sequencerStageSyncer *syncer.L1Syncer, l1Syncer *syncer.L1Syncer, l1BlockSyncer *syncer.L1Syncer, @@ -120,7 +120,7 @@ func NewSequencerZkStages(ctx context.Context, zkStages.StageL1SequencerSyncCfg(db, cfg.Zk, sequencerStageSyncer), zkStages.StageL1InfoTreeCfg(db, cfg.Zk, infoTreeUpdater), zkStages.StageSequencerL1BlockSyncCfg(db, cfg.Zk, l1BlockSyncer), - zkStages.StageDataStreamCatchupCfg(datastreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion, cfg.HasExecutors()), + zkStages.StageDataStreamCatchupCfg(dataStreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion, cfg.HasExecutors()), zkStages.StageSequenceBlocksCfg( db, cfg.Prune, @@ -138,7 +138,7 @@ func NewSequencerZkStages(ctx context.Context, cfg.Genesis, cfg.Sync, agg, - datastreamServer, + dataStreamServer, cfg.Zk, &cfg.Miner, txPool, diff --git a/zk/datastream/client/stream_client.go b/zk/datastream/client/stream_client.go index e10471863a1..1c536b87095 100644 --- a/zk/datastream/client/stream_client.go +++ b/zk/datastream/client/stream_client.go @@ -13,6 +13,7 @@ import ( "github.com/ledgerwatch/erigon/zk/datastream/proto/github.com/0xPolygonHermez/zkevm-node/state/datastream" "github.com/ledgerwatch/erigon/zk/datastream/types" "github.com/ledgerwatch/log/v3" + "sync" ) type StreamType uint64 @@ -49,7 +50,8 @@ type StreamClient struct { // atomic lastWrittenTime atomic.Int64 - streaming atomic.Bool + mtxStreaming *sync.Mutex + streaming bool progress atomic.Uint64 stopReadingToChannel atomic.Bool @@ -58,6 
+60,11 @@ type StreamClient struct { // keeps track of the latest fork from the stream to assign to l2 blocks currentFork uint64 + + // used for testing, during normal execution lots of stop streaming commands are sent + // which makes sense for an active server listening for these things but in unit tests + // this makes behaviour very unpredictable and hard to test + allowStops bool } const ( @@ -83,6 +90,7 @@ func NewClient(ctx context.Context, server string, version int, checkTimeout tim streamType: StSequencer, entryChan: make(chan interface{}, 100000), currentFork: uint64(latestDownloadedForkId), + mtxStreaming: &sync.Mutex{}, } return c @@ -133,7 +141,9 @@ func (c *StreamClient) GetL2BlockByNumber(blockNum uint64) (fullBLock *types.Ful if errors.Is(err, types.ErrAlreadyStarted) { // if the client is already started, we can stop the client and try again - c.Stop() + if errStop := c.Stop(); errStop != nil { + log.Warn("failed to send stop command", "error", errStop) + } } else if !errors.Is(err, ErrSocket) { return nil, fmt.Errorf("getL2BlockByNumber: %w", err) } @@ -142,6 +152,7 @@ func (c *StreamClient) GetL2BlockByNumber(blockNum uint64) (fullBLock *types.Ful time.Sleep(1 * time.Second) connected = c.handleSocketError(err) count++ + err = nil } return fullBLock, nil @@ -182,6 +193,10 @@ func (c *StreamClient) getL2BlockByNumber(blockNum uint64) (l2Block *types.FullL return nil, fmt.Errorf("expected block number %d but got %d", blockNum, l2Block.L2BlockNumber) } + if err := c.Stop(); err != nil { + return nil, fmt.Errorf("Stop: %w", err) + } + return l2Block, nil } @@ -203,16 +218,25 @@ func (c *StreamClient) GetLatestL2Block() (l2Block *types.FullL2Block, err error return nil, ErrFailedAttempts } if connected { - if err := c.stopStreamingIfStarted(); err != nil { - return nil, fmt.Errorf("stopStreamingIfStarted: %w", err) + if err = c.stopStreamingIfStarted(); err != nil { + err = fmt.Errorf("stopStreamingIfStarted: %w", err) } - - if l2Block, err = c.getLatestL2Block(); err == nil { - break + if err == nil { + if l2Block, err = c.getLatestL2Block(); err == nil { + break + } + err = fmt.Errorf("getLatestL2Block: %w", err) } - if !errors.Is(err, ErrSocket) { - return nil, fmt.Errorf("getLatestL2Block: %w", err) + + if err != nil && !errors.Is(err, ErrSocket) { + return nil, err + } else if errors.Is(err, types.ErrAlreadyStarted) { + // if the client is already started, we can stop the client and try again + if errStop := c.Stop(); errStop != nil { + log.Warn("failed to send stop command", "error", errStop) + } } + err = nil } time.Sleep(1 * time.Second) @@ -222,17 +246,31 @@ func (c *StreamClient) GetLatestL2Block() (l2Block *types.FullL2Block, err error return l2Block, nil } +func (c *StreamClient) getStreaming() bool { + c.mtxStreaming.Lock() + defer c.mtxStreaming.Unlock() + return c.streaming +} + +func (c *StreamClient) setStreaming(val bool) { + c.mtxStreaming.Lock() + defer c.mtxStreaming.Unlock() + c.streaming = val +} + // don't check for errors here, we just need to empty the socket for next reads func (c *StreamClient) stopStreamingIfStarted() error { - if c.streaming.Load() { - c.sendStopCmd() - c.streaming.Store(false) + if c.getStreaming() { + if err := c.sendStopCmd(); err != nil { + return fmt.Errorf("sendStopCmd: %w", err) + } + c.setStreaming(false) } // empty the socket buffer for { - c.conn.SetReadDeadline(time.Now().Add(100)) - if _, err := c.readBuffer(100); err != nil { + c.conn.SetReadDeadline(time.Now().Add(1 * time.Millisecond)) + if _, err := 
readBuffer(c.conn, 1000 /* arbitrary number*/); err != nil { break } } @@ -271,6 +309,10 @@ func (c *StreamClient) getLatestL2Block() (l2Block *types.FullL2Block, err error return nil, errors.New("no block found") } + if err := c.Stop(); err != nil { + return nil, fmt.Errorf("Stop: %w", err) + } + return l2Block, nil } @@ -294,15 +336,15 @@ func (c *StreamClient) Start() error { return nil } -func (c *StreamClient) Stop() { - if c.conn == nil { - return +func (c *StreamClient) Stop() error { + if c.conn == nil || !c.allowStops { + return nil } if err := c.sendStopCmd(); err != nil { - log.Warn(fmt.Sprintf("send stop command: %v", err)) + return fmt.Errorf("sendStopCmd: %w", err) } - // c.conn.Close() - // c.conn = nil + + return nil } // Command header: Get status @@ -467,7 +509,7 @@ func (c *StreamClient) handleSocketError(socketErr error) bool { // reads entries to the end of the stream // at end will wait for new entries to arrive func (c *StreamClient) readAllEntriesToChannel() (err error) { - c.streaming.Store(true) + c.setStreaming(true) c.stopReadingToChannel.Store(false) var bookmark *types.BookmarkProto @@ -502,6 +544,8 @@ func (c *StreamClient) initiateDownloadBookmark(bookmark []byte) (*types.ResultE return nil, fmt.Errorf("sendBookmarkCmd: %w", err) } + c.setStreaming(true) + re, err := c.afterStartCommand() if err != nil { return re, fmt.Errorf("afterStartCommand: %w", err) @@ -945,3 +989,11 @@ func (c *StreamClient) resetReadTimeout() error { return nil } + +// PrepUnwind handles the state of the client prior to searching to the +// common ancestor block +func (c *StreamClient) PrepUnwind() { + // this is to ensure that the later call to stop streaming if streaming + // is activated. + c.setStreaming(true) +} diff --git a/zk/datastream/client/stream_client_test.go b/zk/datastream/client/stream_client_test.go index f8078889e6b..db0f80e088a 100644 --- a/zk/datastream/client/stream_client_test.go +++ b/zk/datastream/client/stream_client_test.go @@ -50,7 +50,7 @@ func TestStreamClientReadHeaderEntry(t *testing.T) { } for _, testCase := range testCases { - c := NewClient(context.Background(), "", 0, 2*time.Second, 0) + c := NewClient(context.Background(), "", 0, 500*time.Millisecond, 0) server, conn := net.Pipe() defer server.Close() defer c.Stop() @@ -118,7 +118,7 @@ func TestStreamClientReadResultEntry(t *testing.T) { } for _, testCase := range testCases { - c := NewClient(context.Background(), "", 0, 2*time.Second, 0) + c := NewClient(context.Background(), "", 0, 500*time.Millisecond, 0) server, conn := net.Pipe() defer server.Close() defer c.Stop() @@ -191,7 +191,7 @@ func TestStreamClientReadFileEntry(t *testing.T) { }, } for _, testCase := range testCases { - c := NewClient(context.Background(), "", 0, 2*time.Second, 0) + c := NewClient(context.Background(), "", 0, 500*time.Millisecond, 0) server, conn := net.Pipe() defer c.Stop() defer server.Close() @@ -215,7 +215,7 @@ func TestStreamClientReadFileEntry(t *testing.T) { } func TestStreamClientReadParsedProto(t *testing.T) { - c := NewClient(context.Background(), "", 0, 2*time.Second, 0) + c := NewClient(context.Background(), "", 0, 500*time.Millisecond, 0) serverConn, clientConn := net.Pipe() c.conn = clientConn c.checkTimeout = 1 * time.Second @@ -287,9 +287,10 @@ func TestStreamClientGetLatestL2Block(t *testing.T) { clientConn.Close() }() - c := NewClient(context.Background(), "", 0, 2*time.Second, 0) + c := NewClient(context.Background(), "", 0, 500*time.Millisecond, 0) c.conn = clientConn c.checkTimeout = 1 * 
time.Second + c.allowStops = false expectedL2Block, _ := createL2BlockAndTransactions(t, 5, 0) l2BlockProto := &types.L2BlockProto{L2Block: expectedL2Block} l2BlockRaw, err := l2BlockProto.Marshal() @@ -400,11 +401,12 @@ func TestStreamClientGetL2BlockByNumber(t *testing.T) { clientConn.Close() }() - c := NewClient(context.Background(), "", 0, 2*time.Second, 0) + c := NewClient(context.Background(), "", 0, 500*time.Millisecond, 0) c.header = &types.HeaderEntry{ TotalEntries: 4, } c.conn = clientConn + c.allowStops = false c.checkTimeout = 1 * time.Second bookmark := types.NewBookmarkProto(blockNum, datastream.BookmarkType_BOOKMARK_TYPE_L2_BLOCK) bookmarkRaw, err := bookmark.Marshal() @@ -487,7 +489,6 @@ func TestStreamClientGetL2BlockByNumber(t *testing.T) { return } } - } go createServerResponses(t, serverConn, bookmarkRaw, l2BlockRaw, l2TxsRaw, l2BlockEndRaw, errCh) diff --git a/zk/datastream/mock_services/data_stream_server_mock.go b/zk/datastream/mock_services/data_stream_server_mock.go new file mode 100644 index 00000000000..0ef44befd6d --- /dev/null +++ b/zk/datastream/mock_services/data_stream_server_mock.go @@ -0,0 +1,660 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ledgerwatch/erigon/zk/datastream/server (interfaces: DataStreamServer) +// +// Generated by this command: +// +// mockgen -typed=true -destination=../mocks/data_stream_server_mock.go -package=mocks . DataStreamServer +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + common "github.com/ledgerwatch/erigon-lib/common" + kv "github.com/ledgerwatch/erigon-lib/kv" + types "github.com/ledgerwatch/erigon/core/types" + server "github.com/ledgerwatch/erigon/zk/datastream/server" + types0 "github.com/ledgerwatch/erigon/zk/datastream/types" + hermez_db "github.com/ledgerwatch/erigon/zk/hermez_db" + gomock "go.uber.org/mock/gomock" +) + +// MockDataStreamServer is a mock of DataStreamServer interface. +type MockDataStreamServer struct { + ctrl *gomock.Controller + recorder *MockDataStreamServerMockRecorder +} + +// MockDataStreamServerMockRecorder is the mock recorder for MockDataStreamServer. +type MockDataStreamServerMockRecorder struct { + mock *MockDataStreamServer +} + +// NewMockDataStreamServer creates a new mock instance. +func NewMockDataStreamServer(ctrl *gomock.Controller) *MockDataStreamServer { + mock := &MockDataStreamServer{ctrl: ctrl} + mock.recorder = &MockDataStreamServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDataStreamServer) EXPECT() *MockDataStreamServerMockRecorder { + return m.recorder +} + +// GetChainId mocks base method. +func (m *MockDataStreamServer) GetChainId() uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetChainId") + ret0, _ := ret[0].(uint64) + return ret0 +} + +// GetChainId indicates an expected call of GetChainId. 
+func (mr *MockDataStreamServerMockRecorder) GetChainId() *MockDataStreamServerGetChainIdCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChainId", reflect.TypeOf((*MockDataStreamServer)(nil).GetChainId)) + return &MockDataStreamServerGetChainIdCall{Call: call} +} + +// MockDataStreamServerGetChainIdCall wrap *gomock.Call +type MockDataStreamServerGetChainIdCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerGetChainIdCall) Return(arg0 uint64) *MockDataStreamServerGetChainIdCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerGetChainIdCall) Do(f func() uint64) *MockDataStreamServerGetChainIdCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerGetChainIdCall) DoAndReturn(f func() uint64) *MockDataStreamServerGetChainIdCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetHighestBatchNumber mocks base method. +func (m *MockDataStreamServer) GetHighestBatchNumber() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHighestBatchNumber") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHighestBatchNumber indicates an expected call of GetHighestBatchNumber. +func (mr *MockDataStreamServerMockRecorder) GetHighestBatchNumber() *MockDataStreamServerGetHighestBatchNumberCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHighestBatchNumber", reflect.TypeOf((*MockDataStreamServer)(nil).GetHighestBatchNumber)) + return &MockDataStreamServerGetHighestBatchNumberCall{Call: call} +} + +// MockDataStreamServerGetHighestBatchNumberCall wrap *gomock.Call +type MockDataStreamServerGetHighestBatchNumberCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerGetHighestBatchNumberCall) Return(arg0 uint64, arg1 error) *MockDataStreamServerGetHighestBatchNumberCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerGetHighestBatchNumberCall) Do(f func() (uint64, error)) *MockDataStreamServerGetHighestBatchNumberCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerGetHighestBatchNumberCall) DoAndReturn(f func() (uint64, error)) *MockDataStreamServerGetHighestBatchNumberCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetHighestBlockNumber mocks base method. +func (m *MockDataStreamServer) GetHighestBlockNumber() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHighestBlockNumber") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHighestBlockNumber indicates an expected call of GetHighestBlockNumber. 
+func (mr *MockDataStreamServerMockRecorder) GetHighestBlockNumber() *MockDataStreamServerGetHighestBlockNumberCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHighestBlockNumber", reflect.TypeOf((*MockDataStreamServer)(nil).GetHighestBlockNumber)) + return &MockDataStreamServerGetHighestBlockNumberCall{Call: call} +} + +// MockDataStreamServerGetHighestBlockNumberCall wrap *gomock.Call +type MockDataStreamServerGetHighestBlockNumberCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerGetHighestBlockNumberCall) Return(arg0 uint64, arg1 error) *MockDataStreamServerGetHighestBlockNumberCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerGetHighestBlockNumberCall) Do(f func() (uint64, error)) *MockDataStreamServerGetHighestBlockNumberCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerGetHighestBlockNumberCall) DoAndReturn(f func() (uint64, error)) *MockDataStreamServerGetHighestBlockNumberCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetHighestClosedBatch mocks base method. +func (m *MockDataStreamServer) GetHighestClosedBatch() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHighestClosedBatch") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHighestClosedBatch indicates an expected call of GetHighestClosedBatch. +func (mr *MockDataStreamServerMockRecorder) GetHighestClosedBatch() *MockDataStreamServerGetHighestClosedBatchCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHighestClosedBatch", reflect.TypeOf((*MockDataStreamServer)(nil).GetHighestClosedBatch)) + return &MockDataStreamServerGetHighestClosedBatchCall{Call: call} +} + +// MockDataStreamServerGetHighestClosedBatchCall wrap *gomock.Call +type MockDataStreamServerGetHighestClosedBatchCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerGetHighestClosedBatchCall) Return(arg0 uint64, arg1 error) *MockDataStreamServerGetHighestClosedBatchCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerGetHighestClosedBatchCall) Do(f func() (uint64, error)) *MockDataStreamServerGetHighestClosedBatchCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerGetHighestClosedBatchCall) DoAndReturn(f func() (uint64, error)) *MockDataStreamServerGetHighestClosedBatchCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetHighestClosedBatchNoCache mocks base method. +func (m *MockDataStreamServer) GetHighestClosedBatchNoCache() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHighestClosedBatchNoCache") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHighestClosedBatchNoCache indicates an expected call of GetHighestClosedBatchNoCache. 
+func (mr *MockDataStreamServerMockRecorder) GetHighestClosedBatchNoCache() *MockDataStreamServerGetHighestClosedBatchNoCacheCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHighestClosedBatchNoCache", reflect.TypeOf((*MockDataStreamServer)(nil).GetHighestClosedBatchNoCache)) + return &MockDataStreamServerGetHighestClosedBatchNoCacheCall{Call: call} +} + +// MockDataStreamServerGetHighestClosedBatchNoCacheCall wrap *gomock.Call +type MockDataStreamServerGetHighestClosedBatchNoCacheCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerGetHighestClosedBatchNoCacheCall) Return(arg0 uint64, arg1 error) *MockDataStreamServerGetHighestClosedBatchNoCacheCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerGetHighestClosedBatchNoCacheCall) Do(f func() (uint64, error)) *MockDataStreamServerGetHighestClosedBatchNoCacheCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerGetHighestClosedBatchNoCacheCall) DoAndReturn(f func() (uint64, error)) *MockDataStreamServerGetHighestClosedBatchNoCacheCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetStreamServer mocks base method. +func (m *MockDataStreamServer) GetStreamServer() server.StreamServer { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetStreamServer") + ret0, _ := ret[0].(server.StreamServer) + return ret0 +} + +// GetStreamServer indicates an expected call of GetStreamServer. +func (mr *MockDataStreamServerMockRecorder) GetStreamServer() *MockDataStreamServerGetStreamServerCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStreamServer", reflect.TypeOf((*MockDataStreamServer)(nil).GetStreamServer)) + return &MockDataStreamServerGetStreamServerCall{Call: call} +} + +// MockDataStreamServerGetStreamServerCall wrap *gomock.Call +type MockDataStreamServerGetStreamServerCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerGetStreamServerCall) Return(arg0 server.StreamServer) *MockDataStreamServerGetStreamServerCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerGetStreamServerCall) Do(f func() server.StreamServer) *MockDataStreamServerGetStreamServerCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerGetStreamServerCall) DoAndReturn(f func() server.StreamServer) *MockDataStreamServerGetStreamServerCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// IsLastEntryBatchEnd mocks base method. +func (m *MockDataStreamServer) IsLastEntryBatchEnd() (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsLastEntryBatchEnd") + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsLastEntryBatchEnd indicates an expected call of IsLastEntryBatchEnd. 
+func (mr *MockDataStreamServerMockRecorder) IsLastEntryBatchEnd() *MockDataStreamServerIsLastEntryBatchEndCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsLastEntryBatchEnd", reflect.TypeOf((*MockDataStreamServer)(nil).IsLastEntryBatchEnd)) + return &MockDataStreamServerIsLastEntryBatchEndCall{Call: call} +} + +// MockDataStreamServerIsLastEntryBatchEndCall wrap *gomock.Call +type MockDataStreamServerIsLastEntryBatchEndCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerIsLastEntryBatchEndCall) Return(arg0 bool, arg1 error) *MockDataStreamServerIsLastEntryBatchEndCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerIsLastEntryBatchEndCall) Do(f func() (bool, error)) *MockDataStreamServerIsLastEntryBatchEndCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerIsLastEntryBatchEndCall) DoAndReturn(f func() (bool, error)) *MockDataStreamServerIsLastEntryBatchEndCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// ReadBatches mocks base method. +func (m *MockDataStreamServer) ReadBatches(arg0, arg1 uint64) ([][]*types0.FullL2Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReadBatches", arg0, arg1) + ret0, _ := ret[0].([][]*types0.FullL2Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReadBatches indicates an expected call of ReadBatches. +func (mr *MockDataStreamServerMockRecorder) ReadBatches(arg0, arg1 any) *MockDataStreamServerReadBatchesCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadBatches", reflect.TypeOf((*MockDataStreamServer)(nil).ReadBatches), arg0, arg1) + return &MockDataStreamServerReadBatchesCall{Call: call} +} + +// MockDataStreamServerReadBatchesCall wrap *gomock.Call +type MockDataStreamServerReadBatchesCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerReadBatchesCall) Return(arg0 [][]*types0.FullL2Block, arg1 error) *MockDataStreamServerReadBatchesCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerReadBatchesCall) Do(f func(uint64, uint64) ([][]*types0.FullL2Block, error)) *MockDataStreamServerReadBatchesCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerReadBatchesCall) DoAndReturn(f func(uint64, uint64) ([][]*types0.FullL2Block, error)) *MockDataStreamServerReadBatchesCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// UnwindIfNecessary mocks base method. +func (m *MockDataStreamServer) UnwindIfNecessary(arg0 string, arg1 server.DbReader, arg2, arg3, arg4 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnwindIfNecessary", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(error) + return ret0 +} + +// UnwindIfNecessary indicates an expected call of UnwindIfNecessary. 
+func (mr *MockDataStreamServerMockRecorder) UnwindIfNecessary(arg0, arg1, arg2, arg3, arg4 any) *MockDataStreamServerUnwindIfNecessaryCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnwindIfNecessary", reflect.TypeOf((*MockDataStreamServer)(nil).UnwindIfNecessary), arg0, arg1, arg2, arg3, arg4) + return &MockDataStreamServerUnwindIfNecessaryCall{Call: call} +} + +// MockDataStreamServerUnwindIfNecessaryCall wrap *gomock.Call +type MockDataStreamServerUnwindIfNecessaryCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerUnwindIfNecessaryCall) Return(arg0 error) *MockDataStreamServerUnwindIfNecessaryCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerUnwindIfNecessaryCall) Do(f func(string, server.DbReader, uint64, uint64, uint64) error) *MockDataStreamServerUnwindIfNecessaryCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerUnwindIfNecessaryCall) DoAndReturn(f func(string, server.DbReader, uint64, uint64, uint64) error) *MockDataStreamServerUnwindIfNecessaryCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// UnwindToBatchStart mocks base method. +func (m *MockDataStreamServer) UnwindToBatchStart(arg0 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnwindToBatchStart", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// UnwindToBatchStart indicates an expected call of UnwindToBatchStart. +func (mr *MockDataStreamServerMockRecorder) UnwindToBatchStart(arg0 any) *MockDataStreamServerUnwindToBatchStartCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnwindToBatchStart", reflect.TypeOf((*MockDataStreamServer)(nil).UnwindToBatchStart), arg0) + return &MockDataStreamServerUnwindToBatchStartCall{Call: call} +} + +// MockDataStreamServerUnwindToBatchStartCall wrap *gomock.Call +type MockDataStreamServerUnwindToBatchStartCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerUnwindToBatchStartCall) Return(arg0 error) *MockDataStreamServerUnwindToBatchStartCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerUnwindToBatchStartCall) Do(f func(uint64) error) *MockDataStreamServerUnwindToBatchStartCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerUnwindToBatchStartCall) DoAndReturn(f func(uint64) error) *MockDataStreamServerUnwindToBatchStartCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// UnwindToBlock mocks base method. +func (m *MockDataStreamServer) UnwindToBlock(arg0 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnwindToBlock", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// UnwindToBlock indicates an expected call of UnwindToBlock. 
+func (mr *MockDataStreamServerMockRecorder) UnwindToBlock(arg0 any) *MockDataStreamServerUnwindToBlockCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnwindToBlock", reflect.TypeOf((*MockDataStreamServer)(nil).UnwindToBlock), arg0) + return &MockDataStreamServerUnwindToBlockCall{Call: call} +} + +// MockDataStreamServerUnwindToBlockCall wrap *gomock.Call +type MockDataStreamServerUnwindToBlockCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerUnwindToBlockCall) Return(arg0 error) *MockDataStreamServerUnwindToBlockCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerUnwindToBlockCall) Do(f func(uint64) error) *MockDataStreamServerUnwindToBlockCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerUnwindToBlockCall) DoAndReturn(f func(uint64) error) *MockDataStreamServerUnwindToBlockCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// WriteBatchEnd mocks base method. +func (m *MockDataStreamServer) WriteBatchEnd(arg0 server.DbReader, arg1 uint64, arg2, arg3 *common.Hash) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteBatchEnd", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteBatchEnd indicates an expected call of WriteBatchEnd. +func (mr *MockDataStreamServerMockRecorder) WriteBatchEnd(arg0, arg1, arg2, arg3 any) *MockDataStreamServerWriteBatchEndCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteBatchEnd", reflect.TypeOf((*MockDataStreamServer)(nil).WriteBatchEnd), arg0, arg1, arg2, arg3) + return &MockDataStreamServerWriteBatchEndCall{Call: call} +} + +// MockDataStreamServerWriteBatchEndCall wrap *gomock.Call +type MockDataStreamServerWriteBatchEndCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerWriteBatchEndCall) Return(arg0 error) *MockDataStreamServerWriteBatchEndCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerWriteBatchEndCall) Do(f func(server.DbReader, uint64, *common.Hash, *common.Hash) error) *MockDataStreamServerWriteBatchEndCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerWriteBatchEndCall) DoAndReturn(f func(server.DbReader, uint64, *common.Hash, *common.Hash) error) *MockDataStreamServerWriteBatchEndCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// WriteBlockWithBatchStartToStream mocks base method. +func (m *MockDataStreamServer) WriteBlockWithBatchStartToStream(arg0 string, arg1 kv.Tx, arg2 server.DbReader, arg3, arg4, arg5 uint64, arg6, arg7 types.Block) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteBlockWithBatchStartToStream", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteBlockWithBatchStartToStream indicates an expected call of WriteBlockWithBatchStartToStream. 
+func (mr *MockDataStreamServerMockRecorder) WriteBlockWithBatchStartToStream(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 any) *MockDataStreamServerWriteBlockWithBatchStartToStreamCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteBlockWithBatchStartToStream", reflect.TypeOf((*MockDataStreamServer)(nil).WriteBlockWithBatchStartToStream), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + return &MockDataStreamServerWriteBlockWithBatchStartToStreamCall{Call: call} +} + +// MockDataStreamServerWriteBlockWithBatchStartToStreamCall wrap *gomock.Call +type MockDataStreamServerWriteBlockWithBatchStartToStreamCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerWriteBlockWithBatchStartToStreamCall) Return(arg0 error) *MockDataStreamServerWriteBlockWithBatchStartToStreamCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerWriteBlockWithBatchStartToStreamCall) Do(f func(string, kv.Tx, server.DbReader, uint64, uint64, uint64, types.Block, types.Block) error) *MockDataStreamServerWriteBlockWithBatchStartToStreamCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerWriteBlockWithBatchStartToStreamCall) DoAndReturn(f func(string, kv.Tx, server.DbReader, uint64, uint64, uint64, types.Block, types.Block) error) *MockDataStreamServerWriteBlockWithBatchStartToStreamCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// WriteBlocksToStreamConsecutively mocks base method. +func (m *MockDataStreamServer) WriteBlocksToStreamConsecutively(arg0 context.Context, arg1 string, arg2 kv.Tx, arg3 server.DbReader, arg4, arg5 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteBlocksToStreamConsecutively", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteBlocksToStreamConsecutively indicates an expected call of WriteBlocksToStreamConsecutively. 
+func (mr *MockDataStreamServerMockRecorder) WriteBlocksToStreamConsecutively(arg0, arg1, arg2, arg3, arg4, arg5 any) *MockDataStreamServerWriteBlocksToStreamConsecutivelyCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteBlocksToStreamConsecutively", reflect.TypeOf((*MockDataStreamServer)(nil).WriteBlocksToStreamConsecutively), arg0, arg1, arg2, arg3, arg4, arg5) + return &MockDataStreamServerWriteBlocksToStreamConsecutivelyCall{Call: call} +} + +// MockDataStreamServerWriteBlocksToStreamConsecutivelyCall wrap *gomock.Call +type MockDataStreamServerWriteBlocksToStreamConsecutivelyCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerWriteBlocksToStreamConsecutivelyCall) Return(arg0 error) *MockDataStreamServerWriteBlocksToStreamConsecutivelyCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerWriteBlocksToStreamConsecutivelyCall) Do(f func(context.Context, string, kv.Tx, server.DbReader, uint64, uint64) error) *MockDataStreamServerWriteBlocksToStreamConsecutivelyCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerWriteBlocksToStreamConsecutivelyCall) DoAndReturn(f func(context.Context, string, kv.Tx, server.DbReader, uint64, uint64) error) *MockDataStreamServerWriteBlocksToStreamConsecutivelyCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// WriteGenesisToStream mocks base method. +func (m *MockDataStreamServer) WriteGenesisToStream(arg0 *types.Block, arg1 *hermez_db.HermezDbReader, arg2 kv.Tx) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteGenesisToStream", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteGenesisToStream indicates an expected call of WriteGenesisToStream. +func (mr *MockDataStreamServerMockRecorder) WriteGenesisToStream(arg0, arg1, arg2 any) *MockDataStreamServerWriteGenesisToStreamCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteGenesisToStream", reflect.TypeOf((*MockDataStreamServer)(nil).WriteGenesisToStream), arg0, arg1, arg2) + return &MockDataStreamServerWriteGenesisToStreamCall{Call: call} +} + +// MockDataStreamServerWriteGenesisToStreamCall wrap *gomock.Call +type MockDataStreamServerWriteGenesisToStreamCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerWriteGenesisToStreamCall) Return(arg0 error) *MockDataStreamServerWriteGenesisToStreamCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerWriteGenesisToStreamCall) Do(f func(*types.Block, *hermez_db.HermezDbReader, kv.Tx) error) *MockDataStreamServerWriteGenesisToStreamCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerWriteGenesisToStreamCall) DoAndReturn(f func(*types.Block, *hermez_db.HermezDbReader, kv.Tx) error) *MockDataStreamServerWriteGenesisToStreamCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// WriteWholeBatchToStream mocks base method. 
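Reviewer note: the typed (-typed=true) mock above returns per-method *Call wrappers, so expectations are compile-time checked. A minimal usage sketch, assuming the mock package is imported the same way as in the catch-up stage test added later in this change; the test name, values, and file placement are illustrative only:

package mocks_test // hypothetical placement alongside the generated mocks

import (
	"testing"

	mocks "github.com/ledgerwatch/erigon/zk/datastream/mock_services"
	dstypes "github.com/ledgerwatch/erigon/zk/datastream/types"
	"github.com/stretchr/testify/require"
	"go.uber.org/mock/gomock"
)

func TestTypedDataStreamServerMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	srv := mocks.NewMockDataStreamServer(ctrl)

	// Typed Return: the argument types are checked at compile time.
	srv.EXPECT().IsLastEntryBatchEnd().Return(true, nil)

	// Typed DoAndReturn: the callback must match the mocked method signature.
	srv.EXPECT().ReadBatches(uint64(1), uint64(2)).DoAndReturn(
		func(start, end uint64) ([][]*dstypes.FullL2Block, error) {
			return make([][]*dstypes.FullL2Block, end-start+1), nil
		},
	)

	isEnd, err := srv.IsLastEntryBatchEnd()
	require.NoError(t, err)
	require.True(t, isEnd)

	batches, err := srv.ReadBatches(1, 2)
	require.NoError(t, err)
	require.Len(t, batches, 2)
}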
+func (m *MockDataStreamServer) WriteWholeBatchToStream(arg0 string, arg1 kv.Tx, arg2 server.DbReader, arg3, arg4 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteWholeBatchToStream", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteWholeBatchToStream indicates an expected call of WriteWholeBatchToStream. +func (mr *MockDataStreamServerMockRecorder) WriteWholeBatchToStream(arg0, arg1, arg2, arg3, arg4 any) *MockDataStreamServerWriteWholeBatchToStreamCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteWholeBatchToStream", reflect.TypeOf((*MockDataStreamServer)(nil).WriteWholeBatchToStream), arg0, arg1, arg2, arg3, arg4) + return &MockDataStreamServerWriteWholeBatchToStreamCall{Call: call} +} + +// MockDataStreamServerWriteWholeBatchToStreamCall wrap *gomock.Call +type MockDataStreamServerWriteWholeBatchToStreamCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockDataStreamServerWriteWholeBatchToStreamCall) Return(arg0 error) *MockDataStreamServerWriteWholeBatchToStreamCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockDataStreamServerWriteWholeBatchToStreamCall) Do(f func(string, kv.Tx, server.DbReader, uint64, uint64) error) *MockDataStreamServerWriteWholeBatchToStreamCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockDataStreamServerWriteWholeBatchToStreamCall) DoAndReturn(f func(string, kv.Tx, server.DbReader, uint64, uint64) error) *MockDataStreamServerWriteWholeBatchToStreamCall { + c.Call = c.Call.DoAndReturn(f) + return c +} diff --git a/zk/datastream/mock_services/stream_server_mock.go b/zk/datastream/mock_services/stream_server_mock.go new file mode 100644 index 00000000000..27d287a223c --- /dev/null +++ b/zk/datastream/mock_services/stream_server_mock.go @@ -0,0 +1,576 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ledgerwatch/erigon/zk/datastream/server (interfaces: StreamServer) +// +// Generated by this command: +// +// mockgen -typed=true -destination=../mocks/stream_server_mock.go -package=mocks . StreamServer +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + reflect "reflect" + + datastreamer "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" + gomock "go.uber.org/mock/gomock" +) + +// MockStreamServer is a mock of StreamServer interface. +type MockStreamServer struct { + ctrl *gomock.Controller + recorder *MockStreamServerMockRecorder +} + +// MockStreamServerMockRecorder is the mock recorder for MockStreamServer. +type MockStreamServerMockRecorder struct { + mock *MockStreamServer +} + +// NewMockStreamServer creates a new mock instance. +func NewMockStreamServer(ctrl *gomock.Controller) *MockStreamServer { + mock := &MockStreamServer{ctrl: ctrl} + mock.recorder = &MockStreamServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStreamServer) EXPECT() *MockStreamServerMockRecorder { + return m.recorder +} + +// AddStreamBookmark mocks base method. +func (m *MockStreamServer) AddStreamBookmark(arg0 []byte) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddStreamBookmark", arg0) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AddStreamBookmark indicates an expected call of AddStreamBookmark. 
+func (mr *MockStreamServerMockRecorder) AddStreamBookmark(arg0 any) *MockStreamServerAddStreamBookmarkCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddStreamBookmark", reflect.TypeOf((*MockStreamServer)(nil).AddStreamBookmark), arg0) + return &MockStreamServerAddStreamBookmarkCall{Call: call} +} + +// MockStreamServerAddStreamBookmarkCall wrap *gomock.Call +type MockStreamServerAddStreamBookmarkCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerAddStreamBookmarkCall) Return(arg0 uint64, arg1 error) *MockStreamServerAddStreamBookmarkCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerAddStreamBookmarkCall) Do(f func([]byte) (uint64, error)) *MockStreamServerAddStreamBookmarkCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerAddStreamBookmarkCall) DoAndReturn(f func([]byte) (uint64, error)) *MockStreamServerAddStreamBookmarkCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// AddStreamEntry mocks base method. +func (m *MockStreamServer) AddStreamEntry(arg0 datastreamer.EntryType, arg1 []byte) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddStreamEntry", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AddStreamEntry indicates an expected call of AddStreamEntry. +func (mr *MockStreamServerMockRecorder) AddStreamEntry(arg0, arg1 any) *MockStreamServerAddStreamEntryCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddStreamEntry", reflect.TypeOf((*MockStreamServer)(nil).AddStreamEntry), arg0, arg1) + return &MockStreamServerAddStreamEntryCall{Call: call} +} + +// MockStreamServerAddStreamEntryCall wrap *gomock.Call +type MockStreamServerAddStreamEntryCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerAddStreamEntryCall) Return(arg0 uint64, arg1 error) *MockStreamServerAddStreamEntryCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerAddStreamEntryCall) Do(f func(datastreamer.EntryType, []byte) (uint64, error)) *MockStreamServerAddStreamEntryCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerAddStreamEntryCall) DoAndReturn(f func(datastreamer.EntryType, []byte) (uint64, error)) *MockStreamServerAddStreamEntryCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// BookmarkPrintDump mocks base method. +func (m *MockStreamServer) BookmarkPrintDump() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "BookmarkPrintDump") +} + +// BookmarkPrintDump indicates an expected call of BookmarkPrintDump. 
+func (mr *MockStreamServerMockRecorder) BookmarkPrintDump() *MockStreamServerBookmarkPrintDumpCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BookmarkPrintDump", reflect.TypeOf((*MockStreamServer)(nil).BookmarkPrintDump)) + return &MockStreamServerBookmarkPrintDumpCall{Call: call} +} + +// MockStreamServerBookmarkPrintDumpCall wrap *gomock.Call +type MockStreamServerBookmarkPrintDumpCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerBookmarkPrintDumpCall) Return() *MockStreamServerBookmarkPrintDumpCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerBookmarkPrintDumpCall) Do(f func()) *MockStreamServerBookmarkPrintDumpCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerBookmarkPrintDumpCall) DoAndReturn(f func()) *MockStreamServerBookmarkPrintDumpCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// CommitAtomicOp mocks base method. +func (m *MockStreamServer) CommitAtomicOp() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CommitAtomicOp") + ret0, _ := ret[0].(error) + return ret0 +} + +// CommitAtomicOp indicates an expected call of CommitAtomicOp. +func (mr *MockStreamServerMockRecorder) CommitAtomicOp() *MockStreamServerCommitAtomicOpCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitAtomicOp", reflect.TypeOf((*MockStreamServer)(nil).CommitAtomicOp)) + return &MockStreamServerCommitAtomicOpCall{Call: call} +} + +// MockStreamServerCommitAtomicOpCall wrap *gomock.Call +type MockStreamServerCommitAtomicOpCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerCommitAtomicOpCall) Return(arg0 error) *MockStreamServerCommitAtomicOpCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerCommitAtomicOpCall) Do(f func() error) *MockStreamServerCommitAtomicOpCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerCommitAtomicOpCall) DoAndReturn(f func() error) *MockStreamServerCommitAtomicOpCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetBookmark mocks base method. +func (m *MockStreamServer) GetBookmark(arg0 []byte) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBookmark", arg0) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBookmark indicates an expected call of GetBookmark. 
+func (mr *MockStreamServerMockRecorder) GetBookmark(arg0 any) *MockStreamServerGetBookmarkCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBookmark", reflect.TypeOf((*MockStreamServer)(nil).GetBookmark), arg0) + return &MockStreamServerGetBookmarkCall{Call: call} +} + +// MockStreamServerGetBookmarkCall wrap *gomock.Call +type MockStreamServerGetBookmarkCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerGetBookmarkCall) Return(arg0 uint64, arg1 error) *MockStreamServerGetBookmarkCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerGetBookmarkCall) Do(f func([]byte) (uint64, error)) *MockStreamServerGetBookmarkCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerGetBookmarkCall) DoAndReturn(f func([]byte) (uint64, error)) *MockStreamServerGetBookmarkCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetDataBetweenBookmarks mocks base method. +func (m *MockStreamServer) GetDataBetweenBookmarks(arg0, arg1 []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDataBetweenBookmarks", arg0, arg1) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDataBetweenBookmarks indicates an expected call of GetDataBetweenBookmarks. +func (mr *MockStreamServerMockRecorder) GetDataBetweenBookmarks(arg0, arg1 any) *MockStreamServerGetDataBetweenBookmarksCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDataBetweenBookmarks", reflect.TypeOf((*MockStreamServer)(nil).GetDataBetweenBookmarks), arg0, arg1) + return &MockStreamServerGetDataBetweenBookmarksCall{Call: call} +} + +// MockStreamServerGetDataBetweenBookmarksCall wrap *gomock.Call +type MockStreamServerGetDataBetweenBookmarksCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerGetDataBetweenBookmarksCall) Return(arg0 []byte, arg1 error) *MockStreamServerGetDataBetweenBookmarksCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerGetDataBetweenBookmarksCall) Do(f func([]byte, []byte) ([]byte, error)) *MockStreamServerGetDataBetweenBookmarksCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerGetDataBetweenBookmarksCall) DoAndReturn(f func([]byte, []byte) ([]byte, error)) *MockStreamServerGetDataBetweenBookmarksCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetEntry mocks base method. +func (m *MockStreamServer) GetEntry(arg0 uint64) (datastreamer.FileEntry, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEntry", arg0) + ret0, _ := ret[0].(datastreamer.FileEntry) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEntry indicates an expected call of GetEntry. 
+func (mr *MockStreamServerMockRecorder) GetEntry(arg0 any) *MockStreamServerGetEntryCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEntry", reflect.TypeOf((*MockStreamServer)(nil).GetEntry), arg0) + return &MockStreamServerGetEntryCall{Call: call} +} + +// MockStreamServerGetEntryCall wrap *gomock.Call +type MockStreamServerGetEntryCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerGetEntryCall) Return(arg0 datastreamer.FileEntry, arg1 error) *MockStreamServerGetEntryCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerGetEntryCall) Do(f func(uint64) (datastreamer.FileEntry, error)) *MockStreamServerGetEntryCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerGetEntryCall) DoAndReturn(f func(uint64) (datastreamer.FileEntry, error)) *MockStreamServerGetEntryCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetFirstEventAfterBookmark mocks base method. +func (m *MockStreamServer) GetFirstEventAfterBookmark(arg0 []byte) (datastreamer.FileEntry, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetFirstEventAfterBookmark", arg0) + ret0, _ := ret[0].(datastreamer.FileEntry) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetFirstEventAfterBookmark indicates an expected call of GetFirstEventAfterBookmark. +func (mr *MockStreamServerMockRecorder) GetFirstEventAfterBookmark(arg0 any) *MockStreamServerGetFirstEventAfterBookmarkCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFirstEventAfterBookmark", reflect.TypeOf((*MockStreamServer)(nil).GetFirstEventAfterBookmark), arg0) + return &MockStreamServerGetFirstEventAfterBookmarkCall{Call: call} +} + +// MockStreamServerGetFirstEventAfterBookmarkCall wrap *gomock.Call +type MockStreamServerGetFirstEventAfterBookmarkCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerGetFirstEventAfterBookmarkCall) Return(arg0 datastreamer.FileEntry, arg1 error) *MockStreamServerGetFirstEventAfterBookmarkCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerGetFirstEventAfterBookmarkCall) Do(f func([]byte) (datastreamer.FileEntry, error)) *MockStreamServerGetFirstEventAfterBookmarkCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerGetFirstEventAfterBookmarkCall) DoAndReturn(f func([]byte) (datastreamer.FileEntry, error)) *MockStreamServerGetFirstEventAfterBookmarkCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetHeader mocks base method. +func (m *MockStreamServer) GetHeader() datastreamer.HeaderEntry { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHeader") + ret0, _ := ret[0].(datastreamer.HeaderEntry) + return ret0 +} + +// GetHeader indicates an expected call of GetHeader. 
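Reviewer note: because ZkEVMDataStreamServer (further down in this diff) depends on the StreamServer interface rather than on *datastreamer.StreamServer, this mock can stand in for the low-level stream when unit-testing the wrapper. A hedged sketch using only GetHeader and GetEntry, which IsLastEntryBatchEnd relies on; names, values, and file placement are illustrative:

package mocks_test // hypothetical placement alongside the generated mocks

import (
	"testing"

	"github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer"
	mocks "github.com/ledgerwatch/erigon/zk/datastream/mock_services"
	"github.com/ledgerwatch/erigon/zk/datastream/server"
	dstypes "github.com/ledgerwatch/erigon/zk/datastream/types"
	"github.com/stretchr/testify/require"
	"go.uber.org/mock/gomock"
)

func TestIsLastEntryBatchEndAgainstMockedStream(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	streamMock := mocks.NewMockStreamServer(ctrl)

	// Pretend the stream holds three entries and the last one is a batch end.
	streamMock.EXPECT().GetHeader().Return(datastreamer.HeaderEntry{TotalEntries: 3})
	streamMock.EXPECT().GetEntry(uint64(2)).Return(
		datastreamer.FileEntry{Type: datastreamer.EntryType(dstypes.EntryTypeBatchEnd)}, nil,
	)

	// Wrap the mocked low-level stream in the real implementation.
	srv := server.NewZkEVMDataStreamServerFactory().CreateDataStreamServer(streamMock, 1)

	isEnd, err := srv.IsLastEntryBatchEnd()
	require.NoError(t, err)
	require.True(t, isEnd)
}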
+func (mr *MockStreamServerMockRecorder) GetHeader() *MockStreamServerGetHeaderCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHeader", reflect.TypeOf((*MockStreamServer)(nil).GetHeader)) + return &MockStreamServerGetHeaderCall{Call: call} +} + +// MockStreamServerGetHeaderCall wrap *gomock.Call +type MockStreamServerGetHeaderCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerGetHeaderCall) Return(arg0 datastreamer.HeaderEntry) *MockStreamServerGetHeaderCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerGetHeaderCall) Do(f func() datastreamer.HeaderEntry) *MockStreamServerGetHeaderCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerGetHeaderCall) DoAndReturn(f func() datastreamer.HeaderEntry) *MockStreamServerGetHeaderCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// RollbackAtomicOp mocks base method. +func (m *MockStreamServer) RollbackAtomicOp() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RollbackAtomicOp") + ret0, _ := ret[0].(error) + return ret0 +} + +// RollbackAtomicOp indicates an expected call of RollbackAtomicOp. +func (mr *MockStreamServerMockRecorder) RollbackAtomicOp() *MockStreamServerRollbackAtomicOpCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RollbackAtomicOp", reflect.TypeOf((*MockStreamServer)(nil).RollbackAtomicOp)) + return &MockStreamServerRollbackAtomicOpCall{Call: call} +} + +// MockStreamServerRollbackAtomicOpCall wrap *gomock.Call +type MockStreamServerRollbackAtomicOpCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerRollbackAtomicOpCall) Return(arg0 error) *MockStreamServerRollbackAtomicOpCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerRollbackAtomicOpCall) Do(f func() error) *MockStreamServerRollbackAtomicOpCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerRollbackAtomicOpCall) DoAndReturn(f func() error) *MockStreamServerRollbackAtomicOpCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// Start mocks base method. +func (m *MockStreamServer) Start() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Start") + ret0, _ := ret[0].(error) + return ret0 +} + +// Start indicates an expected call of Start. +func (mr *MockStreamServerMockRecorder) Start() *MockStreamServerStartCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockStreamServer)(nil).Start)) + return &MockStreamServerStartCall{Call: call} +} + +// MockStreamServerStartCall wrap *gomock.Call +type MockStreamServerStartCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerStartCall) Return(arg0 error) *MockStreamServerStartCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerStartCall) Do(f func() error) *MockStreamServerStartCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerStartCall) DoAndReturn(f func() error) *MockStreamServerStartCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// StartAtomicOp mocks base method. 
+func (m *MockStreamServer) StartAtomicOp() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StartAtomicOp") + ret0, _ := ret[0].(error) + return ret0 +} + +// StartAtomicOp indicates an expected call of StartAtomicOp. +func (mr *MockStreamServerMockRecorder) StartAtomicOp() *MockStreamServerStartAtomicOpCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartAtomicOp", reflect.TypeOf((*MockStreamServer)(nil).StartAtomicOp)) + return &MockStreamServerStartAtomicOpCall{Call: call} +} + +// MockStreamServerStartAtomicOpCall wrap *gomock.Call +type MockStreamServerStartAtomicOpCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerStartAtomicOpCall) Return(arg0 error) *MockStreamServerStartAtomicOpCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerStartAtomicOpCall) Do(f func() error) *MockStreamServerStartAtomicOpCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerStartAtomicOpCall) DoAndReturn(f func() error) *MockStreamServerStartAtomicOpCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// TruncateFile mocks base method. +func (m *MockStreamServer) TruncateFile(arg0 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TruncateFile", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// TruncateFile indicates an expected call of TruncateFile. +func (mr *MockStreamServerMockRecorder) TruncateFile(arg0 any) *MockStreamServerTruncateFileCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TruncateFile", reflect.TypeOf((*MockStreamServer)(nil).TruncateFile), arg0) + return &MockStreamServerTruncateFileCall{Call: call} +} + +// MockStreamServerTruncateFileCall wrap *gomock.Call +type MockStreamServerTruncateFileCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerTruncateFileCall) Return(arg0 error) *MockStreamServerTruncateFileCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerTruncateFileCall) Do(f func(uint64) error) *MockStreamServerTruncateFileCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerTruncateFileCall) DoAndReturn(f func(uint64) error) *MockStreamServerTruncateFileCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// UpdateEntryData mocks base method. +func (m *MockStreamServer) UpdateEntryData(arg0 uint64, arg1 datastreamer.EntryType, arg2 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateEntryData", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateEntryData indicates an expected call of UpdateEntryData. 
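Reviewer note: StartAtomicOp, CommitAtomicOp and RollbackAtomicOp, mocked here, are used by the writer methods later in this diff as an open/append/commit cycle with a deferred rollback. A minimal sketch of that pattern against the StreamServer interface; the helper and its placement are illustrative, not part of this change:

package server_test // hypothetical file placement

import (
	"github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer"
	"github.com/ledgerwatch/erigon/zk/datastream/server"
)

// writeEntriesAtomically shows the write pattern used by ZkEVMDataStreamServer:
// open an atomic op, append entries, then commit.
func writeEntriesAtomically(ss server.StreamServer, etype datastreamer.EntryType, payloads [][]byte) error {
	if err := ss.StartAtomicOp(); err != nil {
		return err
	}
	// Mirrors the `defer srv.streamServer.RollbackAtomicOp()` calls elsewhere in
	// this change, which rely on rollback being harmless after a successful
	// commit; it only matters on early-return error paths.
	defer ss.RollbackAtomicOp()

	for _, payload := range payloads {
		if _, err := ss.AddStreamEntry(etype, payload); err != nil {
			return err
		}
	}
	return ss.CommitAtomicOp()
}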
+func (mr *MockStreamServerMockRecorder) UpdateEntryData(arg0, arg1, arg2 any) *MockStreamServerUpdateEntryDataCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateEntryData", reflect.TypeOf((*MockStreamServer)(nil).UpdateEntryData), arg0, arg1, arg2) + return &MockStreamServerUpdateEntryDataCall{Call: call} +} + +// MockStreamServerUpdateEntryDataCall wrap *gomock.Call +type MockStreamServerUpdateEntryDataCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockStreamServerUpdateEntryDataCall) Return(arg0 error) *MockStreamServerUpdateEntryDataCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockStreamServerUpdateEntryDataCall) Do(f func(uint64, datastreamer.EntryType, []byte) error) *MockStreamServerUpdateEntryDataCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockStreamServerUpdateEntryDataCall) DoAndReturn(f func(uint64, datastreamer.EntryType, []byte) error) *MockStreamServerUpdateEntryDataCall { + c.Call = c.Call.DoAndReturn(f) + return c +} diff --git a/zk/datastream/server/data_stream_server.go b/zk/datastream/server/data_stream_server.go index 93eb3c6c27c..9fb48630ca8 100644 --- a/zk/datastream/server/data_stream_server.go +++ b/zk/datastream/server/data_stream_server.go @@ -2,8 +2,10 @@ package server import ( "fmt" + "time" "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" + dslog "github.com/0xPolygonHermez/zkevm-data-streamer/log" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/core/rawdb" @@ -41,9 +43,9 @@ const ( EtrogBatchNumber = 7 ) -type DataStreamServer struct { - stream *datastreamer.StreamServer - chainId uint64 +type ZkEVMDataStreamServer struct { + streamServer StreamServer + chainId uint64 highestBlockWritten, highestClosedBatchWritten, highestBatchWritten *uint64 @@ -59,16 +61,31 @@ type DataStreamEntryProto interface { Type() types.EntryType } -func NewDataStreamServer(stream *datastreamer.StreamServer, chainId uint64) *DataStreamServer { - return &DataStreamServer{ - stream: stream, +type ZkEVMDataStreamServerFactory struct { +} + +func NewZkEVMDataStreamServerFactory() *ZkEVMDataStreamServerFactory { + return &ZkEVMDataStreamServerFactory{} +} + +func (f *ZkEVMDataStreamServerFactory) CreateStreamServer(port uint16, version uint8, systemID uint64, streamType datastreamer.StreamType, fileName string, writeTimeout time.Duration, inactivityTimeout time.Duration, inactivityCheckInterval time.Duration, cfg *dslog.Config) (StreamServer, error) { + return datastreamer.NewServer(port, version, systemID, streamType, fileName, writeTimeout, inactivityTimeout, inactivityCheckInterval, cfg) +} + +func (f *ZkEVMDataStreamServerFactory) CreateDataStreamServer(streamServer StreamServer, chainId uint64) DataStreamServer { + return &ZkEVMDataStreamServer{ + streamServer: streamServer, chainId: chainId, highestBlockWritten: nil, highestBatchWritten: nil, } } -func (srv *DataStreamServer) GetChainId() uint64 { +func (srv *ZkEVMDataStreamServer) GetStreamServer() StreamServer { + return srv.streamServer +} + +func (srv *ZkEVMDataStreamServer) GetChainId() uint64 { return srv.chainId } @@ -121,8 +138,8 @@ func NewDataStreamEntries(size int) *DataStreamEntries { } } -func (srv *DataStreamServer) commitAtomicOp(latestBlockNum, latestBatchNum, latestClosedBatch *uint64) error { - if err := srv.stream.CommitAtomicOp(); err 
!= nil { +func (srv *ZkEVMDataStreamServer) commitAtomicOp(latestBlockNum, latestBatchNum, latestClosedBatch *uint64) error { + if err := srv.streamServer.CommitAtomicOp(); err != nil { return err } @@ -147,7 +164,7 @@ func (srv *DataStreamServer) commitAtomicOp(latestBlockNum, latestBatchNum, late return nil } -func (srv *DataStreamServer) commitEntriesToStreamProto(entries []DataStreamEntryProto) error { +func (srv *ZkEVMDataStreamServer) commitEntriesToStreamProto(entries []DataStreamEntryProto) error { for _, entry := range entries { entryType := entry.Type() @@ -157,11 +174,11 @@ func (srv *DataStreamServer) commitEntriesToStreamProto(entries []DataStreamEntr } if entryType == types.BookmarkEntryType { - if _, err = srv.stream.AddStreamBookmark(em); err != nil { + if _, err = srv.streamServer.AddStreamBookmark(em); err != nil { return err } } else { - if _, err = srv.stream.AddStreamEntry(datastreamer.EntryType(entryType), em); err != nil { + if _, err = srv.streamServer.AddStreamEntry(datastreamer.EntryType(entryType), em); err != nil { return err } } @@ -434,8 +451,8 @@ func BuildWholeBatchStreamEntriesProto( return allEntries, nil } -func (srv *DataStreamServer) IsLastEntryBatchEnd() (isBatchEnd bool, err error) { - header := srv.stream.GetHeader() +func (srv *ZkEVMDataStreamServer) IsLastEntryBatchEnd() (isBatchEnd bool, err error) { + header := srv.streamServer.GetHeader() if header.TotalEntries == 0 { return false, nil @@ -444,7 +461,7 @@ func (srv *DataStreamServer) IsLastEntryBatchEnd() (isBatchEnd bool, err error) //find end block entry to delete from it onward entryNum := header.TotalEntries - 1 var entry datastreamer.FileEntry - entry, err = srv.stream.GetEntry(entryNum) + entry, err = srv.streamServer.GetEntry(entryNum) if err != nil { return false, err } @@ -452,12 +469,12 @@ func (srv *DataStreamServer) IsLastEntryBatchEnd() (isBatchEnd bool, err error) return uint32(entry.Type) == uint32(types.EntryTypeBatchEnd), nil } -func (srv *DataStreamServer) GetHighestBlockNumber() (uint64, error) { +func (srv *ZkEVMDataStreamServer) GetHighestBlockNumber() (uint64, error) { if srv.highestBlockWritten != nil { return *srv.highestBlockWritten, nil } - header := srv.stream.GetHeader() + header := srv.streamServer.GetHeader() if header.TotalEntries == 0 { return 0, nil @@ -468,7 +485,7 @@ func (srv *DataStreamServer) GetHighestBlockNumber() (uint64, error) { var err error var entry datastreamer.FileEntry for { - entry, err = srv.stream.GetEntry(entryNum) + entry, err = srv.streamServer.GetEntry(entryNum) if err != nil { return 0, err } @@ -497,7 +514,7 @@ func (srv *DataStreamServer) GetHighestBlockNumber() (uint64, error) { return 0, nil } -func (srv *DataStreamServer) GetHighestBatchNumber() (uint64, error) { +func (srv *ZkEVMDataStreamServer) GetHighestBatchNumber() (uint64, error) { if srv.highestBatchWritten != nil { return *srv.highestBatchWritten, nil } @@ -520,7 +537,7 @@ func (srv *DataStreamServer) GetHighestBatchNumber() (uint64, error) { return batch.Number, nil } -func (srv *DataStreamServer) GetHighestClosedBatch() (uint64, error) { +func (srv *ZkEVMDataStreamServer) GetHighestClosedBatch() (uint64, error) { if srv.highestClosedBatchWritten != nil { return *srv.highestClosedBatchWritten, nil } @@ -535,7 +552,7 @@ func (srv *DataStreamServer) GetHighestClosedBatch() (uint64, error) { return number, nil } -func (srv *DataStreamServer) GetHighestClosedBatchNoCache() (uint64, error) { +func (srv *ZkEVMDataStreamServer) GetHighestClosedBatchNoCache() (uint64, error) { 
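Reviewer note: the factory introduced above replaces direct datastreamer.NewServer calls and hides the concrete stream behind the StreamServer interface. A hedged wiring sketch; the port, versions, timeouts and log config mirror the datastream debug host changes later in this diff, while the package name, file name and chain id are placeholders:

package example // hypothetical package, shown only to illustrate the factory wiring

import (
	"time"

	"github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer"
	dslog "github.com/0xPolygonHermez/zkevm-data-streamer/log"
	"github.com/ledgerwatch/erigon/zk/datastream/server"
)

func newDataStreamServer(file string, chainID uint64) (server.DataStreamServer, error) {
	factory := server.NewZkEVMDataStreamServerFactory()

	// Create the low-level stream server (same argument shape as the debug host).
	streamSrv, err := factory.CreateStreamServer(
		uint16(6900), uint8(3), 1, datastreamer.StreamType(1), file,
		5*time.Second, 10*time.Second, 60*time.Second,
		&dslog.Config{Outputs: []string{"stdout"}},
	)
	if err != nil {
		return nil, err
	}

	// Wrap it in the DataStreamServer abstraction, which now carries the chain id.
	return factory.CreateDataStreamServer(streamSrv, chainID), nil
}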
entry, found, err := srv.getLastEntryOfType(datastreamer.EntryType(types.EntryTypeBatchEnd)) if err != nil { return 0, err @@ -555,7 +572,7 @@ func (srv *DataStreamServer) GetHighestClosedBatchNoCache() (uint64, error) { // must be done on offline server // finds the position of the block bookmark entry and deletes from it onward // blockNumber 10 would return the stream to before block 10 bookmark -func (srv *DataStreamServer) UnwindToBlock(blockNumber uint64) error { +func (srv *ZkEVMDataStreamServer) UnwindToBlock(blockNumber uint64) error { // check if server is online // find blockend entry @@ -564,18 +581,18 @@ func (srv *DataStreamServer) UnwindToBlock(blockNumber uint64) error { if err != nil { return err } - entryNum, err := srv.stream.GetBookmark(marshalled) + entryNum, err := srv.streamServer.GetBookmark(marshalled) if err != nil { return err } - return srv.stream.TruncateFile(entryNum) + return srv.streamServer.TruncateFile(entryNum) } // must be done on offline server // finds the position of the endBlock entry for the given number // and unwinds the datastream file to it -func (srv *DataStreamServer) UnwindToBatchStart(batchNumber uint64) error { +func (srv *ZkEVMDataStreamServer) UnwindToBatchStart(batchNumber uint64) error { // check if server is online // find blockend entry @@ -584,21 +601,21 @@ func (srv *DataStreamServer) UnwindToBatchStart(batchNumber uint64) error { if err != nil { return err } - entryNum, err := srv.stream.GetBookmark(marshalled) + entryNum, err := srv.streamServer.GetBookmark(marshalled) if err != nil { return err } - return srv.stream.TruncateFile(entryNum) + return srv.streamServer.TruncateFile(entryNum) } -func (srv *DataStreamServer) getLastEntryOfType(entryType datastreamer.EntryType) (datastreamer.FileEntry, bool, error) { - header := srv.stream.GetHeader() +func (srv *ZkEVMDataStreamServer) getLastEntryOfType(entryType datastreamer.EntryType) (datastreamer.FileEntry, bool, error) { + header := srv.streamServer.GetHeader() emtryEntry := datastreamer.FileEntry{} // loop will become infinite if using unsigned type for entryNum := int64(header.TotalEntries - 1); entryNum >= 0; entryNum-- { - entry, err := srv.stream.GetEntry(uint64(entryNum)) + entry, err := srv.streamServer.GetEntry(uint64(entryNum)) if err != nil { return emtryEntry, false, err } @@ -611,12 +628,12 @@ func (srv *DataStreamServer) getLastEntryOfType(entryType datastreamer.EntryType } type dataStreamServerIterator struct { - stream *datastreamer.StreamServer + stream StreamServer curEntryNum uint64 header uint64 } -func newDataStreamServerIterator(stream *datastreamer.StreamServer, start uint64) *dataStreamServerIterator { +func newDataStreamServerIterator(stream StreamServer, start uint64) *dataStreamServerIterator { return &dataStreamServerIterator{ stream: stream, curEntryNum: start, @@ -650,20 +667,20 @@ func (it *dataStreamServerIterator) NextFileEntry() (entry *types.FileEntry, err }, nil } -func (srv *DataStreamServer) ReadBatches(start uint64, end uint64) ([][]*types.FullL2Block, error) { +func (srv *ZkEVMDataStreamServer) ReadBatches(start uint64, end uint64) ([][]*types.FullL2Block, error) { bookmark := types.NewBookmarkProto(start, datastream.BookmarkType_BOOKMARK_TYPE_BATCH) marshalled, err := bookmark.Marshal() if err != nil { return nil, err } - entryNum, err := srv.stream.GetBookmark(marshalled) + entryNum, err := srv.streamServer.GetBookmark(marshalled) if err != nil { return nil, err } - iterator := newDataStreamServerIterator(srv.stream, entryNum) + iterator := 
newDataStreamServerIterator(srv.streamServer, entryNum) return ReadBatches(iterator, start, end) } diff --git a/zk/datastream/server/datastream_populate.go b/zk/datastream/server/datastream_populate.go index 2e0ee750e83..68544d94b83 100644 --- a/zk/datastream/server/datastream_populate.go +++ b/zk/datastream/server/datastream_populate.go @@ -10,11 +10,11 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/core/rawdb" eritypes "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/zk/datastream/proto/github.com/0xPolygonHermez/zkevm-node/state/datastream" "github.com/ledgerwatch/erigon/zk/hermez_db" "github.com/ledgerwatch/erigon/zk/utils" "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon/eth/stagedsync/stages" ) const ( @@ -28,7 +28,7 @@ const ( // basically writes a whole standalone batch // plus the GER updates if the batch gap is > 1 // starts atomicOp and commits it internally -func (srv *DataStreamServer) WriteWholeBatchToStream( +func (srv *ZkEVMDataStreamServer) WriteWholeBatchToStream( logPrefix string, tx kv.Tx, reader DbReader, @@ -55,10 +55,10 @@ func (srv *DataStreamServer) WriteWholeBatchToStream( return err } - if err = srv.stream.StartAtomicOp(); err != nil { + if err = srv.streamServer.StartAtomicOp(); err != nil { return err } - defer srv.stream.RollbackAtomicOp() + defer srv.streamServer.RollbackAtomicOp() blocks := make([]eritypes.Block, 0) txsPerBlock := make(map[uint64][]eritypes.Transaction) @@ -91,7 +91,7 @@ func (srv *DataStreamServer) WriteWholeBatchToStream( // writes consecutively blocks from-to // checks for all batch related stuff in the meantime - batch start, batche end, etc // starts atomicOp and commits it internally -func (srv *DataStreamServer) WriteBlocksToStreamConsecutively( +func (srv *ZkEVMDataStreamServer) WriteBlocksToStreamConsecutively( ctx context.Context, logPrefix string, tx kv.Tx, @@ -122,10 +122,10 @@ func (srv *DataStreamServer) WriteBlocksToStreamConsecutively( return err } - if err = srv.stream.StartAtomicOp(); err != nil { + if err = srv.streamServer.StartAtomicOp(); err != nil { return err } - defer srv.stream.RollbackAtomicOp() + defer srv.streamServer.RollbackAtomicOp() // check if a new batch starts and the old needs closing before that // if it is already closed with a batch end, do not add a new batch end @@ -201,10 +201,10 @@ LOOP: return err } entries = make([]DataStreamEntryProto, 0, insertEntryCount) - if err = srv.stream.CommitAtomicOp(); err != nil { + if err = srv.streamServer.CommitAtomicOp(); err != nil { return err } - if err = srv.stream.StartAtomicOp(); err != nil { + if err = srv.streamServer.StartAtomicOp(); err != nil { return err } } @@ -224,7 +224,7 @@ LOOP: // gets other needed data from the reader // writes a batchBookmark and batch start (if needed), block bookmark, block and txs in it // basically a full standalone block -func (srv *DataStreamServer) WriteBlockWithBatchStartToStream( +func (srv *ZkEVMDataStreamServer) WriteBlockWithBatchStartToStream( logPrefix string, tx kv.Tx, reader DbReader, @@ -241,10 +241,10 @@ func (srv *DataStreamServer) WriteBlockWithBatchStartToStream( return err } - if err = srv.stream.StartAtomicOp(); err != nil { + if err = srv.streamServer.StartAtomicOp(); err != nil { return err } - defer srv.stream.RollbackAtomicOp() + defer srv.streamServer.RollbackAtomicOp() // if start of new batch add batch start entries var batchStartEntries *DataStreamEntries @@ -285,7 
+285,7 @@ func (srv *DataStreamServer) WriteBlockWithBatchStartToStream( // if there is something, try to unwind it // in the unwind chek if the block is at batch start // if it is - unwind to previous batch's end, so it deletes batch stat of current batch as well -func (srv *DataStreamServer) UnwindIfNecessary(logPrefix string, reader DbReader, blockNum, prevBlockBatchNum, batchNum uint64) error { +func (srv *ZkEVMDataStreamServer) UnwindIfNecessary(logPrefix string, reader DbReader, blockNum, prevBlockBatchNum, batchNum uint64) error { // if from is higher than the last datastream block number - unwind the stream highestDatastreamBlock, err := srv.GetHighestBlockNumber() if err != nil { @@ -323,7 +323,7 @@ func (srv *DataStreamServer) UnwindIfNecessary(logPrefix string, reader DbReader return nil } -func (srv *DataStreamServer) WriteBatchEnd( +func (srv *ZkEVMDataStreamServer) WriteBatchEnd( reader DbReader, batchNumber uint64, stateRoot *common.Hash, @@ -339,10 +339,10 @@ func (srv *DataStreamServer) WriteBatchEnd( return err } - if err = srv.stream.StartAtomicOp(); err != nil { + if err = srv.streamServer.StartAtomicOp(); err != nil { return err } - defer srv.stream.RollbackAtomicOp() + defer srv.streamServer.RollbackAtomicOp() batchEndEntries, err := addBatchEndEntriesProto(batchNumber, stateRoot, gers, localExitRoot) if err != nil { @@ -361,7 +361,7 @@ func (srv *DataStreamServer) WriteBatchEnd( return nil } -func (srv *DataStreamServer) WriteGenesisToStream( +func (srv *ZkEVMDataStreamServer) WriteGenesisToStream( genesis *eritypes.Block, reader *hermez_db.HermezDbReader, tx kv.Tx, @@ -376,11 +376,11 @@ func (srv *DataStreamServer) WriteGenesisToStream( return err } - err = srv.stream.StartAtomicOp() + err = srv.streamServer.StartAtomicOp() if err != nil { return err } - defer srv.stream.RollbackAtomicOp() + defer srv.streamServer.RollbackAtomicOp() batchBookmark := newBatchBookmarkEntryProto(genesis.NumberU64()) l2BlockBookmark := newL2BlockBookmarkEntryProto(genesis.NumberU64()) diff --git a/zk/datastream/server/interfaces.go b/zk/datastream/server/interfaces.go new file mode 100644 index 00000000000..ddc1a71d5c0 --- /dev/null +++ b/zk/datastream/server/interfaces.go @@ -0,0 +1,58 @@ +package server + +import ( + "context" + "time" + + "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" + dslog "github.com/0xPolygonHermez/zkevm-data-streamer/log" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv" + eritypes "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/zk/datastream/types" + "github.com/ledgerwatch/erigon/zk/hermez_db" +) + +//go:generate mockgen -typed=true -destination=../mocks/stream_server_mock.go -package=mocks . StreamServer +//go:generate mockgen -typed=true -destination=../mocks/data_stream_server_mock.go -package=mocks . 
DataStreamServer + +type StreamServer interface { + Start() error + StartAtomicOp() error + AddStreamEntry(etype datastreamer.EntryType, data []byte) (uint64, error) + AddStreamBookmark(bookmark []byte) (uint64, error) + CommitAtomicOp() error + RollbackAtomicOp() error + TruncateFile(entryNum uint64) error + UpdateEntryData(entryNum uint64, etype datastreamer.EntryType, data []byte) error + GetHeader() datastreamer.HeaderEntry + GetEntry(entryNum uint64) (datastreamer.FileEntry, error) + GetBookmark(bookmark []byte) (uint64, error) + GetFirstEventAfterBookmark(bookmark []byte) (datastreamer.FileEntry, error) + GetDataBetweenBookmarks(bookmarkFrom, bookmarkTo []byte) ([]byte, error) + BookmarkPrintDump() +} + +type DataStreamServer interface { + GetStreamServer() StreamServer + GetChainId() uint64 + IsLastEntryBatchEnd() (isBatchEnd bool, err error) + GetHighestBlockNumber() (uint64, error) + GetHighestBatchNumber() (uint64, error) + GetHighestClosedBatch() (uint64, error) + GetHighestClosedBatchNoCache() (uint64, error) + UnwindToBlock(blockNumber uint64) error + UnwindToBatchStart(batchNumber uint64) error + ReadBatches(start uint64, end uint64) ([][]*types.FullL2Block, error) + WriteWholeBatchToStream(logPrefix string, tx kv.Tx, reader DbReader, prevBatchNum, batchNum uint64) error + WriteBlocksToStreamConsecutively(ctx context.Context, logPrefix string, tx kv.Tx, reader DbReader, from, to uint64) error + WriteBlockWithBatchStartToStream(logPrefix string, tx kv.Tx, reader DbReader, forkId, batchNum, prevBlockBatchNum uint64, prevBlock, block eritypes.Block) (err error) + UnwindIfNecessary(logPrefix string, reader DbReader, blockNum, prevBlockBatchNum, batchNum uint64) error + WriteBatchEnd(reader DbReader, batchNumber uint64, stateRoot *common.Hash, localExitRoot *common.Hash) (err error) + WriteGenesisToStream(genesis *eritypes.Block, reader *hermez_db.HermezDbReader, tx kv.Tx) error +} + +type DataStreamServerFactory interface { + CreateStreamServer(port uint16, version uint8, systemID uint64, streamType datastreamer.StreamType, fileName string, writeTimeout time.Duration, inactivityTimeout time.Duration, inactivityCheckInterval time.Duration, cfg *dslog.Config) (StreamServer, error) + CreateDataStreamServer(stream StreamServer, chainId uint64) DataStreamServer +} diff --git a/zk/debug_tools/datastream-host/main.go b/zk/debug_tools/datastream-host/main.go index 003133c617a..41c9faa879f 100644 --- a/zk/debug_tools/datastream-host/main.go +++ b/zk/debug_tools/datastream-host/main.go @@ -9,9 +9,13 @@ import ( "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" log2 "github.com/0xPolygonHermez/zkevm-data-streamer/log" + "github.com/ledgerwatch/erigon/zk/datastream/server" ) -var file = "" +var ( + file = "" + dataStreamServerFactory = server.NewZkEVMDataStreamServerFactory() +) func main() { flag.StringVar(&file, "file", "", "datastream file") @@ -23,7 +27,7 @@ func main() { Outputs: []string{"stdout"}, } - stream, err := datastreamer.NewServer(uint16(6900), uint8(3), 1, datastreamer.StreamType(1), file, 5*time.Second, 10*time.Second, 60*time.Second, logConfig) + stream, err := dataStreamServerFactory.CreateStreamServer(uint16(6900), uint8(3), 1, datastreamer.StreamType(1), file, 5*time.Second, 10*time.Second, 60*time.Second, logConfig) if err != nil { fmt.Println("Error creating datastream server:", err) return diff --git a/zk/legacy_executor_verifier/legacy_executor_verifier.go b/zk/legacy_executor_verifier/legacy_executor_verifier.go index 9415e4ea857..17f5446bf40 100644 --- 
a/zk/legacy_executor_verifier/legacy_executor_verifier.go +++ b/zk/legacy_executor_verifier/legacy_executor_verifier.go @@ -11,8 +11,6 @@ import ( "errors" "fmt" - "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" - "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/core/rawdb" @@ -123,7 +121,7 @@ type LegacyExecutorVerifier struct { executorNumber int cancelAllVerifications atomic.Bool - streamServer *server.DataStreamServer + streamServer server.DataStreamServer WitnessGenerator WitnessGenerator promises []*Promise[*VerifierBundle] @@ -133,12 +131,10 @@ type LegacyExecutorVerifier struct { func NewLegacyExecutorVerifier( cfg ethconfig.Zk, executors []*Executor, - chainCfg *chain.Config, db kv.RwDB, witnessGenerator WitnessGenerator, - stream *datastreamer.StreamServer, + streamServer server.DataStreamServer, ) *LegacyExecutorVerifier { - streamServer := server.NewDataStreamServer(stream, chainCfg.ChainID.Uint64()) return &LegacyExecutorVerifier{ db: db, cfg: cfg, diff --git a/zk/stages/stage_batches.go b/zk/stages/stage_batches.go index ed2b8291fa4..e55ad91f707 100644 --- a/zk/stages/stage_batches.go +++ b/zk/stages/stage_batches.go @@ -66,7 +66,8 @@ type DatastreamClient interface { GetLatestL2Block() (*types.FullL2Block, error) GetProgressAtomic() *atomic.Uint64 Start() error - Stop() + Stop() error + PrepUnwind() } type DatastreamReadRunner interface { @@ -208,7 +209,7 @@ func SpawnStageBatches( log.Info(fmt.Sprintf("[%s] Waiting for at least one new block in datastream", logPrefix), "datastreamBlock", highestDSL2Block.L2BlockNumber, "last processed block", stageProgressBlockNo) newBlockCheckStartTIme = time.Now() } - time.Sleep(1 * time.Second) + time.Sleep(50 * time.Millisecond) } log.Debug(fmt.Sprintf("[%s] Highest block in db and datastream", logPrefix), "datastreamBlock", highestDSL2Block.L2BlockNumber, "dbBlock", stageProgressBlockNo) @@ -314,6 +315,7 @@ func SpawnStageBatches( if tx, err = cfg.db.BeginRw(ctx); err != nil { return fmt.Errorf("failed to open tx, %w", err) } + defer tx.Rollback() hermezDb.SetNewTx(tx) eriDb.SetNewTx(tx) batchProcessor.SetNewTx(tx) @@ -630,6 +632,7 @@ func rollback( tx kv.RwTx, u stagedsync.Unwinder, ) (uint64, error) { + dsQueryClient.PrepUnwind() ancestorBlockNum, ancestorBlockHash, err := findCommonAncestor(eriDb, hermezDb, dsQueryClient, latestDSBlockNum) if err != nil { return 0, err @@ -746,7 +749,9 @@ func newStreamClient(ctx context.Context, cfg BatchesCfg, latestForkId uint64) ( return nil, nil, fmt.Errorf("dsClient.Start: %w", err) } stopFn = func() { - dsClient.Stop() + if err := dsClient.Stop(); err != nil { + log.Warn("Failed to stop datastream client", "err", err) + } } } else { dsClient = cfg.dsClient diff --git a/zk/stages/stage_dataStreamCatchup.go b/zk/stages/stage_data_stream_catch_up.go similarity index 82% rename from zk/stages/stage_dataStreamCatchup.go rename to zk/stages/stage_data_stream_catch_up.go index 1b43a0c5681..0fd50d59029 100644 --- a/zk/stages/stage_dataStreamCatchup.go +++ b/zk/stages/stage_data_stream_catch_up.go @@ -4,7 +4,6 @@ import ( "context" "fmt" - "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/eth/stagedsync" @@ -16,20 +15,18 @@ import ( ) type DataStreamCatchupCfg struct { - db kv.RwDB - stream *datastreamer.StreamServer - chainId uint64 - streamVersion int - 
hasExecutors bool + db kv.RwDB + dataStreamServer server.DataStreamServer + streamVersion int + hasExecutors bool } -func StageDataStreamCatchupCfg(stream *datastreamer.StreamServer, db kv.RwDB, chainId uint64, streamVersion int, hasExecutors bool) DataStreamCatchupCfg { +func StageDataStreamCatchupCfg(dataStreamServer server.DataStreamServer, db kv.RwDB, chainId uint64, streamVersion int, hasExecutors bool) DataStreamCatchupCfg { return DataStreamCatchupCfg{ - stream: stream, - db: db, - chainId: chainId, - streamVersion: streamVersion, - hasExecutors: hasExecutors, + dataStreamServer: dataStreamServer, + db: db, + streamVersion: streamVersion, + hasExecutors: hasExecutors, } } @@ -41,9 +38,8 @@ func SpawnStageDataStreamCatchup( ) error { logPrefix := s.LogPrefix() log.Info(fmt.Sprintf("[%s] Starting...", logPrefix)) - stream := cfg.stream - if stream == nil { + if cfg.dataStreamServer == nil { // skip the stage if there is no streamer provided log.Info(fmt.Sprintf("[%s] no streamer provided, skipping stage", logPrefix)) return nil @@ -61,7 +57,7 @@ func SpawnStageDataStreamCatchup( createdTx = true } - finalBlockNumber, err := CatchupDatastream(ctx, logPrefix, tx, stream, cfg.chainId) + finalBlockNumber, err := CatchupDatastream(ctx, logPrefix, tx, cfg.dataStreamServer) if err != nil { return err } @@ -77,8 +73,7 @@ func SpawnStageDataStreamCatchup( return err } -func CatchupDatastream(ctx context.Context, logPrefix string, tx kv.RwTx, stream *datastreamer.StreamServer, chainId uint64) (uint64, error) { - srv := server.NewDataStreamServer(stream, chainId) +func CatchupDatastream(ctx context.Context, logPrefix string, tx kv.RwTx, srv server.DataStreamServer) (uint64, error) { reader := hermez_db.NewHermezDbReader(tx) var ( @@ -122,7 +117,7 @@ func CatchupDatastream(ctx context.Context, logPrefix string, tx kv.RwTx, stream // a quick check that we haven't written anything to the stream yet. Stage progress is a little misleading // for genesis as we are in fact at block 0 here! Getting the header has some performance overhead, so // we only want to do this when we know the previous progress is 0. 
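Reviewer note: with the chain id carried by the DataStreamServer itself, wiring the refactored catch-up stage reduces to passing the interface value. An illustrative helper, not part of this change; the chainId argument is still accepted by the constructor even though the config no longer stores it:

package stages // illustrative helper only; mirrors how the new config is built in the test below

import (
	"context"

	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/ledgerwatch/erigon/eth/stagedsync"
	"github.com/ledgerwatch/erigon/zk/datastream/server"
)

// runDataStreamCatchupSketch shows the call shape after the refactor: the
// DataStreamServer (built via the factory) is passed straight into the config.
func runDataStreamCatchupSketch(
	ctx context.Context,
	s *stagedsync.StageState,
	tx kv.RwTx,
	db kv.RwDB,
	dataStreamSrv server.DataStreamServer,
	chainID uint64,
	streamVersion int,
) error {
	cfg := StageDataStreamCatchupCfg(dataStreamSrv, db, chainID, streamVersion, false)
	return SpawnStageDataStreamCatchup(s, ctx, tx, cfg)
}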
- header := stream.GetHeader() + header := srv.GetStreamServer().GetHeader() if header.TotalEntries == 0 { genesis, err := rawdb.ReadBlockByNumber(tx, 0) if err != nil { diff --git a/zk/stages/stage_data_stream_catch_up_test.go b/zk/stages/stage_data_stream_catch_up_test.go new file mode 100644 index 00000000000..00b1fb880d6 --- /dev/null +++ b/zk/stages/stage_data_stream_catch_up_test.go @@ -0,0 +1,103 @@ +package stages + +import ( + "context" + "math/big" + "os" + "testing" + + "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/smt/pkg/db" + mocks "github.com/ledgerwatch/erigon/zk/datastream/mock_services" + "github.com/ledgerwatch/erigon/zk/hermez_db" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + "gotest.tools/v3/assert" +) + +func TestSpawnStageDataStreamCatchup(t *testing.T) { + // Arrange + os.Setenv("CDK_ERIGON_SEQUENCER", "1") + + ctx, db1 := context.Background(), memdb.NewTestDB(t) + tx1 := memdb.BeginRw(t, db1) + err := hermez_db.CreateHermezBuckets(tx1) + require.NoError(t, err) + err = db.CreateEriDbBuckets(tx1) + require.NoError(t, err) + + s := &stagedsync.StageState{ID: stages.DataStream, BlockNumber: 0} + + hDB := hermez_db.NewHermezDb(tx1) + + err = hDB.WriteBlockBatch(0, 0) + require.NoError(t, err) + + genesisHeader := &types.Header{ + Number: big.NewInt(0), + Time: 0, + Difficulty: big.NewInt(1), + GasLimit: 8000000, + GasUsed: 0, + ParentHash: common.HexToHash("0x1"), + TxHash: common.HexToHash("0x2"), + ReceiptHash: common.HexToHash("0x3"), + } + + txs := []types.Transaction{} + uncles := []*types.Header{} + receipts := []*types.Receipt{} + withdrawals := []*types.Withdrawal{} + + genesisBlock := types.NewBlock(genesisHeader, txs, uncles, receipts, withdrawals) + + err = rawdb.WriteBlock(tx1, genesisBlock) + require.NoError(t, err) + err = rawdb.WriteCanonicalHash(tx1, genesisBlock.Hash(), genesisBlock.NumberU64()) + require.NoError(t, err) + + err = stages.SaveStageProgress(tx1, stages.DataStream, 0) + require.NoError(t, err) + err = stages.SaveStageProgress(tx1, stages.Execution, 20) + require.NoError(t, err) + + chainID := uint64(1) + streamVersion := 1 + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + streamServerMock := mocks.NewMockStreamServer(mockCtrl) + dataStreamServerMock := mocks.NewMockDataStreamServer(mockCtrl) + + streamServerHeader := datastreamer.HeaderEntry{TotalEntries: 0} + streamServerMock.EXPECT().GetHeader().Return(streamServerHeader) + + dataStreamServerMock.EXPECT().GetHighestBlockNumber().Return(uint64(0), nil) + dataStreamServerMock.EXPECT().GetStreamServer().Return(streamServerMock) + + hDBReaderMatcher := gomock.AssignableToTypeOf(&hermez_db.HermezDbReader{}) + + dataStreamServerMock.EXPECT().WriteGenesisToStream(gomock.Cond(func(x any) bool { + return x.(*types.Block).Hash() == genesisBlock.Hash() + }), hDBReaderMatcher, tx1).Return(nil) + + dataStreamServerMock.EXPECT().WriteBlocksToStreamConsecutively(ctx, s.LogPrefix(), tx1, hDBReaderMatcher, uint64(1), uint64(20)).Return(nil) + + cfg := StageDataStreamCatchupCfg(dataStreamServerMock, db1, chainID, streamVersion, true) + + // Act + err = SpawnStageDataStreamCatchup(s, ctx, tx1, cfg) + require.NoError(t, err) + + // 
Assert + // check SaveStageProgress + stageProgress, err := stages.GetStageProgress(tx1, stages.DataStream) + require.NoError(t, err) + assert.Equal(t, uint64(20), stageProgress) +} diff --git a/zk/stages/stage_l1_sequencer_sync.go b/zk/stages/stage_l1_sequencer_sync.go index ee2e12f83ca..cac85804941 100644 --- a/zk/stages/stage_l1_sequencer_sync.go +++ b/zk/stages/stage_l1_sequencer_sync.go @@ -64,7 +64,6 @@ func SpawnL1SequencerSyncStage( } if progress == 0 { progress = cfg.zkCfg.L1FirstBlock - 1 - } // if the flag is set - wait for that block to be finalized on L1 before continuing @@ -200,7 +199,7 @@ Loop: const ( injectedBatchLogTransactionStartByte = 128 - injectedBatchLastGerStartByte = 31 + injectedBatchLastGerStartByte = 32 injectedBatchLastGerEndByte = 64 injectedBatchSequencerStartByte = 76 injectedBatchSequencerEndByte = 96 diff --git a/zk/stages/stage_l1_sequencer_sync_test.go b/zk/stages/stage_l1_sequencer_sync_test.go new file mode 100644 index 00000000000..5dc1f836dbb --- /dev/null +++ b/zk/stages/stage_l1_sequencer_sync_test.go @@ -0,0 +1,290 @@ +package stages + +import ( + "context" + "math/big" + "testing" + "time" + + ethereum "github.com/ledgerwatch/erigon" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon/commands/mocks" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/smt/pkg/db" + "github.com/ledgerwatch/erigon/zk/contracts" + "github.com/ledgerwatch/erigon/zk/hermez_db" + "github.com/ledgerwatch/erigon/zk/syncer" + "github.com/ledgerwatch/log/v3" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +func TestSpawnL1SequencerSyncStage(t *testing.T) { + // arrange + ctx, db1 := context.Background(), memdb.NewTestDB(t) + tx := memdb.BeginRw(t, db1) + err := hermez_db.CreateHermezBuckets(tx) + require.NoError(t, err) + err = db.CreateEriDbBuckets(tx) + require.NoError(t, err) + + hDB := hermez_db.NewHermezDb(tx) + err = hDB.WriteBlockBatch(0, 0) + require.NoError(t, err) + err = stages.SaveStageProgress(tx, stages.L1SequencerSync, 0) + require.NoError(t, err) + + s := &stagedsync.StageState{ID: stages.L1SequencerSync, BlockNumber: 0} + u := &stagedsync.Sync{} + + // mocks + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + EthermanMock := mocks.NewMockIEtherman(mockCtrl) + + l1ContractAddresses := []common.Address{ + common.HexToAddress("0x1"), + common.HexToAddress("0x2"), + common.HexToAddress("0x3"), + } + l1ContractTopics := [][]common.Hash{ + []common.Hash{common.HexToHash("0x1")}, + []common.Hash{common.HexToHash("0x2")}, + []common.Hash{common.HexToHash("0x3")}, + } + + l1FirstBlock := big.NewInt(20) + + finalizedBlockParentHash := common.HexToHash("0x123456789") + finalizedBlockTime := uint64(time.Now().Unix()) + finalizedBlockNumber := big.NewInt(21) + finalizedBlockHeader := &types.Header{ParentHash: finalizedBlockParentHash, Number: finalizedBlockNumber, Time: finalizedBlockTime} + finalizedBlock := types.NewBlockWithHeader(finalizedBlockHeader) + + latestBlockParentHash := finalizedBlock.Hash() + latestBlockTime := uint64(time.Now().Unix()) + latestBlockNumber := big.NewInt(22) + latestBlockHeader := &types.Header{ParentHash: latestBlockParentHash, Number: latestBlockNumber, Time: 
latestBlockTime} + latestBlock := types.NewBlockWithHeader(latestBlockHeader) + + EthermanMock.EXPECT().HeaderByNumber(gomock.Any(), finalizedBlockNumber).Return(finalizedBlockHeader, nil).AnyTimes() + EthermanMock.EXPECT().BlockByNumber(gomock.Any(), big.NewInt(rpc.FinalizedBlockNumber.Int64())).Return(finalizedBlock, nil).AnyTimes() + EthermanMock.EXPECT().HeaderByNumber(gomock.Any(), latestBlockNumber).Return(latestBlockHeader, nil).AnyTimes() + EthermanMock.EXPECT().BlockByNumber(gomock.Any(), nil).Return(latestBlock, nil).AnyTimes() + + filterQuery := ethereum.FilterQuery{ + FromBlock: l1FirstBlock, + ToBlock: latestBlockNumber, + Addresses: l1ContractAddresses, + Topics: l1ContractTopics, + } + + type testCase struct { + name string + getLog func(hDB *hermez_db.HermezDb) (types.Log, error) + assert func(t *testing.T, hDB *hermez_db.HermezDb) + } + + const ( + forkIdBytesStartPosition = 64 + forkIdBytesEndPosition = 96 + rollupDataSize = 100 + + injectedBatchLogTransactionStartByte = 128 + injectedBatchLastGerStartByte = 32 + injectedBatchLastGerEndByte = 64 + injectedBatchSequencerStartByte = 76 + injectedBatchSequencerEndByte = 96 + ) + + testCases := []testCase{ + { + name: "InitialSequenceBatchesTopic", + getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { + ger := common.HexToHash("0x111111111") + sequencer := common.HexToAddress("0x222222222") + batchL2Data := common.HexToHash("0x333333333") + + initialSequenceBatchesData := make([]byte, 200) + copy(initialSequenceBatchesData[injectedBatchLastGerStartByte:injectedBatchLastGerEndByte], ger.Bytes()) + copy(initialSequenceBatchesData[injectedBatchSequencerStartByte:injectedBatchSequencerEndByte], sequencer.Bytes()) + copy(initialSequenceBatchesData[injectedBatchLogTransactionStartByte:], batchL2Data.Bytes()) + return types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.InitialSequenceBatchesTopic}, + Data: initialSequenceBatchesData, + }, nil + }, + assert: func(t *testing.T, hDB *hermez_db.HermezDb) { + ger := common.HexToHash("0x111111111") + sequencer := common.HexToAddress("0x222222222") + batchL2Data := common.HexToHash("0x333333333") + + l1InjectedBatch, err := hDB.GetL1InjectedBatch(0) + require.NoError(t, err) + + assert.Equal(t, l1InjectedBatch.L1BlockNumber, latestBlock.NumberU64()) + assert.Equal(t, l1InjectedBatch.Timestamp, latestBlock.Time()) + assert.Equal(t, l1InjectedBatch.L1BlockHash, latestBlock.Hash()) + assert.Equal(t, l1InjectedBatch.L1ParentHash, latestBlock.ParentHash()) + assert.Equal(t, l1InjectedBatch.LastGlobalExitRoot.String(), ger.String()) + assert.Equal(t, l1InjectedBatch.Sequencer.String(), sequencer.String()) + assert.ElementsMatch(t, l1InjectedBatch.Transaction, batchL2Data.Bytes()) + }, + }, + { + name: "AddNewRollupType", + getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { + rollupType := uint64(1) + rollupTypeHash := common.BytesToHash(big.NewInt(0).SetUint64(rollupType).Bytes()) + rollupData := make([]byte, rollupDataSize) + rollupForkId := uint64(111) + rollupForkIdHash := common.BytesToHash(big.NewInt(0).SetUint64(rollupForkId).Bytes()) + copy(rollupData[forkIdBytesStartPosition:forkIdBytesEndPosition], rollupForkIdHash.Bytes()) + return types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.AddNewRollupTypeTopic, rollupTypeHash}, + Data: rollupData, + }, nil + }, + assert: func(t *testing.T, hDB *hermez_db.HermezDb) { + forkID, err := 
hDB.GetForkFromRollupType(uint64(1)) + require.NoError(t, err) + + assert.Equal(t, forkID, uint64(111)) + }, + }, + { + name: "AddNewRollupTypeTopicBanana", + getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { + rollupType := uint64(2) + rollupTypeHash := common.BytesToHash(big.NewInt(0).SetUint64(rollupType).Bytes()) + rollupData := make([]byte, rollupDataSize) + rollupForkId := uint64(222) + rollupForkIdHash := common.BytesToHash(big.NewInt(0).SetUint64(rollupForkId).Bytes()) + copy(rollupData[forkIdBytesStartPosition:forkIdBytesEndPosition], rollupForkIdHash.Bytes()) + return types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.AddNewRollupTypeTopicBanana, rollupTypeHash}, + Data: rollupData, + }, nil + }, + assert: func(t *testing.T, hDB *hermez_db.HermezDb) { + forkID, err := hDB.GetForkFromRollupType(uint64(2)) + require.NoError(t, err) + + assert.Equal(t, forkID, uint64(222)) + }, + }, + { + name: "CreateNewRollupTopic", + getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { + rollupID := uint64(99999) + rollupIDHash := common.BytesToHash(big.NewInt(0).SetUint64(rollupID).Bytes()) + rollupType := uint64(33) + rollupForkID := uint64(333) + if funcErr := hDB.WriteRollupType(rollupType, rollupForkID); funcErr != nil { + return types.Log{}, funcErr + } + newRollupDataCreation := common.BytesToHash(big.NewInt(0).SetUint64(rollupType).Bytes()).Bytes() + + return types.Log{ + BlockNumber: latestBlockNumber.Uint64(), + Address: l1ContractAddresses[0], + Topics: []common.Hash{contracts.CreateNewRollupTopic, rollupIDHash}, + Data: newRollupDataCreation, + }, nil + }, + assert: func(t *testing.T, hDB *hermez_db.HermezDb) { + forks, batches, err := hDB.GetAllForkHistory() + for i := 0; i < len(forks); i++ { + if forks[i] == uint64(333) { + assert.Equal(t, batches[i], uint64(0)) + break + } + } + require.NoError(t, err) + }, + }, + { + name: "UpdateRollupTopic", + getLog: func(hDB *hermez_db.HermezDb) (types.Log, error) { + rollupID := uint64(99999) + rollupIDHash := common.BytesToHash(big.NewInt(0).SetUint64(rollupID).Bytes()) + rollupType := uint64(44) + rollupTypeHash := common.BytesToHash(big.NewInt(0).SetUint64(rollupType).Bytes()) + rollupForkID := uint64(444) + if funcErr := hDB.WriteRollupType(rollupType, rollupForkID); funcErr != nil { + return types.Log{}, funcErr + } + latestVerified := uint64(4444) + latestVerifiedHash := common.BytesToHash(big.NewInt(0).SetUint64(latestVerified).Bytes()) + updateRollupData := rollupTypeHash.Bytes() + updateRollupData = append(updateRollupData, latestVerifiedHash.Bytes()...) 
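+ // updateRollupData mimics the UpdateRollup event payload assumed by this test: the 32-byte rollup type hash followed by the 32-byte latest verified batch number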
+
+ return types.Log{
+ BlockNumber: latestBlockNumber.Uint64(),
+ Address: l1ContractAddresses[0],
+ Topics: []common.Hash{contracts.UpdateRollupTopic, rollupIDHash},
+ Data: updateRollupData,
+ }, nil
+ },
+ assert: func(t *testing.T, hDB *hermez_db.HermezDb) {
+ forks, batches, err := hDB.GetAllForkHistory()
+ for i := 0; i < len(forks); i++ {
+ if forks[i] == uint64(444) {
+ assert.Equal(t, batches[i], uint64(4444))
+ break
+ }
+ }
+ require.NoError(t, err)
+ },
+ },
+ }
+
+ filteredLogs := []types.Log{}
+ for _, tc := range testCases {
+ ll, err := tc.getLog(hDB)
+ require.NoError(t, err)
+ filteredLogs = append(filteredLogs, ll)
+ }
+
+ EthermanMock.EXPECT().FilterLogs(gomock.Any(), filterQuery).Return(filteredLogs, nil).AnyTimes()
+
+ l1Syncer := syncer.NewL1Syncer(ctx, []syncer.IEtherman{EthermanMock}, l1ContractAddresses, l1ContractTopics, 10, 0, "latest")
+ // updater := l1infotree.NewUpdater(&ethconfig.Zk{}, l1Syncer)
+ zkCfg := &ethconfig.Zk{
+ L1RollupId: uint64(99999),
+ L1FirstBlock: l1FirstBlock.Uint64(),
+ L1FinalizedBlockRequirement: uint64(21),
+ }
+ cfg := StageL1SequencerSyncCfg(db1, zkCfg, l1Syncer)
+
+ // act
+ err = SpawnL1SequencerSyncStage(s, u, tx, cfg, ctx, log.New())
+ require.NoError(t, err)
+
+ // assert
+ for _, tc := range testCases {
+ tc.assert(t, hDB)
+ }
+}
+
+func TestUnwindL1SequencerSyncStage(t *testing.T) {
+ err := UnwindL1SequencerSyncStage(nil, nil, L1SequencerSyncCfg{}, context.Background())
+ assert.Nil(t, err)
+}
+
+func TestPruneL1SequencerSyncStage(t *testing.T) {
+ err := PruneL1SequencerSyncStage(nil, nil, L1SequencerSyncCfg{}, context.Background())
+ assert.Nil(t, err)
+}
diff --git a/zk/stages/stage_sequence_execute.go b/zk/stages/stage_sequence_execute.go
index b93182db913..510c74dfe49 100644
--- a/zk/stages/stage_sequence_execute.go
+++ b/zk/stages/stage_sequence_execute.go
@@ -42,7 +42,7 @@ func SpawnSequencingStage(
 return err
 }
- highestBatchInDs, err := cfg.datastreamServer.GetHighestBatchNumber()
+ highestBatchInDs, err := cfg.dataStreamServer.GetHighestBatchNumber()
 if err != nil {
 return err
 }
@@ -132,7 +132,7 @@ func sequencingBatchStep(
 return err
 }
- if err = cfg.datastreamServer.WriteWholeBatchToStream(logPrefix, sdb.tx, sdb.hermezDb.HermezDbReader, lastBatch, injectedBatchBatchNumber); err != nil {
+ if err = cfg.dataStreamServer.WriteWholeBatchToStream(logPrefix, sdb.tx, sdb.hermezDb.HermezDbReader, lastBatch, injectedBatchBatchNumber); err != nil {
 return err
 }
 if err = stages.SaveStageProgress(sdb.tx, stages.DataStream, 1); err != nil {
diff --git a/zk/stages/stage_sequence_execute_data_stream.go b/zk/stages/stage_sequence_execute_data_stream.go
index 00b32cc9393..20258f03fa4 100644
--- a/zk/stages/stage_sequence_execute_data_stream.go
+++ b/zk/stages/stage_sequence_execute_data_stream.go
@@ -20,7 +20,7 @@ type SequencerBatchStreamWriter struct {
 logPrefix string
 legacyVerifier *verifier.LegacyExecutorVerifier
 sdb *stageDb
- streamServer *server.DataStreamServer
+ streamServer server.DataStreamServer
 hasExecutors bool
 }
@@ -32,7 +32,7 @@ func newSequencerBatchStreamWriter(batchContext *BatchContext, batchState *Batch
 logPrefix: batchContext.s.LogPrefix(),
 legacyVerifier: batchContext.cfg.legacyVerifier,
 sdb: batchContext.sdb,
- streamServer: batchContext.cfg.datastreamServer,
+ streamServer: batchContext.cfg.dataStreamServer,
 hasExecutors: batchState.hasExecutorForThisBatch,
 }
 }
@@ -107,17 +107,17 @@ func (sbc *SequencerBatchStreamWriter) writeBlockDetailsToDatastream(verifiedBun
 }
 func alignExecutionToDatastream(batchContext 
*BatchContext, lastExecutedBlock uint64, u stagedsync.Unwinder) (bool, error) { - lastStartedDatastreamBatch, err := batchContext.cfg.datastreamServer.GetHighestBatchNumber() + lastStartedDatastreamBatch, err := batchContext.cfg.dataStreamServer.GetHighestBatchNumber() if err != nil { return false, err } - lastClosedDatastreamBatch, err := batchContext.cfg.datastreamServer.GetHighestClosedBatch() + lastClosedDatastreamBatch, err := batchContext.cfg.dataStreamServer.GetHighestClosedBatch() if err != nil { return false, err } - lastDatastreamBlock, err := batchContext.cfg.datastreamServer.GetHighestBlockNumber() + lastDatastreamBlock, err := batchContext.cfg.dataStreamServer.GetHighestBlockNumber() if err != nil { return false, err } @@ -147,7 +147,7 @@ func alignExecutionToDatastream(batchContext *BatchContext, lastExecutedBlock ui } func finalizeLastBatchInDatastreamIfNotFinalized(batchContext *BatchContext, batchToClose, blockToCloseAt uint64) error { - isLastEntryBatchEnd, err := batchContext.cfg.datastreamServer.IsLastEntryBatchEnd() + isLastEntryBatchEnd, err := batchContext.cfg.dataStreamServer.IsLastEntryBatchEnd() if err != nil { return err } @@ -168,7 +168,7 @@ func finalizeLastBatchInDatastream(batchContext *BatchContext, batchToClose, blo return err } root := lastBlock.Root() - if err = batchContext.cfg.datastreamServer.WriteBatchEnd(batchContext.sdb.hermezDb, batchToClose, &root, &ler); err != nil { + if err = batchContext.cfg.dataStreamServer.WriteBatchEnd(batchContext.sdb.hermezDb, batchToClose, &root, &ler); err != nil { return err } return nil diff --git a/zk/stages/stage_sequence_execute_resequence.go b/zk/stages/stage_sequence_execute_resequence.go index dee485079fc..d7fa2e18ab7 100644 --- a/zk/stages/stage_sequence_execute_resequence.go +++ b/zk/stages/stage_sequence_execute_resequence.go @@ -23,12 +23,12 @@ func resequence( log.Info(fmt.Sprintf("[%s] Last batch %d is lower than highest batch in datastream %d, resequencing...", s.LogPrefix(), lastBatch, highestBatchInDs)) - batches, err := cfg.datastreamServer.ReadBatches(lastBatch+1, highestBatchInDs) + batches, err := cfg.dataStreamServer.ReadBatches(lastBatch+1, highestBatchInDs) if err != nil { return err } - if err = cfg.datastreamServer.UnwindToBatchStart(lastBatch + 1); err != nil { + if err = cfg.dataStreamServer.UnwindToBatchStart(lastBatch + 1); err != nil { return err } diff --git a/zk/stages/stage_sequence_execute_utils.go b/zk/stages/stage_sequence_execute_utils.go index c72c7954de4..62460830fc5 100644 --- a/zk/stages/stage_sequence_execute_utils.go +++ b/zk/stages/stage_sequence_execute_utils.go @@ -14,7 +14,6 @@ import ( "fmt" - "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/consensus" @@ -33,13 +32,13 @@ import ( "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/ledgerwatch/erigon/zk/datastream/server" "github.com/ledgerwatch/erigon/zk/hermez_db" + "github.com/ledgerwatch/erigon/zk/l1infotree" verifier "github.com/ledgerwatch/erigon/zk/legacy_executor_verifier" zktx "github.com/ledgerwatch/erigon/zk/tx" "github.com/ledgerwatch/erigon/zk/txpool" zktypes "github.com/ledgerwatch/erigon/zk/types" "github.com/ledgerwatch/erigon/zk/utils" "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon/zk/l1infotree" ) const ( @@ -75,8 +74,7 @@ type SequenceBlockCfg struct { syncCfg ethconfig.Sync genesis *types.Genesis agg *libstate.Aggregator - stream 
*datastreamer.StreamServer - datastreamServer *server.DataStreamServer + dataStreamServer server.DataStreamServer zk *ethconfig.Zk miningConfig *params.MiningConfig @@ -107,7 +105,7 @@ func StageSequenceBlocksCfg( genesis *types.Genesis, syncCfg ethconfig.Sync, agg *libstate.Aggregator, - stream *datastreamer.StreamServer, + dataStreamServer server.DataStreamServer, zk *ethconfig.Zk, miningConfig *params.MiningConfig, @@ -135,8 +133,7 @@ func StageSequenceBlocksCfg( historyV3: historyV3, syncCfg: syncCfg, agg: agg, - stream: stream, - datastreamServer: server.NewDataStreamServer(stream, chainConfig.ChainID.Uint64()), + dataStreamServer: dataStreamServer, zk: zk, miningConfig: miningConfig, txPool: txPool, @@ -173,10 +170,10 @@ func (sCfg *SequenceBlockCfg) toErigonExecuteBlockCfg() stagedsync.ExecuteBlockC func validateIfDatastreamIsAheadOfExecution( s *stagedsync.StageState, -// u stagedsync.Unwinder, + // u stagedsync.Unwinder, ctx context.Context, cfg SequenceBlockCfg, -// historyCfg stagedsync.HistoryCfg, + // historyCfg stagedsync.HistoryCfg, ) error { roTx, err := cfg.db.BeginRo(ctx) if err != nil { @@ -189,7 +186,7 @@ func validateIfDatastreamIsAheadOfExecution( return err } - lastDatastreamBlock, err := cfg.datastreamServer.GetHighestBlockNumber() + lastDatastreamBlock, err := cfg.dataStreamServer.GetHighestBlockNumber() if err != nil { return err } diff --git a/zk/stages/stages.go b/zk/stages/stages.go index f1335b12164..4ada15e99ec 100644 --- a/zk/stages/stages.go +++ b/zk/stages/stages.go @@ -10,6 +10,11 @@ import ( stages "github.com/ledgerwatch/erigon/eth/stagedsync" stages2 "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/zk/datastream/server" +) + +var ( + dataStreamServerFactory = server.NewZkEVMDataStreamServerFactory() ) func SequencerZkStages( diff --git a/zk/stages/test_utils.go b/zk/stages/test_utils.go index f24557522b4..221ccc1734b 100644 --- a/zk/stages/test_utils.go +++ b/zk/stages/test_utils.go @@ -100,6 +100,11 @@ func (c *TestDatastreamClient) Start() error { return nil } -func (c *TestDatastreamClient) Stop() { +func (c *TestDatastreamClient) Stop() error { c.isStarted = false + return nil +} + +func (c *TestDatastreamClient) PrepUnwind() { + // do nothing } diff --git a/zk/tests/unwinds/unwind.sh b/zk/tests/unwinds/unwind.sh index 84b5f436180..b48f1c15c55 100755 --- a/zk/tests/unwinds/unwind.sh +++ b/zk/tests/unwinds/unwind.sh @@ -15,8 +15,8 @@ dataPath="./datadir" firstStop=11204 stopBlock=11315 unwindBatch=70 -firstTimeout=150s -secondTimeout=150s +firstTimeout=300s +secondTimeout=300s rm -rf "$dataPath/rpc-datadir" rm -rf "$dataPath/phase1-dump1" @@ -27,10 +27,10 @@ rm -rf "$dataPath/phase1-diffs" rm -rf "$dataPath/phase2-diffs" # run datastream server -timeout 600s go run ./zk/debug_tools/datastream-host --file="$(pwd)/zk/tests/unwinds/datastream/hermez-dynamic-integration8-datastream/data-stream.bin" & +go run ./zk/debug_tools/datastream-host --file="$(pwd)/zk/tests/unwinds/datastream/hermez-dynamic-integration8-datastream/data-stream.bin" & # in order to start the datastream server -sleep 5 +sleep 10 # run erigon for a while to sync to the unwind point to capture the dump timeout $firstTimeout ./build/bin/cdk-erigon \ diff --git a/zk/txpool/fetch_test.go b/zk/txpool/fetch_test.go new file mode 100644 index 00000000000..c912ec6d0b6 --- /dev/null +++ b/zk/txpool/fetch_test.go @@ -0,0 +1,96 @@ +package txpool + +import ( + "context" + "encoding/hex" + "fmt" + "sync" + "testing" + "time" + + 
"github.com/ledgerwatch/erigon-lib/common/datadir" + "github.com/ledgerwatch/erigon-lib/common/u256" + "github.com/ledgerwatch/erigon-lib/direct" + "github.com/ledgerwatch/erigon-lib/gointerfaces" + "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" + "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon-lib/kv/temporal/temporaltest" + "github.com/ledgerwatch/erigon-lib/txpool" + "github.com/ledgerwatch/erigon-lib/txpool/txpoolcfg" + types "github.com/ledgerwatch/erigon-lib/types" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +var peerID types.PeerID = gointerfaces.ConvertHashToH512([64]byte{0x12, 0x34, 0x50}) // "12345" + +// DecodeHex converts a hex string to a byte array. +func decodeHex(in string) []byte { + payload, err := hex.DecodeString(in) + if err != nil { + panic(err) + } + return payload +} + +func TestFetch(t *testing.T) { + assert, require := assert.New(t), require.New(t) + ch := make(chan types.Announcements, 100) + _, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + defer coreDB.Close() + db := memdb.NewTestPoolDB(t) + path := fmt.Sprintf("/tmp/db-test-%v", time.Now().UTC().Format(time.RFC3339Nano)) + txPoolDB := newTestTxPoolDB(t, path) + defer txPoolDB.Close() + aclsDB := newTestACLDB(t, path) + defer aclsDB.Close() + + // Check if the dbs are created. + require.NotNil(t, db) + require.NotNil(t, txPoolDB) + require.NotNil(t, aclsDB) + + cfg := txpoolcfg.DefaultConfig + ethCfg := ðconfig.Defaults + sendersCache := kvcache.New(kvcache.DefaultCoherentConfig) + pool, err := New(ch, coreDB, cfg, ethCfg, sendersCache, *u256.N1, nil, nil, aclsDB) + assert.NoError(err) + require.True(pool != nil) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ctrl := gomock.NewController(t) + remoteKvClient := remote.NewMockKVClient(ctrl) + sentryServer := sentry.NewMockSentryServer(ctrl) + + m := txpool.NewMockSentry(ctx, sentryServer) + sentryClient := direct.NewSentryClientDirect(direct.ETH66, m) + fetch := NewFetch(ctx, []direct.SentryClient{sentryClient}, pool, remoteKvClient, nil, nil, *u256.N1) + var wg sync.WaitGroup + fetch.SetWaitGroup(&wg) + // The corresponding WaitGroup.Done() will be called by the Sentry. + // First will be called by (txpool.MockSentry).Messages + // Second will be called by (txpool.MockSentry).PeerEvents + m.StreamWg.Add(2) + fetch.ConnectSentries() + m.StreamWg.Wait() + + // Send one transaction id with ETH66 protocol. + // The corresponding WaitGroup.Done() will be called by the fetch.receiveMessage() + wg.Add(1) + errs := m.Send(&sentry.InboundMessage{ + Id: sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66, + Data: decodeHex("e1a0595e27a835cd79729ff1eeacec3120eeb6ed1464a04ec727aaca734ead961328"), + PeerId: peerID, + }) + for i, err := range errs { + if err != nil { + t.Errorf("sending new pool txn hashes 66 (%d): %v", i, err) + } + } + wg.Wait() +} diff --git a/zk/txpool/pool.go b/zk/txpool/pool.go index 6f6b7789be4..023212b7942 100644 --- a/zk/txpool/pool.go +++ b/zk/txpool/pool.go @@ -77,6 +77,8 @@ var ( // Pool is interface for the transaction pool // This interface exists for the convenience of testing, and not yet because // there are multiple implementations +// +//go:generate mockgen -typed=true -destination=./pool_mock.go -package=txpool . 
Pool type Pool interface { ValidateSerializedTxn(serializedTxn []byte) error diff --git a/zk/txpool/pool_mock.go b/zk/txpool/pool_mock.go new file mode 100644 index 00000000000..10d4aab2fc4 --- /dev/null +++ b/zk/txpool/pool_mock.go @@ -0,0 +1,346 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ledgerwatch/erigon/zk/txpool (interfaces: Pool) +// +// Generated by this command: +// +// mockgen -typed=true -destination=./pool_mock.go -package=txpool . Pool +// + +// Package txpool is a generated GoMock package. +package txpool + +import ( + context "context" + reflect "reflect" + + remote "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" + kv "github.com/ledgerwatch/erigon-lib/kv" + types "github.com/ledgerwatch/erigon-lib/types" + gomock "go.uber.org/mock/gomock" +) + +// MockPool is a mock of Pool interface. +type MockPool struct { + ctrl *gomock.Controller + recorder *MockPoolMockRecorder +} + +// MockPoolMockRecorder is the mock recorder for MockPool. +type MockPoolMockRecorder struct { + mock *MockPool +} + +// NewMockPool creates a new mock instance. +func NewMockPool(ctrl *gomock.Controller) *MockPool { + mock := &MockPool{ctrl: ctrl} + mock.recorder = &MockPoolMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPool) EXPECT() *MockPoolMockRecorder { + return m.recorder +} + +// AddLocalTxs mocks base method. +func (m *MockPool) AddLocalTxs(arg0 context.Context, arg1 types.TxSlots, arg2 kv.Tx) ([]DiscardReason, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddLocalTxs", arg0, arg1, arg2) + ret0, _ := ret[0].([]DiscardReason) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AddLocalTxs indicates an expected call of AddLocalTxs. +func (mr *MockPoolMockRecorder) AddLocalTxs(arg0, arg1, arg2 any) *MockPoolAddLocalTxsCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddLocalTxs", reflect.TypeOf((*MockPool)(nil).AddLocalTxs), arg0, arg1, arg2) + return &MockPoolAddLocalTxsCall{Call: call} +} + +// MockPoolAddLocalTxsCall wrap *gomock.Call +type MockPoolAddLocalTxsCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockPoolAddLocalTxsCall) Return(arg0 []DiscardReason, arg1 error) *MockPoolAddLocalTxsCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockPoolAddLocalTxsCall) Do(f func(context.Context, types.TxSlots, kv.Tx) ([]DiscardReason, error)) *MockPoolAddLocalTxsCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockPoolAddLocalTxsCall) DoAndReturn(f func(context.Context, types.TxSlots, kv.Tx) ([]DiscardReason, error)) *MockPoolAddLocalTxsCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// AddNewGoodPeer mocks base method. +func (m *MockPool) AddNewGoodPeer(arg0 types.PeerID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddNewGoodPeer", arg0) +} + +// AddNewGoodPeer indicates an expected call of AddNewGoodPeer. 
+func (mr *MockPoolMockRecorder) AddNewGoodPeer(arg0 any) *MockPoolAddNewGoodPeerCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddNewGoodPeer", reflect.TypeOf((*MockPool)(nil).AddNewGoodPeer), arg0) + return &MockPoolAddNewGoodPeerCall{Call: call} +} + +// MockPoolAddNewGoodPeerCall wrap *gomock.Call +type MockPoolAddNewGoodPeerCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockPoolAddNewGoodPeerCall) Return() *MockPoolAddNewGoodPeerCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockPoolAddNewGoodPeerCall) Do(f func(types.PeerID)) *MockPoolAddNewGoodPeerCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockPoolAddNewGoodPeerCall) DoAndReturn(f func(types.PeerID)) *MockPoolAddNewGoodPeerCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// AddRemoteTxs mocks base method. +func (m *MockPool) AddRemoteTxs(arg0 context.Context, arg1 types.TxSlots) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddRemoteTxs", arg0, arg1) +} + +// AddRemoteTxs indicates an expected call of AddRemoteTxs. +func (mr *MockPoolMockRecorder) AddRemoteTxs(arg0, arg1 any) *MockPoolAddRemoteTxsCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRemoteTxs", reflect.TypeOf((*MockPool)(nil).AddRemoteTxs), arg0, arg1) + return &MockPoolAddRemoteTxsCall{Call: call} +} + +// MockPoolAddRemoteTxsCall wrap *gomock.Call +type MockPoolAddRemoteTxsCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockPoolAddRemoteTxsCall) Return() *MockPoolAddRemoteTxsCall { + c.Call = c.Call.Return() + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockPoolAddRemoteTxsCall) Do(f func(context.Context, types.TxSlots)) *MockPoolAddRemoteTxsCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockPoolAddRemoteTxsCall) DoAndReturn(f func(context.Context, types.TxSlots)) *MockPoolAddRemoteTxsCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetRlp mocks base method. +func (m *MockPool) GetRlp(arg0 kv.Tx, arg1 []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRlp", arg0, arg1) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRlp indicates an expected call of GetRlp. +func (mr *MockPoolMockRecorder) GetRlp(arg0, arg1 any) *MockPoolGetRlpCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRlp", reflect.TypeOf((*MockPool)(nil).GetRlp), arg0, arg1) + return &MockPoolGetRlpCall{Call: call} +} + +// MockPoolGetRlpCall wrap *gomock.Call +type MockPoolGetRlpCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockPoolGetRlpCall) Return(arg0 []byte, arg1 error) *MockPoolGetRlpCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockPoolGetRlpCall) Do(f func(kv.Tx, []byte) ([]byte, error)) *MockPoolGetRlpCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockPoolGetRlpCall) DoAndReturn(f func(kv.Tx, []byte) ([]byte, error)) *MockPoolGetRlpCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// IdHashKnown mocks base method. 
+func (m *MockPool) IdHashKnown(arg0 kv.Tx, arg1 []byte) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IdHashKnown", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IdHashKnown indicates an expected call of IdHashKnown. +func (mr *MockPoolMockRecorder) IdHashKnown(arg0, arg1 any) *MockPoolIdHashKnownCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IdHashKnown", reflect.TypeOf((*MockPool)(nil).IdHashKnown), arg0, arg1) + return &MockPoolIdHashKnownCall{Call: call} +} + +// MockPoolIdHashKnownCall wrap *gomock.Call +type MockPoolIdHashKnownCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockPoolIdHashKnownCall) Return(arg0 bool, arg1 error) *MockPoolIdHashKnownCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockPoolIdHashKnownCall) Do(f func(kv.Tx, []byte) (bool, error)) *MockPoolIdHashKnownCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockPoolIdHashKnownCall) DoAndReturn(f func(kv.Tx, []byte) (bool, error)) *MockPoolIdHashKnownCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// OnNewBlock mocks base method. +func (m *MockPool) OnNewBlock(arg0 context.Context, arg1 *remote.StateChangeBatch, arg2, arg3 types.TxSlots, arg4 kv.Tx) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "OnNewBlock", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(error) + return ret0 +} + +// OnNewBlock indicates an expected call of OnNewBlock. +func (mr *MockPoolMockRecorder) OnNewBlock(arg0, arg1, arg2, arg3, arg4 any) *MockPoolOnNewBlockCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnNewBlock", reflect.TypeOf((*MockPool)(nil).OnNewBlock), arg0, arg1, arg2, arg3, arg4) + return &MockPoolOnNewBlockCall{Call: call} +} + +// MockPoolOnNewBlockCall wrap *gomock.Call +type MockPoolOnNewBlockCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockPoolOnNewBlockCall) Return(arg0 error) *MockPoolOnNewBlockCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockPoolOnNewBlockCall) Do(f func(context.Context, *remote.StateChangeBatch, types.TxSlots, types.TxSlots, kv.Tx) error) *MockPoolOnNewBlockCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockPoolOnNewBlockCall) DoAndReturn(f func(context.Context, *remote.StateChangeBatch, types.TxSlots, types.TxSlots, kv.Tx) error) *MockPoolOnNewBlockCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// Started mocks base method. +func (m *MockPool) Started() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Started") + ret0, _ := ret[0].(bool) + return ret0 +} + +// Started indicates an expected call of Started. 
+func (mr *MockPoolMockRecorder) Started() *MockPoolStartedCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Started", reflect.TypeOf((*MockPool)(nil).Started)) + return &MockPoolStartedCall{Call: call} +} + +// MockPoolStartedCall wrap *gomock.Call +type MockPoolStartedCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockPoolStartedCall) Return(arg0 bool) *MockPoolStartedCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockPoolStartedCall) Do(f func() bool) *MockPoolStartedCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockPoolStartedCall) DoAndReturn(f func() bool) *MockPoolStartedCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// ValidateSerializedTxn mocks base method. +func (m *MockPool) ValidateSerializedTxn(arg0 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ValidateSerializedTxn", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// ValidateSerializedTxn indicates an expected call of ValidateSerializedTxn. +func (mr *MockPoolMockRecorder) ValidateSerializedTxn(arg0 any) *MockPoolValidateSerializedTxnCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateSerializedTxn", reflect.TypeOf((*MockPool)(nil).ValidateSerializedTxn), arg0) + return &MockPoolValidateSerializedTxnCall{Call: call} +} + +// MockPoolValidateSerializedTxnCall wrap *gomock.Call +type MockPoolValidateSerializedTxnCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockPoolValidateSerializedTxnCall) Return(arg0 error) *MockPoolValidateSerializedTxnCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockPoolValidateSerializedTxnCall) Do(f func([]byte) error) *MockPoolValidateSerializedTxnCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockPoolValidateSerializedTxnCall) DoAndReturn(f func([]byte) error) *MockPoolValidateSerializedTxnCall { + c.Call = c.Call.DoAndReturn(f) + return c +} diff --git a/zk/txpool/pool_test.go b/zk/txpool/pool_test.go index ef80af15139..1df01735e3e 100644 --- a/zk/txpool/pool_test.go +++ b/zk/txpool/pool_test.go @@ -3,6 +3,7 @@ package txpool import ( "context" "fmt" + "io" "testing" "time" @@ -12,6 +13,7 @@ import ( "github.com/ledgerwatch/erigon-lib/common/u256" "github.com/ledgerwatch/erigon-lib/gointerfaces" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" + "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon-lib/kv/temporal/temporaltest" @@ -20,6 +22,8 @@ import ( "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + "google.golang.org/grpc" ) func TestNonceFromAddress(t *testing.T) { @@ -169,3 +173,74 @@ func TestNonceFromAddress(t *testing.T) { } } } + +func TestOnNewBlock(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + coreDB, db := memdb.NewTestDB(t), memdb.NewTestDB(t) + ctrl := gomock.NewController(t) + + stream := remote.NewMockKV_StateChangesClient(ctrl) + i := 0 + stream.EXPECT(). + Recv(). 
+ DoAndReturn(func() (*remote.StateChangeBatch, error) { + if i > 0 { + return nil, io.EOF + } + i++ + return &remote.StateChangeBatch{ + StateVersionId: 1, + ChangeBatch: []*remote.StateChange{ + { + Txs: [][]byte{ + decodeHex(types.TxParseMainnetTests[0].PayloadStr), + decodeHex(types.TxParseMainnetTests[1].PayloadStr), + decodeHex(types.TxParseMainnetTests[2].PayloadStr), + }, + BlockHeight: 1, + BlockHash: gointerfaces.ConvertHashToH256([32]byte{}), + }, + }, + }, nil + }). + AnyTimes() + + stateChanges := remote.NewMockKVClient(ctrl) + stateChanges. + EXPECT(). + StateChanges(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, _ *remote.StateChangeRequest, _ ...grpc.CallOption) (remote.KV_StateChangesClient, error) { + return stream, nil + }) + + pool := NewMockPool(ctrl) + pool.EXPECT(). + ValidateSerializedTxn(gomock.Any()). + DoAndReturn(func(_ []byte) error { + return nil + }). + Times(3) + + var minedTxs types.TxSlots + pool.EXPECT(). + OnNewBlock(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn( + func( + _ context.Context, + _ *remote.StateChangeBatch, + _ types.TxSlots, + minedTxsArg types.TxSlots, + _ kv.Tx, + ) error { + minedTxs = minedTxsArg + return nil + }, + ). + Times(1) + + fetch := NewFetch(ctx, nil, pool, stateChanges, coreDB, db, *u256.N1) + err := fetch.handleStateChanges(ctx, stateChanges) + assert.ErrorIs(t, io.EOF, err) + assert.Equal(t, 3, len(minedTxs.Txs)) +} diff --git a/zk/txpool/send_test.go b/zk/txpool/send_test.go new file mode 100644 index 00000000000..256e0374c7e --- /dev/null +++ b/zk/txpool/send_test.go @@ -0,0 +1,176 @@ +package txpool + +import ( + "context" + "fmt" + "testing" + + "github.com/ledgerwatch/erigon-lib/direct" + "github.com/ledgerwatch/erigon-lib/gointerfaces" + "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" + "github.com/ledgerwatch/erigon-lib/gointerfaces/types" + "github.com/ledgerwatch/erigon-lib/txpool" + types2 "github.com/ledgerwatch/erigon-lib/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.uber.org/mock/gomock" +) + +func testRlps(num int) [][]byte { + rlps := make([][]byte, num) + for i := 0; i < num; i++ { + rlps[i] = []byte{1} + } + return rlps +} + +func toHashes(h ...byte) (out types2.Hashes) { + for i := range h { + hash := [32]byte{h[i]} + out = append(out, hash[:]...) + } + return out +} + +func toPeerIDs(h ...byte) (out []types2.PeerID) { + for i := range h { + hash := [64]byte{h[i]} + out = append(out, gointerfaces.ConvertHashToH512(hash)) + } + return out +} + +func TestSendTxPropagate(t *testing.T) { + ctx, cancelFn := context.WithCancel(context.Background()) + defer cancelFn() + t.Run("few remote byHash", func(t *testing.T) { + ctrl := gomock.NewController(t) + sentryServer := sentry.NewMockSentryServer(ctrl) + requests := make([]*sentry.SendMessageToRandomPeersRequest, 0) + + sentryServer.EXPECT(). + SendMessageToRandomPeers(gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, r *sentry.SendMessageToRandomPeersRequest) (*sentry.SentPeers, error) { + requests = append(requests, r) + return nil, nil + }).Times(1) + + sentryServer.EXPECT().SendMessageToAll(gomock.Any(), gomock.Any()). 
+ DoAndReturn(func(_ context.Context, r *sentry.OutboundMessageData) (*sentry.SentPeers, error) { + return nil, nil + }).Times(1) + + m := txpool.NewMockSentry(ctx, sentryServer) + send := NewSend(ctx, []direct.SentryClient{direct.NewSentryClientDirect(direct.ETH68, m)}, nil) + send.BroadcastPooledTxs(testRlps(2)) + send.AnnouncePooledTxs([]byte{0, 1}, []uint32{10, 15}, toHashes(1, 42)) + + require.Equal(t, 1, len(requests)) + + txsMessage := requests[0].Data + assert.Equal(t, sentry.MessageId_TRANSACTIONS_66, txsMessage.Id) + require.True(t, len(txsMessage.Data) > 0) + }) + + t.Run("much remote byHash", func(t *testing.T) { + ctrl := gomock.NewController(t) + sentryServer := sentry.NewMockSentryServer(ctrl) + requests := make([]*sentry.SendMessageToRandomPeersRequest, 0) + + sentryServer.EXPECT(). + SendMessageToRandomPeers(gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, r *sentry.SendMessageToRandomPeersRequest) (*sentry.SentPeers, error) { + requests = append(requests, r) + return nil, nil + }).Times(1) + + m := txpool.NewMockSentry(ctx, sentryServer) + send := NewSend(ctx, []direct.SentryClient{direct.NewSentryClientDirect(direct.ETH68, m)}, nil) + list := make(types2.Hashes, p2pTxPacketLimit*3) + for i := 0; i < len(list); i += 32 { + b := []byte(fmt.Sprintf("%x", i)) + copy(list[i:i+32], b) + } + + sentryServer.EXPECT().SendMessageToAll(gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, r *sentry.OutboundMessageData) (*sentry.SentPeers, error) { + return nil, nil + }).Times(1) + + send.BroadcastPooledTxs(testRlps(len(list) / 32)) + send.AnnouncePooledTxs([]byte{0, 1, 2}, []uint32{10, 12, 14}, list) + + require.Equal(t, 1, len(requests)) + + txsMessage := requests[0].Data + require.Equal(t, sentry.MessageId_TRANSACTIONS_66, txsMessage.Id) + require.True(t, len(txsMessage.Data) > 0) + }) + + t.Run("few local byHash", func(t *testing.T) { + ctrl := gomock.NewController(t) + sentryServer := sentry.NewMockSentryServer(ctrl) + requests := make([]*sentry.SendMessageToRandomPeersRequest, 0) + + sentryServer.EXPECT(). + SendMessageToRandomPeers(gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, r *sentry.SendMessageToRandomPeersRequest) (*sentry.SentPeers, error) { + requests = append(requests, r) + return nil, nil + }).Times(1) + + sentryServer.EXPECT().SendMessageToAll(gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, r *sentry.OutboundMessageData) (*sentry.SentPeers, error) { + return nil, nil + }).Times(1) + + m := txpool.NewMockSentry(ctx, sentryServer) + send := NewSend(ctx, []direct.SentryClient{direct.NewSentryClientDirect(direct.ETH68, m)}, nil) + send.BroadcastPooledTxs(testRlps(2)) + send.AnnouncePooledTxs([]byte{0, 1}, []uint32{10, 15}, toHashes(1, 42)) + + require.Equal(t, 1, len(requests)) + + txsMessage := requests[0].Data + assert.Equal(t, sentry.MessageId_TRANSACTIONS_66, txsMessage.Id) + assert.True(t, len(txsMessage.Data) > 0) + }) + + t.Run("sync with new peer", func(t *testing.T) { + ctrl := gomock.NewController(t) + sentryServer := sentry.NewMockSentryServer(ctrl) + times := 3 + requests := make([]*sentry.SendMessageByIdRequest, 0, times) + + sentryServer.EXPECT(). + SendMessageById(gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, r *sentry.SendMessageByIdRequest) (*sentry.SentPeers, error) { + requests = append(requests, r) + return nil, nil + }). + Times(times) + + sentryServer.EXPECT().PeerById(gomock.Any(), gomock.Any()). 
+ DoAndReturn( + func(_ context.Context, r *sentry.PeerByIdRequest) (*sentry.PeerByIdReply, error) { + return &sentry.PeerByIdReply{ + Peer: &types.PeerInfo{ + Id: r.PeerId.String(), + Caps: []string{"eth/68"}, + }}, nil + }).AnyTimes() + + m := txpool.NewMockSentry(ctx, sentryServer) + send := NewSend(ctx, []direct.SentryClient{direct.NewSentryClientDirect(direct.ETH68, m)}, nil) + expectPeers := toPeerIDs(1, 2, 42) + send.PropagatePooledTxsToPeersList(expectPeers, []byte{0, 1}, []uint32{10, 15}, toHashes(1, 42)) + + require.Equal(t, 3, len(requests)) + for i, req := range requests { + assert.Equal(t, expectPeers[i], types2.PeerID(req.PeerId)) + assert.Equal(t, sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_68, req.Data.Id) + assert.True(t, len(req.Data.Data) > 0) + } + }) +} diff --git a/zk/witness/witness.go b/zk/witness/witness.go index 2350fd250fc..5ae7ac04bcf 100644 --- a/zk/witness/witness.go +++ b/zk/witness/witness.go @@ -35,6 +35,8 @@ import ( "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon-lib/kv/membatchwithdb" + "github.com/holiman/uint256" + "math" ) var ( @@ -44,14 +46,15 @@ var ( ) type Generator struct { - tx kv.Tx - dirs datadir.Dirs - historyV3 bool - agg *libstate.Aggregator - blockReader services.FullBlockReader - chainCfg *chain.Config - zkConfig *ethconfig.Zk - engine consensus.EngineReader + tx kv.Tx + dirs datadir.Dirs + historyV3 bool + agg *libstate.Aggregator + blockReader services.FullBlockReader + chainCfg *chain.Config + zkConfig *ethconfig.Zk + engine consensus.EngineReader + forcedContracts []libcommon.Address } func NewGenerator( @@ -62,15 +65,17 @@ func NewGenerator( chainCfg *chain.Config, zkConfig *ethconfig.Zk, engine consensus.EngineReader, + forcedContracs []libcommon.Address, ) *Generator { return &Generator{ - dirs: dirs, - historyV3: historyV3, - agg: agg, - blockReader: blockReader, - chainCfg: chainCfg, - zkConfig: zkConfig, - engine: engine, + dirs: dirs, + historyV3: historyV3, + agg: agg, + blockReader: blockReader, + chainCfg: chainCfg, + zkConfig: zkConfig, + engine: engine, + forcedContracts: forcedContracs, } } @@ -191,6 +196,12 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, batchNum uint log.Info("Generating witness timing", "batch", batchNum, "blockFrom", blocks[0].NumberU64(), "blockTo", blocks[len(blocks)-1].NumberU64(), "taken", diff) }() + areExecutorUrlsEmpty := len(g.zkConfig.ExecutorUrls) == 0 || g.zkConfig.ExecutorUrls[0] == "" + shouldGenerateMockWitness := g.zkConfig.MockWitnessGeneration && areExecutorUrlsEmpty + if shouldGenerateMockWitness { + return g.generateMockWitness(batchNum, blocks, debug) + } + endBlock := blocks[len(blocks)-1].NumberU64() startBlock := blocks[0].NumberU64() @@ -324,7 +335,6 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, batchNum uint chainReader := stagedsync.NewChainReaderImpl(g.chainCfg, tx, nil, log.New()) _, err = core.ExecuteBlockEphemerallyZk(g.chainCfg, &vmConfig, getHashFn, engine, block, tds, trieStateWriter, chainReader, nil, hermezDb, &prevStateRoot) - if err != nil { return nil, err } @@ -332,12 +342,23 @@ func (g *Generator) generateWitness(tx kv.Tx, ctx context.Context, batchNum uint prevStateRoot = block.Root() } + inclusion := make(map[libcommon.Address][]libcommon.Hash) + for _, contract := range g.forcedContracts { + err = reader.ForEachStorage(contract, libcommon.Hash{}, func(key, secKey libcommon.Hash, value uint256.Int) bool { + inclusion[contract] = append(inclusion[contract], key) + return false + }, math.MaxInt64) 
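+ // note: the callback above collects every storage slot key of the forced contract; these keys feed the SMT retain list resolved below so the witness retains those nodes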
+ if err != nil { + return nil, err + } + } + var rl trie.RetainDecider // if full is true, we will send all the nodes to the witness rl = &trie.AlwaysTrueRetainDecider{} if !witnessFull { - rl, err = tds.ResolveSMTRetainList() + rl, err = tds.ResolveSMTRetainList(inclusion) if err != nil { return nil, err } @@ -362,3 +383,21 @@ func getWitnessBytes(witness *trie.Witness, debug bool) ([]byte, error) { } return buf.Bytes(), nil } + +func (g *Generator) generateMockWitness(batchNum uint64, blocks []*eritypes.Block, debug bool) ([]byte, error) { + mockWitness := []byte("mockWitness") + startBlockNumber := blocks[0].NumberU64() + endBlockNumber := blocks[len(blocks)-1].NumberU64() + + if debug { + log.Info( + "Generated mock witness", + "witness", mockWitness, + "batch", batchNum, + "startBlockNumber", startBlockNumber, + "endBlockNumber", endBlockNumber, + ) + } + + return mockWitness, nil +}