Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Cherrypick/until v0.5.0 rc7 #634

Merged
merged 6 commits into from
May 21, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 5 additions & 3 deletions claimtxman/claimtxman.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ func NewClaimTxManager(ctx context.Context, cfg Config, chExitRootEvent chan *et
var monitorTx ctmtypes.TxMonitorer
if cfg.GroupingClaims.Enabled {
log.Info("ClaimTxManager working in compressor mode to group claim txs")
monitorTx = NewMonitorCompressedTxs(ctx, storage.(StorageCompressedInterface), client, cfg, nonceCache, auth, etherMan, utils.NewTimeProviderSystemLocalTime())
monitorTx = NewMonitorCompressedTxs(ctx, storage.(StorageCompressedInterface), client, cfg, nonceCache, auth, etherMan, utils.NewTimeProviderSystemLocalTime(), cfg.GroupingClaims.GasOffset)
} else {
log.Info("ClaimTxManager working in regular mode to send claim txs individually")
monitorTx = NewMonitorTxs(ctx, storage.(StorageInterface), client, cfg, nonceCache, auth)
Expand Down Expand Up @@ -94,7 +94,8 @@ func NewClaimTxManager(ctx context.Context, cfg Config, chExitRootEvent chan *et
func (tm *ClaimTxManager) Start() {
ticker := time.NewTicker(tm.cfg.FrequencyToMonitorTxs.Duration)
compressorTicker := time.NewTicker(tm.cfg.GroupingClaims.FrequencyToProcessCompressedClaims.Duration)
var ger *etherman.GlobalExitRoot
var ger = &etherman.GlobalExitRoot{}
var latestProcessedGer common.Hash
for {
select {
case <-tm.ctx.Done():
Expand All @@ -121,14 +122,15 @@ func (tm *ClaimTxManager) Start() {
log.Infof("Waiting for networkID %d to be synced before processing deposits", tm.l2NetworkID)
}
case <-compressorTicker.C:
if tm.synced && tm.cfg.GroupingClaims.Enabled {
if tm.synced && tm.cfg.GroupingClaims.Enabled && ger.GlobalExitRoot != latestProcessedGer {
log.Info("Processing deposits for ger: ", ger.GlobalExitRoot)
go func() {
err := tm.updateDepositsStatus(ger)
if err != nil {
log.Errorf("failed to update deposits status: %v", err)
}
}()
latestProcessedGer = ger.GlobalExitRoot
}
case <-ticker.C:
err := tm.monitorTxs.MonitorTxs(tm.ctx)
Expand Down
9 changes: 4 additions & 5 deletions claimtxman/compose_compress_claim.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@ import (
"fmt"
"math/big"
"slices"
"strings"

ctmtypes "github.com/0xPolygonHermez/zkevm-bridge-service/claimtxman/types"
"github.com/0xPolygonHermez/zkevm-bridge-service/etherman/smartcontracts/claimcompressor"
Expand Down Expand Up @@ -44,13 +43,13 @@ type bridgeClaimXParams struct {
}

type ComposeCompressClaim struct {
smcAbi abi.ABI
bridgeContractABI *abi.ABI
methodClaimAssets abi.Method
methodClaimMessages abi.Method
}

func NewComposeCompressClaim() (*ComposeCompressClaim, error) {
smcAbi, err := abi.JSON(strings.NewReader(polygonzkevmbridge.PolygonzkevmbridgeABI))
smcAbi, err := polygonzkevmbridge.PolygonzkevmbridgeMetaData.GetAbi()
if err != nil {
return nil, errors.New("fails to read abi fom Bridge contract")
}
Expand All @@ -63,7 +62,7 @@ func NewComposeCompressClaim() (*ComposeCompressClaim, error) {
return nil, errors.New("method claimMessages not found")
}
return &ComposeCompressClaim{
smcAbi: smcAbi,
bridgeContractABI: smcAbi,
methodClaimAssets: methodClaimAssets,
methodClaimMessages: methodClaimMessages,
}, nil
Expand Down Expand Up @@ -129,7 +128,7 @@ func (c *ComposeCompressClaim) extractParams(data []byte) (*bridgeClaimXParams,
}
func (c *ComposeCompressClaim) extractParamsClaimX(data []byte) (*bridgeClaimXParams, error) {
// do something
method, err := c.smcAbi.MethodById(data[:4])
method, err := c.bridgeContractABI.MethodById(data[:4])
if err != nil {
return nil, fmt.Errorf("extracting params, getting method err: %w ", err)
}
Expand Down
2 changes: 2 additions & 0 deletions claimtxman/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -42,4 +42,6 @@ type ConfigGroupingClaims struct {
RetryInterval types.Duration `mapstructure:"RetryInterval"`
// RetryTimeout is the maximum time to wait for a claim tx to be mined
RetryTimeout types.Duration `mapstructure:"RetryTimeout"`
// GasOffset is the offset for the gas estimation
GasOffset uint64 `mapstructure:"GasOffset"`
}
28 changes: 23 additions & 5 deletions claimtxman/monitor_compressed_txs.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ type StorageCompressedInterface interface {

type EthermanI interface {
CompressClaimCall(mainnetExitRoot, rollupExitRoot common.Hash, claimData []claimcompressor.ClaimCompressorCompressClaimCallData) ([]byte, error)
SendCompressedClaims(auth *bind.TransactOpts, compressedTxData []byte) (common.Hash, error)
SendCompressedClaims(auth *bind.TransactOpts, compressedTxData []byte) (*types.Transaction, error)
}
type MonitorCompressedTxs struct {
storage StorageCompressedInterface
Expand All @@ -48,6 +48,7 @@ type MonitorCompressedTxs struct {
compressClaimComposer *ComposeCompressClaim
timeProvider utils.TimeProvider
triggerGroups *GroupsTrigger
gasOffset uint64
}

func NewMonitorCompressedTxs(ctx context.Context,
Expand All @@ -57,7 +58,8 @@ func NewMonitorCompressedTxs(ctx context.Context,
nonceCache *NonceCache,
auth *bind.TransactOpts,
etherMan EthermanI,
timeProvider utils.TimeProvider) *MonitorCompressedTxs {
timeProvider utils.TimeProvider,
gasOffset uint64) *MonitorCompressedTxs {
composer, err := NewComposeCompressClaim()
if err != nil {
log.Fatal("failed to create ComposeCompressClaim: %v", err)
Expand All @@ -73,6 +75,7 @@ func NewMonitorCompressedTxs(ctx context.Context,
compressClaimComposer: composer,
timeProvider: timeProvider,
triggerGroups: NewGroupsTrigger(cfg.GroupingClaims),
gasOffset: gasOffset,
}
}

Expand Down Expand Up @@ -317,17 +320,32 @@ func (tm *MonitorCompressedTxs) SendClaims(pendingTx *PendingTxs, onlyFirstOne b
continue
}

// Estimating Gas
auth := *tm.auth
auth.NoSend = true
estimatedTx, err := tm.etherMan.SendCompressedClaims(&auth, group.DbEntry.CompressedTxData)
if err != nil {
msg := fmt.Sprintf("failed to call SMC SendCompressedClaims for group %d: %v", group.DbEntry.GroupID, err)
log.Warn(msg)
group.DbEntry.LastLog = msg
continue
}
auth.NoSend = false
log.Debug("estimatedGAS: ", estimatedTx.Gas())
auth.GasLimit = estimatedTx.Gas() + tm.gasOffset
log.Debug("New GAS: ", auth.GasLimit)
// Send claim tx
txHash, err := tm.etherMan.SendCompressedClaims(tm.auth, group.DbEntry.CompressedTxData)
tx, err := tm.etherMan.SendCompressedClaims(&auth, group.DbEntry.CompressedTxData)
if err != nil {
msg := fmt.Sprintf("failed to call SMC SendCompressedClaims for group %d: %v", group.DbEntry.GroupID, err)
log.Warn(msg)
group.DbEntry.LastLog = msg
continue
}
log.Infof("send claim tx try: %d for group_id:%d deposits_id:%s txHash:%s", group.DbEntry.NumRetries, group.DbEntry.GroupID, group.GetTxsDepositIDString(), txHash.String())
log.Debug("Gas used: ", tx.Gas())
log.Infof("Send claim tx try: %d for group_id:%d deposits_id:%s txHash:%s", group.DbEntry.NumRetries, group.DbEntry.GroupID, group.GetTxsDepositIDString(), tx.Hash().String())
group.DbEntry.Status = ctmtypes.MonitoredTxGroupStatusClaiming
group.DbEntry.AddPendingTx(txHash)
group.DbEntry.AddPendingTx(tx.Hash())
group.DbEntry.NumRetries++
}
return nil
Expand Down
3 changes: 2 additions & 1 deletion config/config.debug.toml
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ AuthorizedClaimMessageAddresses = ["0x90F79bf6EB2c4f870365E785982E1f101E93b906"]
RetryInterval = "10s"
RetryTimeout = "30s"
FrequencyToProcessCompressedClaims = "1m"
GasOffset = 100000

[Etherman]
L1URL = "http://localhost:8545"
Expand Down Expand Up @@ -57,7 +58,7 @@ BridgeVersion = "v1"
MaxConns = 20

[NetworkConfig]
GenBlockNumber = 1
GenBlockNumber = 0
PolygonBridgeAddress = "0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E"
PolygonZkEVMGlobalExitRootAddress = "0x8A791620dd6260079BF849Dc5567aDC3F2FdC318"
PolygonRollupManagerAddress = "0xB7f8BC63BbcaD18155201308C8f3540b07f84F5e"
Expand Down
3 changes: 2 additions & 1 deletion config/config.local.toml
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ AuthorizedClaimMessageAddresses = ["0x90F79bf6EB2c4f870365E785982E1f101E93b906"]
RetryInterval = "10s"
RetryTimeout = "30s"
FrequencyToProcessCompressedClaims = "1m"
GasOffset = 100000

[Etherman]
L1URL = "http://zkevm-mock-l1-network:8545"
Expand Down Expand Up @@ -57,7 +58,7 @@ BridgeVersion = "v1"
MaxConns = 20

[NetworkConfig]
GenBlockNumber = 1
GenBlockNumber = 0
PolygonBridgeAddress = "0xFe12ABaa190Ef0c8638Ee0ba9F828BF41368Ca0E"
PolygonZkEVMGlobalExitRootAddress = "0x8A791620dd6260079BF849Dc5567aDC3F2FdC318"
PolygonRollupManagerAddress = "0xB7f8BC63BbcaD18155201308C8f3540b07f84F5e"
Expand Down
1 change: 1 addition & 0 deletions config/default.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ AuthorizedClaimMessageAddresses = []
MaxRetries = 2
RetryInterval = "10s"
RetryTimeout = "30s"
GasOffset = 0


[Etherman]
Expand Down
25 changes: 25 additions & 0 deletions db/pgstorage/migrations/0010.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
-- +migrate Up

-- Purge orphaned blocks: a row in sync.block survives only if at least one
-- claim, deposit, wrapped token, exit root or rollup exit references it.
-- Block id 0 (the genesis placeholder) is always preserved.
DELETE FROM sync.block AS b
WHERE NOT EXISTS (SELECT 1
                  FROM sync.claim
                  WHERE sync.claim.block_id = b.id)
  AND NOT EXISTS (SELECT 1
                  FROM sync.deposit
                  WHERE sync.deposit.block_id = b.id)
  AND NOT EXISTS (SELECT 1
                  FROM sync.token_wrapped
                  WHERE sync.token_wrapped.block_id = b.id)
  AND NOT EXISTS (SELECT 1
                  FROM sync.exit_root
                  WHERE sync.exit_root.block_id = b.id)
  AND NOT EXISTS (SELECT 1
                  FROM mt.rollup_exit
                  WHERE mt.rollup_exit.block_id = b.id)
  AND b.id != 0;


-- +migrate Down

-- no action is needed, the data must remain deleted as it is useless
93 changes: 93 additions & 0 deletions db/pgstorage/migrations/0010_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
package migrations_test

import (
"database/sql"
"fmt"
"testing"

"github.com/stretchr/testify/assert"
)

// migrationTest0010 exercises migration 0010, which deletes every sync.block
// row that is not referenced by any claim, deposit, wrapped token, exit root
// or rollup exit (block id 0 is always kept).
// NOTE(review): the previous comment ("changes length of the token name")
// described a different migration and was incorrect.
type migrationTest0010 struct{}

// InsertData seeds the fixture for migration 0010: eleven blocks are inserted
// (ids 1-11, on top of the pre-existing block 0, giving twelve in total) and
// five of them are referenced from child tables — claim -> block 2,
// deposit -> block 3, token_wrapped -> block 5, rollup_exit -> block 6,
// exit_root -> block 8 — so the migration should delete the six
// unreferenced ones. Returns an error if seeding or the initial sanity
// check fails.
func (m migrationTest0010) InsertData(db *sql.DB) error {
addBlocks := `
INSERT INTO sync.block
(block_num, block_hash, parent_hash, received_at, network_id)
VALUES(1, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b20', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50fe', '2024-03-11 02:52:23.000', 0);
INSERT INTO sync.block
(block_num, block_hash, parent_hash, received_at, network_id)
VALUES(2, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b21', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f1', '2024-03-11 02:52:24.000', 0);
INSERT INTO sync.block
(block_num, block_hash, parent_hash, received_at, network_id)
VALUES(3, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b22', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f2', '2024-03-11 02:52:25.000', 0);
INSERT INTO sync.block
(block_num, block_hash, parent_hash, received_at, network_id)
VALUES(4, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b23', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f3', '2024-03-11 02:52:26.000', 0);
INSERT INTO sync.block
(block_num, block_hash, parent_hash, received_at, network_id)
VALUES(5, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b24', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f4', '2024-03-11 02:52:27.000', 0);
INSERT INTO sync.block
(block_num, block_hash, parent_hash, received_at, network_id)
VALUES(6, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b25', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f5', '2024-03-11 02:52:28.000', 0);
INSERT INTO sync.block
(block_num, block_hash, parent_hash, received_at, network_id)
VALUES(7, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b26', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f6', '2024-03-11 02:52:29.000', 0);
INSERT INTO sync.block
(block_num, block_hash, parent_hash, received_at, network_id)
VALUES(8, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b27', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f7', '2024-03-11 02:52:30.000', 0);
INSERT INTO sync.block
(block_num, block_hash, parent_hash, received_at, network_id)
VALUES(9, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b28', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f8', '2024-03-11 02:52:31.000', 0);
INSERT INTO sync.block
(block_num, block_hash, parent_hash, received_at, network_id)
VALUES(10, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b29', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f9', '2024-03-11 02:52:32.000', 0);
INSERT INTO sync.block
(block_num, block_hash, parent_hash, received_at, network_id)
VALUES(11, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b2a', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50fa', '2024-03-11 02:52:33.000', 0);
INSERT INTO sync.claim
(network_id, "index", orig_net, orig_addr, amount, dest_addr, block_id, tx_hash, rollup_index, mainnet_flag)
VALUES(1, 0, 0, decode('0000000000000000000000000000000000000000','hex'), '100000000000000000', decode('F35960302A07022ABA880DFFAEC2FDD64D5BF1C1','hex'), 2, decode('3E24EC7286B5138DE66E8B2B854EE957579B2651B3A454AD32C55A985364FAFF','hex'), 0, false);
INSERT INTO sync.deposit
(leaf_type, network_id, orig_net, orig_addr, amount, dest_net, dest_addr, block_id, deposit_cnt, tx_hash, metadata, id, ready_for_claim)
VALUES(0, 1, 0, decode('0000000000000000000000000000000000000000','hex'), '2000000000000000', 0, decode('2536C2745AC4A584656A830F7BDCD329C94E8F30','hex'), 3, 1, decode('1E615900D623C9291992C79ED156A950BE7DA69B8E58A67DC6F2BCDE2EB236FC','hex'), decode('','hex'), 2, true);
INSERT INTO sync.token_wrapped
(network_id, orig_net, orig_token_addr, wrapped_token_addr, block_id, "name", symbol, decimals)
VALUES(1, 0, decode('5AA6D983DECB146A5810BB28CCD2554F29176AB6','hex'), decode('6014E48D6C37CD0953E86F511CF04DDD7C37029D','hex'), 5, 'ToniToken', 'TRM', 18);
INSERT INTO mt.rollup_exit
(id, leaf, rollup_id, root, block_id)
VALUES(1, decode('4C907345C62B48529CE718F3A32E8BE63A3AE02831386A638419C6CBE6606558','hex'), 1, decode('3CAF4160ABD2C2160305420728BDFECC882456DCA00247FEAFC2C00ADA3E19E0','hex'), 6);
INSERT INTO sync.exit_root
(id, block_id, global_exit_root, exit_roots)
VALUES(1, 8, decode('AD3228B676F7D3CD4284A5443F17F1962B36E491B30A40B2405849E597BA5FB5','hex'), '{decode(''5C7830303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030'',''hex''),decode(''5C7830303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030'',''hex'')}');
`
// Seed the fixture rows in one batched statement.
if _, err := db.Exec(addBlocks); err != nil {
return err
}
// Sanity-check the starting state: 11 inserted blocks plus the
// pre-existing block 0 must yield 12 rows before the migration runs.
blockCount := `SELECT count(*) FROM sync.block`
var count int
err := db.QueryRow(blockCount).Scan(&count)
if err != nil {
return err
}
if count != 12 {
return fmt.Errorf("error: initial wrong number of blocks: %d", count)
}
return nil
}

// RunAssertsAfterMigrationUp verifies that only the referenced blocks
// survive the migration: block 0 plus the five blocks pointed at by the
// claim/deposit/token_wrapped/rollup_exit/exit_root fixtures = 6 rows.
func (m migrationTest0010) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) {
	const countQuery = `SELECT count(*) FROM sync.block`
	var remaining int
	assert.NoError(t, db.QueryRow(countQuery).Scan(&remaining))
	assert.Equal(t, 6, remaining)
}

// RunAssertsAfterMigrationDown intentionally checks nothing: the Down
// migration in 0010.sql is a no-op, since the deleted empty blocks are
// useless and must remain deleted.
func (m migrationTest0010) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) {
}

// TestMigration0010 drives the package's shared migration harness for
// migration number 10, using the fixtures and assertions defined on
// migrationTest0010.
func TestMigration0010(t *testing.T) {
	runMigrationTest(t, 10, migrationTest0010{})
}
6 changes: 3 additions & 3 deletions etherman/etherman.go
Original file line number Diff line number Diff line change
Expand Up @@ -765,13 +765,13 @@ func (etherMan *Client) AddExistingRollupEvent(ctx context.Context, vLog types.L
return nil
}

func (etherMan *Client) SendCompressedClaims(auth *bind.TransactOpts, compressedTxData []byte) (common.Hash, error) {
func (etherMan *Client) SendCompressedClaims(auth *bind.TransactOpts, compressedTxData []byte) (*types.Transaction, error) {
claimTx, err := etherMan.ClaimCompressor.SendCompressedClaims(auth, compressedTxData)
if err != nil {
log.Error("failed to call SMC SendCompressedClaims: %v", err)
return common.Hash{}, err
return nil, err
}
return claimTx.Hash(), err
return claimTx, err
}

func (etherMan *Client) CompressClaimCall(mainnetExitRoot, rollupExitRoot common.Hash, claimData []claimcompressor.ClaimCompressorCompressClaimCallData) ([]byte, error) {
Expand Down
Loading
Loading