diff --git a/bridgectrl/bridgectrl.go b/bridgectrl/bridgectrl.go index 8014d859..ba8c08c0 100644 --- a/bridgectrl/bridgectrl.go +++ b/bridgectrl/bridgectrl.go @@ -17,21 +17,21 @@ const ( // BridgeController struct type BridgeController struct { - exitTrees []*MerkleTree - rollupsTree *MerkleTree - networkIDs map[uint]uint8 + exitTrees []*MerkleTree + rollupsTree *MerkleTree + merkleTreeIDs map[uint32]uint8 } // NewBridgeController creates new BridgeController. -func NewBridgeController(ctx context.Context, cfg Config, networks []uint, mtStore interface{}) (*BridgeController, error) { +func NewBridgeController(ctx context.Context, cfg Config, networkIDs []uint32, mtStore interface{}) (*BridgeController, error) { var ( - networkIDs = make(map[uint]uint8) - exitTrees []*MerkleTree + merkleTreeIDs = make(map[uint32]uint8) + exitTrees []*MerkleTree ) - for i, network := range networks { - networkIDs[network] = uint8(i) - mt, err := NewMerkleTree(ctx, mtStore.(merkleTreeStore), cfg.Height, network) + for i, networkID := range networkIDs { + merkleTreeIDs[networkID] = uint8(i) + mt, err := NewMerkleTree(ctx, mtStore.(merkleTreeStore), cfg.Height, networkID) if err != nil { return nil, err } @@ -44,14 +44,14 @@ func NewBridgeController(ctx context.Context, cfg Config, networks []uint, mtSto } return &BridgeController{ - exitTrees: exitTrees, - rollupsTree: rollupsTree, - networkIDs: networkIDs, + exitTrees: exitTrees, + rollupsTree: rollupsTree, + merkleTreeIDs: merkleTreeIDs, }, nil } -func (bt *BridgeController) GetNetworkID(networkID uint) (uint8, error) { - tID, found := bt.networkIDs[networkID] +func (bt *BridgeController) GetMerkleTreeID(networkID uint32) (uint8, error) { + tID, found := bt.merkleTreeIDs[networkID] if !found { return 0, gerror.ErrNetworkNotRegister } @@ -61,7 +61,7 @@ func (bt *BridgeController) GetNetworkID(networkID uint) (uint8, error) { // AddDeposit adds deposit information to the bridge tree. func (bt *BridgeController) AddDeposit(ctx context.Context, deposit *etherman.Deposit, depositID uint64, dbTx pgx.Tx) error { leaf := hashDeposit(deposit) - tID, err := bt.GetNetworkID(deposit.NetworkID) + tID, err := bt.GetMerkleTreeID(deposit.NetworkID) if err != nil { return err } @@ -69,8 +69,8 @@ func (bt *BridgeController) AddDeposit(ctx context.Context, deposit *etherman.De } // ReorgMT reorg the specific merkle tree. -func (bt *BridgeController) ReorgMT(ctx context.Context, depositCount uint, networkID uint, dbTx pgx.Tx) error { - tID, err := bt.GetNetworkID(networkID) +func (bt *BridgeController) ReorgMT(ctx context.Context, depositCount uint32, networkID uint32, dbTx pgx.Tx) error { + tID, err := bt.GetMerkleTreeID(networkID) if err != nil { return err } @@ -79,8 +79,8 @@ func (bt *BridgeController) ReorgMT(ctx context.Context, depositCount uint, netw // GetExitRoot returns the dedicated merkle tree's root. 
// only use for the test purpose -func (bt *BridgeController) GetExitRoot(ctx context.Context, networkID int, dbTx pgx.Tx) ([]byte, error) { - return bt.exitTrees[networkID].getRoot(ctx, dbTx) +func (bt *BridgeController) GetExitRoot(ctx context.Context, tID uint8, dbTx pgx.Tx) ([]byte, error) { + return bt.exitTrees[tID].getRoot(ctx, dbTx) } func (bt *BridgeController) AddRollupExitLeaf(ctx context.Context, rollupLeaf etherman.RollupExitLeaf, dbTx pgx.Tx) error { diff --git a/bridgectrl/bridgectrl_test.go b/bridgectrl/bridgectrl_test.go index 7e7a83f0..cae7f0cb 100644 --- a/bridgectrl/bridgectrl_test.go +++ b/bridgectrl/bridgectrl_test.go @@ -50,7 +50,7 @@ func TestBridgeTree(t *testing.T) { store, err := pgstorage.NewPostgresStorage(dbCfg) require.NoError(t, err) ctx := context.Background() - bt, err := NewBridgeController(ctx, cfg, []uint{0, 1000}, store) + bt, err := NewBridgeController(ctx, cfg, []uint32{0, 1000}, store) require.NoError(t, err) t.Run("Test adding deposit for the bridge tree", func(t *testing.T) { @@ -71,7 +71,7 @@ func TestBridgeTree(t *testing.T) { DestinationNetwork: testVector.DestinationNetwork, DestinationAddress: common.HexToAddress(testVector.DestinationAddress), BlockID: blockID, - DepositCount: uint(i), + DepositCount: uint32(i), Metadata: common.FromHex(testVector.Metadata), } leafHash := hashDeposit(deposit) @@ -82,10 +82,10 @@ func TestBridgeTree(t *testing.T) { require.NoError(t, err) // test reorg - orgRoot, err := bt.exitTrees[0].store.GetRoot(ctx, uint(i), 0, nil) + orgRoot, err := bt.exitTrees[0].store.GetRoot(ctx, uint32(i), 0, nil) require.NoError(t, err) require.NoError(t, store.Reset(ctx, uint64(i), deposit.NetworkID, nil)) - err = bt.ReorgMT(ctx, uint(i), testVectors[i].OriginalNetwork, nil) + err = bt.ReorgMT(ctx, uint32(i), testVectors[i].OriginalNetwork, nil) require.NoError(t, err) blockID, err = store.AddBlock(context.TODO(), block, nil) require.NoError(t, err) @@ -94,7 +94,7 @@ func TestBridgeTree(t *testing.T) { require.NoError(t, err) err = bt.AddDeposit(ctx, deposit, depositID, nil) require.NoError(t, err) - newRoot, err := bt.exitTrees[0].store.GetRoot(ctx, uint(i), 0, nil) + newRoot, err := bt.exitTrees[0].store.GetRoot(ctx, uint32(i), 0, nil) require.NoError(t, err) assert.Equal(t, orgRoot, newRoot) diff --git a/bridgectrl/interfaces.go b/bridgectrl/interfaces.go index a3feccf1..55dfe6b2 100644 --- a/bridgectrl/interfaces.go +++ b/bridgectrl/interfaces.go @@ -12,9 +12,9 @@ import ( type merkleTreeStore interface { Get(ctx context.Context, key []byte, dbTx pgx.Tx) ([][]byte, error) BulkSet(ctx context.Context, rows [][]interface{}, dbTx pgx.Tx) error - GetRoot(ctx context.Context, depositCount uint, network uint, dbTx pgx.Tx) ([]byte, error) - SetRoot(ctx context.Context, root []byte, depositID uint64, network uint, dbTx pgx.Tx) error - GetLastDepositCount(ctx context.Context, network uint, dbTx pgx.Tx) (uint, error) + GetRoot(ctx context.Context, depositCount uint32, network uint32, dbTx pgx.Tx) ([]byte, error) + SetRoot(ctx context.Context, root []byte, depositID uint64, network uint32, dbTx pgx.Tx) error + GetLastDepositCount(ctx context.Context, networkID uint32, dbTx pgx.Tx) (uint32, error) AddRollupExitLeaves(ctx context.Context, rows [][]interface{}, dbTx pgx.Tx) error GetRollupExitLeavesByRoot(ctx context.Context, root common.Hash, dbTx pgx.Tx) ([]etherman.RollupExitLeaf, error) GetLatestRollupExitLeaves(ctx context.Context, dbTx pgx.Tx) ([]etherman.RollupExitLeaf, error) diff --git a/bridgectrl/merkletree.go 
b/bridgectrl/merkletree.go index 43bdb8a9..2af05b2a 100644 --- a/bridgectrl/merkletree.go +++ b/bridgectrl/merkletree.go @@ -17,12 +17,12 @@ var zeroHashes [][KeyLen]byte // MerkleTree struct type MerkleTree struct { // store is the database storage to store all node data - store merkleTreeStore - network uint + store merkleTreeStore + networkID uint32 // height is the depth of the merkle tree height uint8 // count is the number of deposit - count uint + count uint32 // siblings is the array of sibling of the last leaf added siblings [][KeyLen]byte } @@ -38,8 +38,8 @@ func init() { } // NewMerkleTree creates new MerkleTree. -func NewMerkleTree(ctx context.Context, store merkleTreeStore, height uint8, network uint) (*MerkleTree, error) { - depositCnt, err := store.GetLastDepositCount(ctx, network, nil) +func NewMerkleTree(ctx context.Context, store merkleTreeStore, height uint8, networkID uint32) (*MerkleTree, error) { + depositCnt, err := store.GetLastDepositCount(ctx, networkID, nil) if err != nil { if err != gerror.ErrStorageNotFound { return nil, err @@ -50,10 +50,10 @@ func NewMerkleTree(ctx context.Context, store merkleTreeStore, height uint8, net } mt := &MerkleTree{ - store: store, - network: network, - height: height, - count: depositCnt, + store: store, + networkID: networkID, + height: height, + count: depositCnt, } mt.siblings, err = mt.initSiblings(ctx, nil) @@ -110,7 +110,7 @@ func (mt *MerkleTree) initSiblings(ctx context.Context, dbTx pgx.Tx) ([][KeyLen] return siblings, nil } -func (mt *MerkleTree) addLeaf(ctx context.Context, depositID uint64, leaf [KeyLen]byte, index uint, dbTx pgx.Tx) error { +func (mt *MerkleTree) addLeaf(ctx context.Context, depositID uint64, leaf [KeyLen]byte, index uint32, dbTx pgx.Tx) error { if index != mt.count { return fmt.Errorf("mismatched deposit count: %d, expected: %d", index, mt.count) } @@ -141,7 +141,7 @@ func (mt *MerkleTree) addLeaf(ctx context.Context, depositID uint64, leaf [KeyLe } } - err := mt.store.SetRoot(ctx, cur[:], depositID, mt.network, dbTx) + err := mt.store.SetRoot(ctx, cur[:], depositID, mt.networkID, dbTx) if err != nil { return err } @@ -157,7 +157,7 @@ func (mt *MerkleTree) addLeaf(ctx context.Context, depositID uint64, leaf [KeyLe return nil } -func (mt *MerkleTree) resetLeaf(ctx context.Context, depositCount uint, dbTx pgx.Tx) error { +func (mt *MerkleTree) resetLeaf(ctx context.Context, depositCount uint32, dbTx pgx.Tx) error { var err error mt.count = depositCount mt.siblings, err = mt.initSiblings(ctx, dbTx) @@ -169,7 +169,7 @@ func (mt *MerkleTree) getRoot(ctx context.Context, dbTx pgx.Tx) ([]byte, error) if mt.count == 0 { return zeroHashes[mt.height][:], nil } - return mt.store.GetRoot(ctx, mt.count-1, mt.network, dbTx) + return mt.store.GetRoot(ctx, mt.count-1, mt.networkID, dbTx) } func buildIntermediate(leaves [][KeyLen]byte) ([][][]byte, [][32]byte) { @@ -191,7 +191,7 @@ func (mt *MerkleTree) updateLeaf(ctx context.Context, depositID uint64, leaves [ nodes [][][][]byte ns [][][]byte ) - initLeavesCount := uint(len(leaves)) + initLeavesCount := uint32(len(leaves)) if len(leaves) == 0 { leaves = append(leaves, zeroHashes[0]) } @@ -207,7 +207,7 @@ func (mt *MerkleTree) updateLeaf(ctx context.Context, depositID uint64, leaves [ return fmt.Errorf("error: more than one root detected: %+v", nodes) } log.Debug("Root calculated: ", common.Bytes2Hex(ns[0][0])) - err := mt.store.SetRoot(ctx, ns[0][0], depositID, mt.network, dbTx) + err := mt.store.SetRoot(ctx, ns[0][0], depositID, mt.networkID, dbTx) if err != nil { 
return err } @@ -329,7 +329,7 @@ func (mt MerkleTree) addRollupExitLeaf(ctx context.Context, rollupLeaf etherman. for i := len(storedRollupLeaves); i < int(rollupLeaf.RollupId); i++ { storedRollupLeaves = append(storedRollupLeaves, etherman.RollupExitLeaf{ BlockID: rollupLeaf.BlockID, - RollupId: uint(i + 1), + RollupId: uint32(i + 1), }) } if storedRollupLeaves[rollupLeaf.RollupId-1].RollupId == rollupLeaf.RollupId { @@ -352,7 +352,7 @@ func (mt MerkleTree) addRollupExitLeaf(ctx context.Context, rollupLeaf etherman. return nil } -func ComputeSiblings(rollupIndex uint, leaves [][KeyLen]byte, height uint8) ([][KeyLen]byte, common.Hash, error) { +func ComputeSiblings(rollupIndex uint32, leaves [][KeyLen]byte, height uint8) ([][KeyLen]byte, common.Hash, error) { var ns [][][]byte if len(leaves) == 0 { leaves = append(leaves, zeroHashes[0]) @@ -382,7 +382,7 @@ func ComputeSiblings(rollupIndex uint, leaves [][KeyLen]byte, height uint8) ([][ } // Find the index of the leave in the next level of the tree. // Divide the index by 2 to find the position in the upper level - index = uint(float64(index) / 2) //nolint:gomnd + index = uint32(float64(index) / 2) //nolint:gomnd ns = nsi leaves = hashes } diff --git a/bridgectrl/merkletree_test.go b/bridgectrl/merkletree_test.go index 30124bdd..a5a725ff 100644 --- a/bridgectrl/merkletree_test.go +++ b/bridgectrl/merkletree_test.go @@ -66,7 +66,7 @@ func TestLeafHash(t *testing.T) { DestinationNetwork: testVector.DestinationNetwork, DestinationAddress: common.HexToAddress(testVector.DestinationAddress), BlockNumber: 0, - DepositCount: uint(ti + 1), + DepositCount: uint32(ti + 1), Metadata: common.FromHex(testVector.Metadata), } leafHash := hashDeposit(deposit) @@ -111,7 +111,7 @@ func TestMTAddLeaf(t *testing.T) { DestinationNetwork: testVector.NewLeaf.DestinationNetwork, DestinationAddress: common.HexToAddress(testVector.NewLeaf.DestinationAddress), BlockNumber: 0, - DepositCount: uint(i), + DepositCount: uint32(i), Metadata: common.FromHex(testVector.NewLeaf.Metadata), } depositID, err := store.AddDeposit(ctx, deposit, nil) @@ -123,7 +123,7 @@ func TestMTAddLeaf(t *testing.T) { leafValue, err := formatBytes32String(leaf[2:]) require.NoError(t, err) - err = mt.addLeaf(ctx, depositIDs[i], leafValue, uint(i), nil) + err = mt.addLeaf(ctx, depositIDs[i], leafValue, uint32(i), nil) require.NoError(t, err) } curRoot, err := mt.getRoot(ctx, nil) @@ -131,7 +131,7 @@ func TestMTAddLeaf(t *testing.T) { assert.Equal(t, hex.EncodeToString(curRoot), testVector.CurrentRoot[2:]) leafHash := hashDeposit(deposit) - err = mt.addLeaf(ctx, depositIDs[len(depositIDs)-1], leafHash, uint(len(testVector.ExistingLeaves)), nil) + err = mt.addLeaf(ctx, depositIDs[len(depositIDs)-1], leafHash, uint32(len(testVector.ExistingLeaves)), nil) require.NoError(t, err) newRoot, err := mt.getRoot(ctx, nil) require.NoError(t, err) @@ -179,7 +179,7 @@ func TestMTGetProof(t *testing.T) { DestinationNetwork: leaf.DestinationNetwork, DestinationAddress: common.HexToAddress(leaf.DestinationAddress), BlockID: blockID, - DepositCount: uint(li), + DepositCount: uint32(li), Metadata: common.FromHex(leaf.Metadata), } depositID, err := store.AddDeposit(ctx, deposit, nil) @@ -188,7 +188,7 @@ func TestMTGetProof(t *testing.T) { if li == int(testVector.Index) { cur = leafHash } - err = mt.addLeaf(ctx, depositID, leafHash, uint(li), nil) + err = mt.addLeaf(ctx, depositID, leafHash, uint32(li), nil) require.NoError(t, err) } root, err := mt.getRoot(ctx, nil) @@ -239,7 +239,7 @@ func TestUpdateMT(t *testing.T) 
{ DestinationNetwork: testVector.NewLeaf.DestinationNetwork, DestinationAddress: common.HexToAddress(testVector.NewLeaf.DestinationAddress), BlockNumber: 0, - DepositCount: uint(i), + DepositCount: uint32(i), Metadata: common.FromHex(testVector.NewLeaf.Metadata), } _, err := store.AddDeposit(ctx, deposit, nil) @@ -348,7 +348,7 @@ func TestBuildMTRootAndStore(t *testing.T) { require.Equal(t, len(leaves), len(result)) require.Equal(t, leaves[i][:], result[i].Leaf.Bytes()) require.Equal(t, newRoot, result[i].Root) - require.Equal(t, uint(i+1), result[i].RollupId) + require.Equal(t, uint32(i+1), result[i].RollupId) } } } diff --git a/bridgectrl/pb/query.pb.go b/bridgectrl/pb/query.pb.go index 765982b1..f152b814 100644 --- a/bridgectrl/pb/query.pb.go +++ b/bridgectrl/pb/query.pb.go @@ -133,7 +133,7 @@ type Deposit struct { DestNet uint32 `protobuf:"varint,5,opt,name=dest_net,json=destNet,proto3" json:"dest_net,omitempty"` DestAddr string `protobuf:"bytes,6,opt,name=dest_addr,json=destAddr,proto3" json:"dest_addr,omitempty"` BlockNum uint64 `protobuf:"varint,7,opt,name=block_num,json=blockNum,proto3" json:"block_num,omitempty"` - DepositCnt uint64 `protobuf:"varint,8,opt,name=deposit_cnt,json=depositCnt,proto3" json:"deposit_cnt,omitempty"` + DepositCnt uint32 `protobuf:"varint,8,opt,name=deposit_cnt,json=depositCnt,proto3" json:"deposit_cnt,omitempty"` NetworkId uint32 `protobuf:"varint,9,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` TxHash string `protobuf:"bytes,10,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` ClaimTxHash string `protobuf:"bytes,11,opt,name=claim_tx_hash,json=claimTxHash,proto3" json:"claim_tx_hash,omitempty"` @@ -223,7 +223,7 @@ func (x *Deposit) GetBlockNum() uint64 { return 0 } -func (x *Deposit) GetDepositCnt() uint64 { +func (x *Deposit) GetDepositCnt() uint32 { if x != nil { return x.DepositCnt } @@ -278,7 +278,7 @@ type Claim struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Index uint64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + Index uint32 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` OrigNet uint32 `protobuf:"varint,2,opt,name=orig_net,json=origNet,proto3" json:"orig_net,omitempty"` OrigAddr string `protobuf:"bytes,3,opt,name=orig_addr,json=origAddr,proto3" json:"orig_addr,omitempty"` Amount string `protobuf:"bytes,4,opt,name=amount,proto3" json:"amount,omitempty"` @@ -286,7 +286,7 @@ type Claim struct { DestAddr string `protobuf:"bytes,6,opt,name=dest_addr,json=destAddr,proto3" json:"dest_addr,omitempty"` BlockNum uint64 `protobuf:"varint,7,opt,name=block_num,json=blockNum,proto3" json:"block_num,omitempty"` TxHash string `protobuf:"bytes,8,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` - RollupIndex uint64 `protobuf:"varint,9,opt,name=rollup_index,json=rollupIndex,proto3" json:"rollup_index,omitempty"` + RollupIndex uint32 `protobuf:"varint,9,opt,name=rollup_index,json=rollupIndex,proto3" json:"rollup_index,omitempty"` MainnetFlag bool `protobuf:"varint,10,opt,name=mainnet_flag,json=mainnetFlag,proto3" json:"mainnet_flag,omitempty"` } @@ -322,7 +322,7 @@ func (*Claim) Descriptor() ([]byte, []int) { return file_query_proto_rawDescGZIP(), []int{2} } -func (x *Claim) GetIndex() uint64 { +func (x *Claim) GetIndex() uint32 { if x != nil { return x.Index } @@ -378,7 +378,7 @@ func (x *Claim) GetTxHash() string { return "" } -func (x *Claim) GetRollupIndex() uint64 { +func (x *Claim) GetRollupIndex() uint32 { if x != nil { return 
x.RollupIndex } @@ -508,7 +508,7 @@ type GetBridgesRequest struct { unknownFields protoimpl.UnknownFields DestAddr string `protobuf:"bytes,1,opt,name=dest_addr,json=destAddr,proto3" json:"dest_addr,omitempty"` - Offset uint64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"` + Offset uint32 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"` Limit uint32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` } @@ -551,7 +551,7 @@ func (x *GetBridgesRequest) GetDestAddr() string { return "" } -func (x *GetBridgesRequest) GetOffset() uint64 { +func (x *GetBridgesRequest) GetOffset() uint32 { if x != nil { return x.Offset } @@ -571,9 +571,9 @@ type GetPendingBridgesRequest struct { unknownFields protoimpl.UnknownFields DestAddr string `protobuf:"bytes,1,opt,name=dest_addr,json=destAddr,proto3" json:"dest_addr,omitempty"` - DestNet uint64 `protobuf:"varint,2,opt,name=dest_net,json=destNet,proto3" json:"dest_net,omitempty"` + DestNet uint32 `protobuf:"varint,2,opt,name=dest_net,json=destNet,proto3" json:"dest_net,omitempty"` LeafType uint32 `protobuf:"varint,3,opt,name=leaf_type,json=leafType,proto3" json:"leaf_type,omitempty"` - Offset uint64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` + Offset uint32 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` Limit uint32 `protobuf:"varint,5,opt,name=limit,proto3" json:"limit,omitempty"` } @@ -616,7 +616,7 @@ func (x *GetPendingBridgesRequest) GetDestAddr() string { return "" } -func (x *GetPendingBridgesRequest) GetDestNet() uint64 { +func (x *GetPendingBridgesRequest) GetDestNet() uint32 { if x != nil { return x.DestNet } @@ -630,7 +630,7 @@ func (x *GetPendingBridgesRequest) GetLeafType() uint32 { return 0 } -func (x *GetPendingBridgesRequest) GetOffset() uint64 { +func (x *GetPendingBridgesRequest) GetOffset() uint32 { if x != nil { return x.Offset } @@ -650,7 +650,7 @@ type GetProofRequest struct { unknownFields protoimpl.UnknownFields NetId uint32 `protobuf:"varint,1,opt,name=net_id,json=netId,proto3" json:"net_id,omitempty"` - DepositCnt uint64 `protobuf:"varint,2,opt,name=deposit_cnt,json=depositCnt,proto3" json:"deposit_cnt,omitempty"` + DepositCnt uint32 `protobuf:"varint,2,opt,name=deposit_cnt,json=depositCnt,proto3" json:"deposit_cnt,omitempty"` } func (x *GetProofRequest) Reset() { @@ -692,7 +692,7 @@ func (x *GetProofRequest) GetNetId() uint32 { return 0 } -func (x *GetProofRequest) GetDepositCnt() uint64 { +func (x *GetProofRequest) GetDepositCnt() uint32 { if x != nil { return x.DepositCnt } @@ -705,7 +705,7 @@ type GetProofByGERRequest struct { unknownFields protoimpl.UnknownFields NetId uint32 `protobuf:"varint,1,opt,name=net_id,json=netId,proto3" json:"net_id,omitempty"` - DepositCnt uint64 `protobuf:"varint,2,opt,name=deposit_cnt,json=depositCnt,proto3" json:"deposit_cnt,omitempty"` + DepositCnt uint32 `protobuf:"varint,2,opt,name=deposit_cnt,json=depositCnt,proto3" json:"deposit_cnt,omitempty"` Ger string `protobuf:"bytes,3,opt,name=ger,proto3" json:"ger,omitempty"` } @@ -748,7 +748,7 @@ func (x *GetProofByGERRequest) GetNetId() uint32 { return 0 } -func (x *GetProofByGERRequest) GetDepositCnt() uint64 { +func (x *GetProofByGERRequest) GetDepositCnt() uint32 { if x != nil { return x.DepositCnt } @@ -823,7 +823,7 @@ type GetBridgeRequest struct { unknownFields protoimpl.UnknownFields NetId uint32 `protobuf:"varint,1,opt,name=net_id,json=netId,proto3" json:"net_id,omitempty"` - DepositCnt uint64 
`protobuf:"varint,2,opt,name=deposit_cnt,json=depositCnt,proto3" json:"deposit_cnt,omitempty"` + DepositCnt uint32 `protobuf:"varint,2,opt,name=deposit_cnt,json=depositCnt,proto3" json:"deposit_cnt,omitempty"` } func (x *GetBridgeRequest) Reset() { @@ -865,7 +865,7 @@ func (x *GetBridgeRequest) GetNetId() uint32 { return 0 } -func (x *GetBridgeRequest) GetDepositCnt() uint64 { +func (x *GetBridgeRequest) GetDepositCnt() uint32 { if x != nil { return x.DepositCnt } @@ -878,7 +878,7 @@ type GetClaimsRequest struct { unknownFields protoimpl.UnknownFields DestAddr string `protobuf:"bytes,1,opt,name=dest_addr,json=destAddr,proto3" json:"dest_addr,omitempty"` - Offset uint64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"` + Offset uint32 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"` Limit uint32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` } @@ -921,7 +921,7 @@ func (x *GetClaimsRequest) GetDestAddr() string { return "" } -func (x *GetClaimsRequest) GetOffset() uint64 { +func (x *GetClaimsRequest) GetOffset() uint32 { if x != nil { return x.Offset } @@ -1268,7 +1268,7 @@ var file_query_proto_rawDesc = []byte{ 0x64, 0x65, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x6f, + 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x43, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, @@ -1283,7 +1283,7 @@ var file_query_proto_rawDesc = []byte{ 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0xa5, 0x02, 0x0a, 0x05, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x69, + 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x69, 0x67, 0x5f, 0x6e, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x6f, 0x72, 0x69, 0x67, 0x4e, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x72, 0x69, 0x67, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6f, 0x72, 0x69, 0x67, 0x41, 0x64, 0x64, @@ -1297,7 +1297,7 @@ var file_query_proto_rawDesc = []byte{ 0x75, 0x6d, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x78, 0x48, 0x61, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x6f, 0x6c, 0x6c, 0x75, 0x70, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x0b, 0x72, 0x6f, 0x6c, 0x6c, 0x75, 0x70, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x21, + 0x0d, 0x52, 0x0b, 0x72, 0x6f, 0x6c, 0x6c, 0x75, 0x70, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x69, 0x6e, 0x6e, 0x65, 0x74, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6d, 0x61, 0x69, 0x6e, 0x6e, 0x65, 0x74, 0x46, 0x6c, 0x61, 0x67, 0x22, 0xaa, 0x01, 
0x0a, 0x05, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x21, 0x0a, 0x0c, 0x6d, @@ -1316,27 +1316,27 @@ var file_query_proto_rawDesc = []byte{ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x65, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6c, + 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x9d, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x72, 0x69, 0x64, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x65, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x64, - 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x64, + 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x64, 0x65, 0x73, 0x74, 0x4e, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x6c, 0x65, 0x61, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6c, + 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x49, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x64, - 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x43, 0x6e, 0x74, 0x22, 0x60, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x42, 0x79, 0x47, 0x45, 0x52, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x64, - 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x43, 0x6e, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x67, 0x65, 0x72, 0x22, 0x5b, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, @@ -1348,12 +1348,12 @@ var file_query_proto_rawDesc = []byte{ 0x65, 0x74, 0x42, 0x72, 0x69, 0x64, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6e, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6e, 0x65, 0x74, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x65, 
0x70, 0x6f, 0x73, 0x69, - 0x74, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x64, 0x65, 0x70, + 0x74, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x43, 0x6e, 0x74, 0x22, 0x5d, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x65, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, - 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, + 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x24, 0x0a, 0x10, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x50, 0x49, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x70, diff --git a/claimtxman/claimtxman.go b/claimtxman/claimtxman.go index 5648090f..4ac2b34f 100644 --- a/claimtxman/claimtxman.go +++ b/claimtxman/claimtxman.go @@ -32,14 +32,14 @@ type ClaimTxManager struct { // client is the ethereum client l2Node *utils.Client - l2NetworkID uint + l2NetworkID uint32 bridgeService bridgeServiceInterface cfg Config chExitRootEvent chan *etherman.GlobalExitRoot - chSynced chan uint + chSynced chan uint32 storage StorageInterface auth *bind.TransactOpts - rollupID uint + rollupID uint32 synced bool nonceCache *NonceCache monitorTxs types.TxMonitorer @@ -47,13 +47,13 @@ type ClaimTxManager struct { // NewClaimTxManager creates a new claim transaction manager. 
func NewClaimTxManager(ctx context.Context, cfg Config, chExitRootEvent chan *etherman.GlobalExitRoot, - chSynced chan uint, + chSynced chan uint32, l2NodeURL string, - l2NetworkID uint, + l2NetworkID uint32, l2BridgeAddr common.Address, bridgeService bridgeServiceInterface, storage interface{}, - rollupID uint, + rollupID uint32, etherMan EthermanI, nonceCache *NonceCache, auth *bind.TransactOpts) (*ClaimTxManager, error) { diff --git a/claimtxman/claimtxman_test.go b/claimtxman/claimtxman_test.go index dd150f0f..3df53324 100644 --- a/claimtxman/claimtxman_test.go +++ b/claimtxman/claimtxman_test.go @@ -192,8 +192,8 @@ func TestUpdateDepositStatus(t *testing.T) { require.NoError(t, err) require.Len(t, deposits, 1) require.True(t, deposits[0].ReadyForClaim) - require.Equal(t, uint(1), deposits[0].DepositCount) - require.Equal(t, uint(0), deposits[0].NetworkID) + require.Equal(t, uint32(1), deposits[0].DepositCount) + require.Equal(t, uint32(0), deposits[0].NetworkID) require.NoError(t, pg.UpdateL2DepositsStatus(ctx, l2Root, 1, 1, nil)) deposits, err = pg.GetDeposits(ctx, destAdr, 10, 0, nil) diff --git a/claimtxman/interfaces.go b/claimtxman/interfaces.go index c72c5afa..cb5d5566 100644 --- a/claimtxman/interfaces.go +++ b/claimtxman/interfaces.go @@ -12,11 +12,11 @@ import ( type StorageInterface interface { AddBlock(ctx context.Context, block *etherman.Block, dbTx pgx.Tx) (uint64, error) - UpdateL1DepositsStatus(ctx context.Context, exitRoot []byte, destinationNetwork uint, dbTx pgx.Tx) ([]*etherman.Deposit, error) - UpdateL2DepositsStatus(ctx context.Context, exitRoot []byte, rollupID, networkID uint, dbTx pgx.Tx) error + UpdateL1DepositsStatus(ctx context.Context, exitRoot []byte, destinationNetwork uint32, dbTx pgx.Tx) ([]*etherman.Deposit, error) + UpdateL2DepositsStatus(ctx context.Context, exitRoot []byte, rollupID, networkID uint32, dbTx pgx.Tx) error AddClaimTx(ctx context.Context, mTx types.MonitoredTx, dbTx pgx.Tx) error UpdateClaimTx(ctx context.Context, mTx types.MonitoredTx, dbTx pgx.Tx) error - GetClaimTxsByStatus(ctx context.Context, statuses []types.MonitoredTxStatus, rollupID uint, dbTx pgx.Tx) ([]types.MonitoredTx, error) + GetClaimTxsByStatus(ctx context.Context, statuses []types.MonitoredTxStatus, rollupID uint32, dbTx pgx.Tx) ([]types.MonitoredTx, error) // atomic Rollback(ctx context.Context, dbTx pgx.Tx) error BeginDBTransaction(ctx context.Context) (pgx.Tx, error) @@ -24,6 +24,6 @@ type StorageInterface interface { } type bridgeServiceInterface interface { - GetClaimProofForCompressed(ger common.Hash, depositCnt, networkID uint, dbTx pgx.Tx) (*etherman.GlobalExitRoot, [][bridgectrl.KeyLen]byte, [][bridgectrl.KeyLen]byte, error) - GetDepositStatus(ctx context.Context, depositCount, networkID, destNetworkID uint) (string, error) + GetClaimProofForCompressed(ger common.Hash, depositCnt, networkID uint32, dbTx pgx.Tx) (*etherman.GlobalExitRoot, [][bridgectrl.KeyLen]byte, [][bridgectrl.KeyLen]byte, error) + GetDepositStatus(ctx context.Context, depositCount, networkID, destNetworkID uint32) (string, error) } diff --git a/claimtxman/mocks/bridge_service_interface.go b/claimtxman/mocks/bridge_service_interface.go index 2ddd355d..4f8c32af 100644 --- a/claimtxman/mocks/bridge_service_interface.go +++ b/claimtxman/mocks/bridge_service_interface.go @@ -28,7 +28,7 @@ func (_m *bridgeServiceInterface) EXPECT() *bridgeServiceInterface_Expecter { } // GetClaimProofForCompressed provides a mock function with given fields: ger, depositCnt, networkID, dbTx -func (_m 
*bridgeServiceInterface) GetClaimProofForCompressed(ger common.Hash, depositCnt uint, networkID uint, dbTx pgx.Tx) (*etherman.GlobalExitRoot, [][32]byte, [][32]byte, error) { +func (_m *bridgeServiceInterface) GetClaimProofForCompressed(ger common.Hash, depositCnt uint32, networkID uint32, dbTx pgx.Tx) (*etherman.GlobalExitRoot, [][32]byte, [][32]byte, error) { ret := _m.Called(ger, depositCnt, networkID, dbTx) if len(ret) == 0 { @@ -39,10 +39,10 @@ func (_m *bridgeServiceInterface) GetClaimProofForCompressed(ger common.Hash, de var r1 [][32]byte var r2 [][32]byte var r3 error - if rf, ok := ret.Get(0).(func(common.Hash, uint, uint, pgx.Tx) (*etherman.GlobalExitRoot, [][32]byte, [][32]byte, error)); ok { + if rf, ok := ret.Get(0).(func(common.Hash, uint32, uint32, pgx.Tx) (*etherman.GlobalExitRoot, [][32]byte, [][32]byte, error)); ok { return rf(ger, depositCnt, networkID, dbTx) } - if rf, ok := ret.Get(0).(func(common.Hash, uint, uint, pgx.Tx) *etherman.GlobalExitRoot); ok { + if rf, ok := ret.Get(0).(func(common.Hash, uint32, uint32, pgx.Tx) *etherman.GlobalExitRoot); ok { r0 = rf(ger, depositCnt, networkID, dbTx) } else { if ret.Get(0) != nil { @@ -50,7 +50,7 @@ func (_m *bridgeServiceInterface) GetClaimProofForCompressed(ger common.Hash, de } } - if rf, ok := ret.Get(1).(func(common.Hash, uint, uint, pgx.Tx) [][32]byte); ok { + if rf, ok := ret.Get(1).(func(common.Hash, uint32, uint32, pgx.Tx) [][32]byte); ok { r1 = rf(ger, depositCnt, networkID, dbTx) } else { if ret.Get(1) != nil { @@ -58,7 +58,7 @@ func (_m *bridgeServiceInterface) GetClaimProofForCompressed(ger common.Hash, de } } - if rf, ok := ret.Get(2).(func(common.Hash, uint, uint, pgx.Tx) [][32]byte); ok { + if rf, ok := ret.Get(2).(func(common.Hash, uint32, uint32, pgx.Tx) [][32]byte); ok { r2 = rf(ger, depositCnt, networkID, dbTx) } else { if ret.Get(2) != nil { @@ -66,7 +66,7 @@ func (_m *bridgeServiceInterface) GetClaimProofForCompressed(ger common.Hash, de } } - if rf, ok := ret.Get(3).(func(common.Hash, uint, uint, pgx.Tx) error); ok { + if rf, ok := ret.Get(3).(func(common.Hash, uint32, uint32, pgx.Tx) error); ok { r3 = rf(ger, depositCnt, networkID, dbTx) } else { r3 = ret.Error(3) @@ -82,16 +82,16 @@ type bridgeServiceInterface_GetClaimProofForCompressed_Call struct { // GetClaimProofForCompressed is a helper method to define mock.On call // - ger common.Hash -// - depositCnt uint -// - networkID uint +// - depositCnt uint32 +// - networkID uint32 // - dbTx pgx.Tx func (_e *bridgeServiceInterface_Expecter) GetClaimProofForCompressed(ger interface{}, depositCnt interface{}, networkID interface{}, dbTx interface{}) *bridgeServiceInterface_GetClaimProofForCompressed_Call { return &bridgeServiceInterface_GetClaimProofForCompressed_Call{Call: _e.mock.On("GetClaimProofForCompressed", ger, depositCnt, networkID, dbTx)} } -func (_c *bridgeServiceInterface_GetClaimProofForCompressed_Call) Run(run func(ger common.Hash, depositCnt uint, networkID uint, dbTx pgx.Tx)) *bridgeServiceInterface_GetClaimProofForCompressed_Call { +func (_c *bridgeServiceInterface_GetClaimProofForCompressed_Call) Run(run func(ger common.Hash, depositCnt uint32, networkID uint32, dbTx pgx.Tx)) *bridgeServiceInterface_GetClaimProofForCompressed_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(common.Hash), args[1].(uint), args[2].(uint), args[3].(pgx.Tx)) + run(args[0].(common.Hash), args[1].(uint32), args[2].(uint32), args[3].(pgx.Tx)) }) return _c } @@ -101,13 +101,13 @@ func (_c *bridgeServiceInterface_GetClaimProofForCompressed_Call) 
Return(_a0 *et return _c } -func (_c *bridgeServiceInterface_GetClaimProofForCompressed_Call) RunAndReturn(run func(common.Hash, uint, uint, pgx.Tx) (*etherman.GlobalExitRoot, [][32]byte, [][32]byte, error)) *bridgeServiceInterface_GetClaimProofForCompressed_Call { +func (_c *bridgeServiceInterface_GetClaimProofForCompressed_Call) RunAndReturn(run func(common.Hash, uint32, uint32, pgx.Tx) (*etherman.GlobalExitRoot, [][32]byte, [][32]byte, error)) *bridgeServiceInterface_GetClaimProofForCompressed_Call { _c.Call.Return(run) return _c } // GetDepositStatus provides a mock function with given fields: ctx, depositCount, networkID, destNetworkID -func (_m *bridgeServiceInterface) GetDepositStatus(ctx context.Context, depositCount uint, networkID uint, destNetworkID uint) (string, error) { +func (_m *bridgeServiceInterface) GetDepositStatus(ctx context.Context, depositCount uint32, networkID uint32, destNetworkID uint32) (string, error) { ret := _m.Called(ctx, depositCount, networkID, destNetworkID) if len(ret) == 0 { @@ -116,16 +116,16 @@ func (_m *bridgeServiceInterface) GetDepositStatus(ctx context.Context, depositC var r0 string var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint, uint, uint) (string, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, uint32) (string, error)); ok { return rf(ctx, depositCount, networkID, destNetworkID) } - if rf, ok := ret.Get(0).(func(context.Context, uint, uint, uint) string); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, uint32) string); ok { r0 = rf(ctx, depositCount, networkID, destNetworkID) } else { r0 = ret.Get(0).(string) } - if rf, ok := ret.Get(1).(func(context.Context, uint, uint, uint) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, uint32, uint32, uint32) error); ok { r1 = rf(ctx, depositCount, networkID, destNetworkID) } else { r1 = ret.Error(1) @@ -141,16 +141,16 @@ type bridgeServiceInterface_GetDepositStatus_Call struct { // GetDepositStatus is a helper method to define mock.On call // - ctx context.Context -// - depositCount uint -// - networkID uint -// - destNetworkID uint +// - depositCount uint32 +// - networkID uint32 +// - destNetworkID uint32 func (_e *bridgeServiceInterface_Expecter) GetDepositStatus(ctx interface{}, depositCount interface{}, networkID interface{}, destNetworkID interface{}) *bridgeServiceInterface_GetDepositStatus_Call { return &bridgeServiceInterface_GetDepositStatus_Call{Call: _e.mock.On("GetDepositStatus", ctx, depositCount, networkID, destNetworkID)} } -func (_c *bridgeServiceInterface_GetDepositStatus_Call) Run(run func(ctx context.Context, depositCount uint, networkID uint, destNetworkID uint)) *bridgeServiceInterface_GetDepositStatus_Call { +func (_c *bridgeServiceInterface_GetDepositStatus_Call) Run(run func(ctx context.Context, depositCount uint32, networkID uint32, destNetworkID uint32)) *bridgeServiceInterface_GetDepositStatus_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint), args[2].(uint), args[3].(uint)) + run(args[0].(context.Context), args[1].(uint32), args[2].(uint32), args[3].(uint32)) }) return _c } @@ -160,7 +160,7 @@ func (_c *bridgeServiceInterface_GetDepositStatus_Call) Return(_a0 string, _a1 e return _c } -func (_c *bridgeServiceInterface_GetDepositStatus_Call) RunAndReturn(run func(context.Context, uint, uint, uint) (string, error)) *bridgeServiceInterface_GetDepositStatus_Call { +func (_c *bridgeServiceInterface_GetDepositStatus_Call) RunAndReturn(run 
func(context.Context, uint32, uint32, uint32) (string, error)) *bridgeServiceInterface_GetDepositStatus_Call { _c.Call.Return(run) return _c } diff --git a/claimtxman/mocks/storage_compressed_interface.go b/claimtxman/mocks/storage_compressed_interface.go index ff706a23..5d28f2fc 100644 --- a/claimtxman/mocks/storage_compressed_interface.go +++ b/claimtxman/mocks/storage_compressed_interface.go @@ -178,7 +178,7 @@ func (_c *StorageCompressedInterface_Commit_Call) RunAndReturn(run func(context. } // GetClaimTxsByStatus provides a mock function with given fields: ctx, statuses, rollupID, dbTx -func (_m *StorageCompressedInterface) GetClaimTxsByStatus(ctx context.Context, statuses []types.MonitoredTxStatus, rollupID uint, dbTx pgx.Tx) ([]types.MonitoredTx, error) { +func (_m *StorageCompressedInterface) GetClaimTxsByStatus(ctx context.Context, statuses []types.MonitoredTxStatus, rollupID uint32, dbTx pgx.Tx) ([]types.MonitoredTx, error) { ret := _m.Called(ctx, statuses, rollupID, dbTx) if len(ret) == 0 { @@ -187,10 +187,10 @@ func (_m *StorageCompressedInterface) GetClaimTxsByStatus(ctx context.Context, s var r0 []types.MonitoredTx var r1 error - if rf, ok := ret.Get(0).(func(context.Context, []types.MonitoredTxStatus, uint, pgx.Tx) ([]types.MonitoredTx, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, []types.MonitoredTxStatus, uint32, pgx.Tx) ([]types.MonitoredTx, error)); ok { return rf(ctx, statuses, rollupID, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, []types.MonitoredTxStatus, uint, pgx.Tx) []types.MonitoredTx); ok { + if rf, ok := ret.Get(0).(func(context.Context, []types.MonitoredTxStatus, uint32, pgx.Tx) []types.MonitoredTx); ok { r0 = rf(ctx, statuses, rollupID, dbTx) } else { if ret.Get(0) != nil { @@ -198,7 +198,7 @@ func (_m *StorageCompressedInterface) GetClaimTxsByStatus(ctx context.Context, s } } - if rf, ok := ret.Get(1).(func(context.Context, []types.MonitoredTxStatus, uint, pgx.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, []types.MonitoredTxStatus, uint32, pgx.Tx) error); ok { r1 = rf(ctx, statuses, rollupID, dbTx) } else { r1 = ret.Error(1) @@ -215,15 +215,15 @@ type StorageCompressedInterface_GetClaimTxsByStatus_Call struct { // GetClaimTxsByStatus is a helper method to define mock.On call // - ctx context.Context // - statuses []types.MonitoredTxStatus -// - rollupID uint +// - rollupID uint32 // - dbTx pgx.Tx func (_e *StorageCompressedInterface_Expecter) GetClaimTxsByStatus(ctx interface{}, statuses interface{}, rollupID interface{}, dbTx interface{}) *StorageCompressedInterface_GetClaimTxsByStatus_Call { return &StorageCompressedInterface_GetClaimTxsByStatus_Call{Call: _e.mock.On("GetClaimTxsByStatus", ctx, statuses, rollupID, dbTx)} } -func (_c *StorageCompressedInterface_GetClaimTxsByStatus_Call) Run(run func(ctx context.Context, statuses []types.MonitoredTxStatus, rollupID uint, dbTx pgx.Tx)) *StorageCompressedInterface_GetClaimTxsByStatus_Call { +func (_c *StorageCompressedInterface_GetClaimTxsByStatus_Call) Run(run func(ctx context.Context, statuses []types.MonitoredTxStatus, rollupID uint32, dbTx pgx.Tx)) *StorageCompressedInterface_GetClaimTxsByStatus_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].([]types.MonitoredTxStatus), args[2].(uint), args[3].(pgx.Tx)) + run(args[0].(context.Context), args[1].([]types.MonitoredTxStatus), args[2].(uint32), args[3].(pgx.Tx)) }) return _c } @@ -233,7 +233,7 @@ func (_c *StorageCompressedInterface_GetClaimTxsByStatus_Call) Return(_a0 
[]type return _c } -func (_c *StorageCompressedInterface_GetClaimTxsByStatus_Call) RunAndReturn(run func(context.Context, []types.MonitoredTxStatus, uint, pgx.Tx) ([]types.MonitoredTx, error)) *StorageCompressedInterface_GetClaimTxsByStatus_Call { +func (_c *StorageCompressedInterface_GetClaimTxsByStatus_Call) RunAndReturn(run func(context.Context, []types.MonitoredTxStatus, uint32, pgx.Tx) ([]types.MonitoredTx, error)) *StorageCompressedInterface_GetClaimTxsByStatus_Call { _c.Call.Return(run) return _c } diff --git a/claimtxman/mocks/storage_interface.go b/claimtxman/mocks/storage_interface.go index c7b2ee95..914bf05e 100644 --- a/claimtxman/mocks/storage_interface.go +++ b/claimtxman/mocks/storage_interface.go @@ -238,7 +238,7 @@ func (_c *StorageInterface_Commit_Call) RunAndReturn(run func(context.Context, p } // GetClaimTxsByStatus provides a mock function with given fields: ctx, statuses, rollupID, dbTx -func (_m *StorageInterface) GetClaimTxsByStatus(ctx context.Context, statuses []types.MonitoredTxStatus, rollupID uint, dbTx pgx.Tx) ([]types.MonitoredTx, error) { +func (_m *StorageInterface) GetClaimTxsByStatus(ctx context.Context, statuses []types.MonitoredTxStatus, rollupID uint32, dbTx pgx.Tx) ([]types.MonitoredTx, error) { ret := _m.Called(ctx, statuses, rollupID, dbTx) if len(ret) == 0 { @@ -247,10 +247,10 @@ func (_m *StorageInterface) GetClaimTxsByStatus(ctx context.Context, statuses [] var r0 []types.MonitoredTx var r1 error - if rf, ok := ret.Get(0).(func(context.Context, []types.MonitoredTxStatus, uint, pgx.Tx) ([]types.MonitoredTx, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, []types.MonitoredTxStatus, uint32, pgx.Tx) ([]types.MonitoredTx, error)); ok { return rf(ctx, statuses, rollupID, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, []types.MonitoredTxStatus, uint, pgx.Tx) []types.MonitoredTx); ok { + if rf, ok := ret.Get(0).(func(context.Context, []types.MonitoredTxStatus, uint32, pgx.Tx) []types.MonitoredTx); ok { r0 = rf(ctx, statuses, rollupID, dbTx) } else { if ret.Get(0) != nil { @@ -258,7 +258,7 @@ func (_m *StorageInterface) GetClaimTxsByStatus(ctx context.Context, statuses [] } } - if rf, ok := ret.Get(1).(func(context.Context, []types.MonitoredTxStatus, uint, pgx.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, []types.MonitoredTxStatus, uint32, pgx.Tx) error); ok { r1 = rf(ctx, statuses, rollupID, dbTx) } else { r1 = ret.Error(1) @@ -275,15 +275,15 @@ type StorageInterface_GetClaimTxsByStatus_Call struct { // GetClaimTxsByStatus is a helper method to define mock.On call // - ctx context.Context // - statuses []types.MonitoredTxStatus -// - rollupID uint +// - rollupID uint32 // - dbTx pgx.Tx func (_e *StorageInterface_Expecter) GetClaimTxsByStatus(ctx interface{}, statuses interface{}, rollupID interface{}, dbTx interface{}) *StorageInterface_GetClaimTxsByStatus_Call { return &StorageInterface_GetClaimTxsByStatus_Call{Call: _e.mock.On("GetClaimTxsByStatus", ctx, statuses, rollupID, dbTx)} } -func (_c *StorageInterface_GetClaimTxsByStatus_Call) Run(run func(ctx context.Context, statuses []types.MonitoredTxStatus, rollupID uint, dbTx pgx.Tx)) *StorageInterface_GetClaimTxsByStatus_Call { +func (_c *StorageInterface_GetClaimTxsByStatus_Call) Run(run func(ctx context.Context, statuses []types.MonitoredTxStatus, rollupID uint32, dbTx pgx.Tx)) *StorageInterface_GetClaimTxsByStatus_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].([]types.MonitoredTxStatus), args[2].(uint), 
args[3].(pgx.Tx)) + run(args[0].(context.Context), args[1].([]types.MonitoredTxStatus), args[2].(uint32), args[3].(pgx.Tx)) }) return _c } @@ -293,7 +293,7 @@ func (_c *StorageInterface_GetClaimTxsByStatus_Call) Return(_a0 []types.Monitore return _c } -func (_c *StorageInterface_GetClaimTxsByStatus_Call) RunAndReturn(run func(context.Context, []types.MonitoredTxStatus, uint, pgx.Tx) ([]types.MonitoredTx, error)) *StorageInterface_GetClaimTxsByStatus_Call { +func (_c *StorageInterface_GetClaimTxsByStatus_Call) RunAndReturn(run func(context.Context, []types.MonitoredTxStatus, uint32, pgx.Tx) ([]types.MonitoredTx, error)) *StorageInterface_GetClaimTxsByStatus_Call { _c.Call.Return(run) return _c } @@ -394,7 +394,7 @@ func (_c *StorageInterface_UpdateClaimTx_Call) RunAndReturn(run func(context.Con } // UpdateL1DepositsStatus provides a mock function with given fields: ctx, exitRoot, destinationNetwork, dbTx -func (_m *StorageInterface) UpdateL1DepositsStatus(ctx context.Context, exitRoot []byte, destinationNetwork uint, dbTx pgx.Tx) ([]*etherman.Deposit, error) { +func (_m *StorageInterface) UpdateL1DepositsStatus(ctx context.Context, exitRoot []byte, destinationNetwork uint32, dbTx pgx.Tx) ([]*etherman.Deposit, error) { ret := _m.Called(ctx, exitRoot, destinationNetwork, dbTx) if len(ret) == 0 { @@ -403,10 +403,10 @@ func (_m *StorageInterface) UpdateL1DepositsStatus(ctx context.Context, exitRoot var r0 []*etherman.Deposit var r1 error - if rf, ok := ret.Get(0).(func(context.Context, []byte, uint, pgx.Tx) ([]*etherman.Deposit, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, []byte, uint32, pgx.Tx) ([]*etherman.Deposit, error)); ok { return rf(ctx, exitRoot, destinationNetwork, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, []byte, uint, pgx.Tx) []*etherman.Deposit); ok { + if rf, ok := ret.Get(0).(func(context.Context, []byte, uint32, pgx.Tx) []*etherman.Deposit); ok { r0 = rf(ctx, exitRoot, destinationNetwork, dbTx) } else { if ret.Get(0) != nil { @@ -414,7 +414,7 @@ func (_m *StorageInterface) UpdateL1DepositsStatus(ctx context.Context, exitRoot } } - if rf, ok := ret.Get(1).(func(context.Context, []byte, uint, pgx.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, []byte, uint32, pgx.Tx) error); ok { r1 = rf(ctx, exitRoot, destinationNetwork, dbTx) } else { r1 = ret.Error(1) @@ -431,15 +431,15 @@ type StorageInterface_UpdateL1DepositsStatus_Call struct { // UpdateL1DepositsStatus is a helper method to define mock.On call // - ctx context.Context // - exitRoot []byte -// - destinationNetwork uint +// - destinationNetwork uint32 // - dbTx pgx.Tx func (_e *StorageInterface_Expecter) UpdateL1DepositsStatus(ctx interface{}, exitRoot interface{}, destinationNetwork interface{}, dbTx interface{}) *StorageInterface_UpdateL1DepositsStatus_Call { return &StorageInterface_UpdateL1DepositsStatus_Call{Call: _e.mock.On("UpdateL1DepositsStatus", ctx, exitRoot, destinationNetwork, dbTx)} } -func (_c *StorageInterface_UpdateL1DepositsStatus_Call) Run(run func(ctx context.Context, exitRoot []byte, destinationNetwork uint, dbTx pgx.Tx)) *StorageInterface_UpdateL1DepositsStatus_Call { +func (_c *StorageInterface_UpdateL1DepositsStatus_Call) Run(run func(ctx context.Context, exitRoot []byte, destinationNetwork uint32, dbTx pgx.Tx)) *StorageInterface_UpdateL1DepositsStatus_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].([]byte), args[2].(uint), args[3].(pgx.Tx)) + run(args[0].(context.Context), args[1].([]byte), args[2].(uint32), 
args[3].(pgx.Tx)) }) return _c } @@ -449,13 +449,13 @@ func (_c *StorageInterface_UpdateL1DepositsStatus_Call) Return(_a0 []*etherman.D return _c } -func (_c *StorageInterface_UpdateL1DepositsStatus_Call) RunAndReturn(run func(context.Context, []byte, uint, pgx.Tx) ([]*etherman.Deposit, error)) *StorageInterface_UpdateL1DepositsStatus_Call { +func (_c *StorageInterface_UpdateL1DepositsStatus_Call) RunAndReturn(run func(context.Context, []byte, uint32, pgx.Tx) ([]*etherman.Deposit, error)) *StorageInterface_UpdateL1DepositsStatus_Call { _c.Call.Return(run) return _c } // UpdateL2DepositsStatus provides a mock function with given fields: ctx, exitRoot, rollupID, networkID, dbTx -func (_m *StorageInterface) UpdateL2DepositsStatus(ctx context.Context, exitRoot []byte, rollupID uint, networkID uint, dbTx pgx.Tx) error { +func (_m *StorageInterface) UpdateL2DepositsStatus(ctx context.Context, exitRoot []byte, rollupID uint32, networkID uint32, dbTx pgx.Tx) error { ret := _m.Called(ctx, exitRoot, rollupID, networkID, dbTx) if len(ret) == 0 { @@ -463,7 +463,7 @@ func (_m *StorageInterface) UpdateL2DepositsStatus(ctx context.Context, exitRoot } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, []byte, uint, uint, pgx.Tx) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, []byte, uint32, uint32, pgx.Tx) error); ok { r0 = rf(ctx, exitRoot, rollupID, networkID, dbTx) } else { r0 = ret.Error(0) @@ -480,16 +480,16 @@ type StorageInterface_UpdateL2DepositsStatus_Call struct { // UpdateL2DepositsStatus is a helper method to define mock.On call // - ctx context.Context // - exitRoot []byte -// - rollupID uint -// - networkID uint +// - rollupID uint32 +// - networkID uint32 // - dbTx pgx.Tx func (_e *StorageInterface_Expecter) UpdateL2DepositsStatus(ctx interface{}, exitRoot interface{}, rollupID interface{}, networkID interface{}, dbTx interface{}) *StorageInterface_UpdateL2DepositsStatus_Call { return &StorageInterface_UpdateL2DepositsStatus_Call{Call: _e.mock.On("UpdateL2DepositsStatus", ctx, exitRoot, rollupID, networkID, dbTx)} } -func (_c *StorageInterface_UpdateL2DepositsStatus_Call) Run(run func(ctx context.Context, exitRoot []byte, rollupID uint, networkID uint, dbTx pgx.Tx)) *StorageInterface_UpdateL2DepositsStatus_Call { +func (_c *StorageInterface_UpdateL2DepositsStatus_Call) Run(run func(ctx context.Context, exitRoot []byte, rollupID uint32, networkID uint32, dbTx pgx.Tx)) *StorageInterface_UpdateL2DepositsStatus_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].([]byte), args[2].(uint), args[3].(uint), args[4].(pgx.Tx)) + run(args[0].(context.Context), args[1].([]byte), args[2].(uint32), args[3].(uint32), args[4].(pgx.Tx)) }) return _c } @@ -499,7 +499,7 @@ func (_c *StorageInterface_UpdateL2DepositsStatus_Call) Return(_a0 error) *Stora return _c } -func (_c *StorageInterface_UpdateL2DepositsStatus_Call) RunAndReturn(run func(context.Context, []byte, uint, uint, pgx.Tx) error) *StorageInterface_UpdateL2DepositsStatus_Call { +func (_c *StorageInterface_UpdateL2DepositsStatus_Call) RunAndReturn(run func(context.Context, []byte, uint32, uint32, pgx.Tx) error) *StorageInterface_UpdateL2DepositsStatus_Call { _c.Call.Return(run) return _c } diff --git a/claimtxman/monitor_compressed_txs.go b/claimtxman/monitor_compressed_txs.go index f3830479..bbbfb164 100644 --- a/claimtxman/monitor_compressed_txs.go +++ b/claimtxman/monitor_compressed_txs.go @@ -19,7 +19,7 @@ const ( ) type StorageCompressedInterface interface { - 
GetClaimTxsByStatus(ctx context.Context, statuses []ctmtypes.MonitoredTxStatus, rollupID uint, dbTx pgx.Tx) ([]ctmtypes.MonitoredTx, error) + GetClaimTxsByStatus(ctx context.Context, statuses []ctmtypes.MonitoredTxStatus, rollupID uint32, dbTx pgx.Tx) ([]ctmtypes.MonitoredTx, error) GetMonitoredTxsGroups(ctx context.Context, groupIds []uint64, dbTx pgx.Tx) (map[uint64]ctmtypes.MonitoredTxGroupDBEntry, error) AddMonitoredTxsGroup(ctx context.Context, mTxGroup *ctmtypes.MonitoredTxGroupDBEntry, dbTx pgx.Tx) error @@ -49,7 +49,7 @@ type MonitorCompressedTxs struct { timeProvider utils.TimeProvider triggerGroups *GroupsTrigger gasOffset uint64 - rollupID uint + rollupID uint32 } func NewMonitorCompressedTxs(ctx context.Context, @@ -61,7 +61,7 @@ func NewMonitorCompressedTxs(ctx context.Context, etherMan EthermanI, timeProvider utils.TimeProvider, gasOffset uint64, - rollupID uint) *MonitorCompressedTxs { + rollupID uint32) *MonitorCompressedTxs { composer, err := NewComposeCompressClaim() if err != nil { log.Fatal("failed to create ComposeCompressClaim: %v", err) diff --git a/claimtxman/monitortxs.go b/claimtxman/monitortxs.go index 15a494ff..783c9671 100644 --- a/claimtxman/monitortxs.go +++ b/claimtxman/monitortxs.go @@ -25,7 +25,7 @@ type MonitorTxs struct { // client is the ethereum client l2Node *utils.Client cfg Config - rollupID uint + rollupID uint32 nonceCache *NonceCache auth *bind.TransactOpts } @@ -35,7 +35,7 @@ func NewMonitorTxs(ctx context.Context, l2Node *utils.Client, cfg Config, nonceCache *NonceCache, - rollupID uint, + rollupID uint32, auth *bind.TransactOpts) *MonitorTxs { return &MonitorTxs{ rollupID: rollupID, diff --git a/cmd/run.go b/cmd/run.go index ae1b3682..638b982c 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -58,7 +58,7 @@ func start(ctx *cli.Context) error { networkID := l1Etherman.GetNetworkID() log.Infof("main network id: %d", networkID) - var networkIDs = []uint{networkID} + var networkIDs = []uint32{networkID} for _, client := range l2Ethermans { networkID := client.GetNetworkID() log.Infof("l2 network id: %d", networkID) @@ -97,17 +97,17 @@ func start(ctx *cli.Context) error { } var chsExitRootEvent []chan *etherman.GlobalExitRoot - var chsSyncedL2 []chan uint + var chsSyncedL2 []chan uint32 for i, l2EthermanClient := range l2Ethermans { log.Debug("trusted sequencer URL ", c.Etherman.L2URLs[i]) zkEVMClient := client.NewClient(c.Etherman.L2URLs[i]) chExitRootEventL2 := make(chan *etherman.GlobalExitRoot) - chSyncedL2 := make(chan uint) + chSyncedL2 := make(chan uint32) chsExitRootEvent = append(chsExitRootEvent, chExitRootEventL2) chsSyncedL2 = append(chsSyncedL2, chSyncedL2) - go runSynchronizer(ctx.Context, 0, bridgeController, l2EthermanClient, c.Synchronizer, storage, zkEVMClient, chExitRootEventL2, nil, chSyncedL2, []uint{}) + go runSynchronizer(ctx.Context, 0, bridgeController, l2EthermanClient, c.Synchronizer, storage, zkEVMClient, chExitRootEventL2, nil, chSyncedL2, []uint32{}) } - chSynced := make(chan uint) + chSynced := make(chan uint32) go runSynchronizer(ctx.Context, c.NetworkConfig.GenBlockNumber, bridgeController, l1Etherman, c.Synchronizer, storage, nil, nil, chsExitRootEvent, chSynced, networkIDs) go func() { for { @@ -164,7 +164,7 @@ func setupLog(c log.Config) { log.Init(c) } -func monitorChannel(ctx context.Context, chExitRootEvent chan *etherman.GlobalExitRoot, chSynced chan uint, networkID uint, storage db.Storage) { +func monitorChannel(ctx context.Context, chExitRootEvent chan *etherman.GlobalExitRoot, chSynced chan uint32, networkID 
uint32, storage db.Storage) { go func() { for { select { @@ -237,7 +237,7 @@ func newEthermans(c *config.Config) (*etherman.Client, []*etherman.Client, error return l1Etherman, l2Ethermans, nil } -func runSynchronizer(ctx context.Context, genBlockNumber uint64, brdigeCtrl *bridgectrl.BridgeController, etherman *etherman.Client, cfg synchronizer.Config, storage db.Storage, zkEVMClient *client.Client, chExitRootEventL2 chan *etherman.GlobalExitRoot, chsExitRootEvent []chan *etherman.GlobalExitRoot, chSynced chan uint, allNetworkIDs []uint) { +func runSynchronizer(ctx context.Context, genBlockNumber uint64, brdigeCtrl *bridgectrl.BridgeController, etherman *etherman.Client, cfg synchronizer.Config, storage db.Storage, zkEVMClient *client.Client, chExitRootEventL2 chan *etherman.GlobalExitRoot, chsExitRootEvent []chan *etherman.GlobalExitRoot, chSynced chan uint32, allNetworkIDs []uint32) { sy, err := synchronizer.NewSynchronizer(ctx, storage, brdigeCtrl, etherman, zkEVMClient, genBlockNumber, chExitRootEventL2, chsExitRootEvent, chSynced, cfg, allNetworkIDs) if err != nil { log.Fatal(err) diff --git a/db/pgstorage/migrations/0007_test.go b/db/pgstorage/migrations/0007_test.go index a6fb0a7a..3bee32f7 100644 --- a/db/pgstorage/migrations/0007_test.go +++ b/db/pgstorage/migrations/0007_test.go @@ -61,7 +61,7 @@ func (m migrationTest0007) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) ) getClaimSQL := "SELECT index, orig_net, orig_addr, amount, dest_addr, block_id, network_id, tx_hash, rollup_index, mainnet_flag FROM sync.claim WHERE index = $1 AND network_id = $2" _ = db.QueryRow(getClaimSQL, 3, 1).Scan(&claim.Index, &claim.OriginalNetwork, &claim.OriginalAddress, &amount, &claim.DestinationAddress, &claim.BlockID, &claim.NetworkID, &claim.TxHash, &claim.RollupIndex, &claim.MainnetFlag) - assert.Equal(t, uint64(0), claim.RollupIndex) + assert.Equal(t, uint32(0), claim.RollupIndex) assert.Equal(t, false, claim.MainnetFlag) insertClaim := "INSERT INTO sync.Claim (network_id, index, orig_net, orig_addr, amount, dest_addr, block_id, tx_hash, rollup_index, mainnet_flag) VALUES(1, 4, 0, decode('0000000000000000000000000000000000000000','hex'), '300000000000000000', decode('14567C0DCF79C20FE1A21E36EC975D1775A1905C','hex'), 2, decode('A9505DB7D7EDD08947F12F2B1F7898148FFB43D80BCB977B78161EF14173D575','hex'), 37, true);" @@ -71,7 +71,7 @@ func (m migrationTest0007) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) getClaimSQL = "SELECT index, orig_net, orig_addr, amount, dest_addr, block_id, network_id, tx_hash, rollup_index, mainnet_flag FROM sync.claim WHERE index = $1 AND network_id = $2" _ = db.QueryRow(getClaimSQL, 4, 1).Scan(&claim.Index, &claim.OriginalNetwork, &claim.OriginalAddress, &amount, &claim.DestinationAddress, &claim.BlockID, &claim.NetworkID, &claim.TxHash, &claim.RollupIndex, &claim.MainnetFlag) assert.NoError(t, err) - assert.Equal(t, uint64(37), claim.RollupIndex) + assert.Equal(t, uint32(37), claim.RollupIndex) assert.Equal(t, true, claim.MainnetFlag) } diff --git a/db/pgstorage/migrations/0013.sql b/db/pgstorage/migrations/0013.sql new file mode 100644 index 00000000..2f3affc0 --- /dev/null +++ b/db/pgstorage/migrations/0013.sql @@ -0,0 +1,35 @@ +-- +migrate Up +ALTER TABLE sync.claim ALTER COLUMN network_id TYPE BIGINT; +ALTER TABLE sync.deposit ALTER COLUMN network_id TYPE BIGINT; +ALTER TABLE sync.token_wrapped ALTER COLUMN network_id TYPE BIGINT; +ALTER TABLE sync.block ALTER COLUMN network_id TYPE BIGINT; +ALTER TABLE sync.exit_root ALTER COLUMN network_id TYPE 
BIGINT; +ALTER TABLE mt.root ALTER COLUMN network TYPE BIGINT; + +ALTER TABLE sync.block ALTER COLUMN id TYPE BIGINT; +CREATE SEQUENCE IF NOT EXISTS sync.block_id_seq; +ALTER TABLE sync.block ALTER COLUMN id SET NOT NULL; +ALTER TABLE sync.block ALTER COLUMN id SET DEFAULT nextval('sync.block_id_seq'); +ALTER SEQUENCE sync.block_id_seq OWNED BY sync.block.id; + +ALTER TABLE sync.exit_root ALTER COLUMN id TYPE BIGINT; +CREATE SEQUENCE IF NOT EXISTS sync.exit_root_id_seq; +ALTER TABLE sync.exit_root ALTER COLUMN id SET NOT NULL; +ALTER TABLE sync.exit_root ALTER COLUMN id SET DEFAULT nextval('sync.exit_root_id_seq'); +ALTER SEQUENCE sync.exit_root_id_seq OWNED BY sync.exit_root.id; + +ALTER TABLE sync.deposit ALTER COLUMN id TYPE BIGINT; +CREATE SEQUENCE IF NOT EXISTS sync.deposit_id_seq; +ALTER TABLE sync.deposit ALTER COLUMN id SET NOT NULL; +ALTER TABLE sync.deposit ALTER COLUMN id SET DEFAULT nextval('sync.deposit_id_seq'); +ALTER SEQUENCE sync.deposit_id_seq OWNED BY sync.deposit.id; + +-- +migrate Down +ALTER TABLE sync.claim ALTER COLUMN network_id TYPE INTEGER; +ALTER TABLE sync.deposit ALTER COLUMN network_id TYPE INTEGER; +ALTER TABLE sync.token_wrapped ALTER COLUMN network_id TYPE INTEGER; +ALTER TABLE mt.root ALTER COLUMN network TYPE INTEGER; +ALTER TABLE sync.block ALTER COLUMN network_id TYPE INTEGER; +ALTER TABLE sync.exit_root ALTER COLUMN network_id TYPE INTEGER; + +-- No need to revert the SERIAL to BIGSERIAL type changed \ No newline at end of file diff --git a/db/pgstorage/migrations/0013_test.go b/db/pgstorage/migrations/0013_test.go new file mode 100644 index 00000000..2f56c8db --- /dev/null +++ b/db/pgstorage/migrations/0013_test.go @@ -0,0 +1,93 @@ +package migrations_test + +import ( + "database/sql" + "testing" + + "github.com/stretchr/testify/assert" +) + +type migrationTest0013 struct{} + +func (m migrationTest0013) InsertData(db *sql.DB) error { + block := "INSERT INTO sync.block (id, block_num, block_hash, parent_hash, network_id, received_at) VALUES(2, 2803824, decode('27474F16174BBE50C294FE13C190B92E42B2368A6D4AEB8A4A015F52816296C3','hex'), decode('C9B5033799ADF3739383A0489EFBE8A0D4D5E4478778A4F4304562FD51AE4C07','hex'), 1, '0001-01-01 01:00:00.000');" + if _, err := db.Exec(block); err != nil { + return err + } + return nil +} + +func (m migrationTest0013) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) { + insertDeposit := "INSERT INTO sync.deposit(leaf_type, network_id, orig_net, orig_addr, amount, dest_net, dest_addr, block_id, deposit_cnt, tx_hash, metadata, id, ready_for_claim) " + + "VALUES(0, 4294967295, 4294967295, decode('0000000000000000000000000000000000000000','hex'), '10000000000000000000', 4294967295, decode('C949254D682D8C9AD5682521675B8F43B102AEC4','hex'), 2, 4294967295, decode('C2D6575EA98EB55E36B5AC6E11196800362594458A4B3143DB50E4995CB2422E','hex'), decode('','hex'), 9223372036854775807, true);" + if _, err := db.Exec(insertDeposit); err != nil { + assert.NoError(t, err) + } + insertClaim := "INSERT INTO sync.Claim (network_id, index, orig_net, orig_addr, amount, dest_addr, block_id, tx_hash) VALUES(4294967295, 4294967295, 4294967295, decode('0000000000000000000000000000000000000000','hex'), '300000000000000000', decode('14567C0DCF79C20FE1A21E36EC975D1775A1905C','hex'), 2, decode('A9505DB7D7EDD08947F12F2B1F7898148FFB43D80BCB977B78161EF14173D575','hex'));" + if _, err := db.Exec(insertClaim); err != nil { + assert.NoError(t, err) + } + insertTokenWrapped := "INSERT INTO sync.token_wrapped (network_id, orig_net, orig_token_addr, 
wrapped_token_addr, block_id) " + + "VALUES(4294967295, 4294967295, decode('0000000000000000000000000000000000000000','hex'),decode('0000000000000000000000000000000000000000','hex'), 2);" + if _, err := db.Exec(insertTokenWrapped); err != nil { + assert.NoError(t, err) + } + block := "INSERT INTO sync.block (id, block_num, block_hash, parent_hash, network_id, received_at) VALUES(9223372036854775807, 4294967295, decode('C2D6575EA98EB55E36B5AC6E11196800362594458A4B3143DB50E4995CB2422E','hex'), decode('C2D6575EA98EB55E36B5AC6E11196800362594458A4B3143DB50E4995CB2422E','hex'), 4294967295, '0001-01-01 01:00:00.000');" + if _, err := db.Exec(block); err != nil { + assert.NoError(t, err) + } + exitRoot := "INSERT INTO sync.exit_root(id, block_id, global_exit_root, exit_roots, network_id) VALUES(9223372036854775807, 9223372036854775807, decode('B881611B39DC5EAFF3AF06DCA56A0AB9997EF9F72FA2B34BCC80F1CCDC4242CD','hex'), '{decode(''5C7865386436396433336461383039616339653861323963666632373264663630656461373461646330626164653230313733393639636464656333643531616232'',''hex''),decode(''5C7832376165356261303864373239316339366338636264646363313438626634386136643638633739373462393433353666353337353465663631373164373537'',''hex'')}', 4294967295);" + if _, err := db.Exec(exitRoot); err != nil { + assert.NoError(t, err) + } + root := "INSERT INTO mt.root(root, network, deposit_id) VALUES(decode('E8D69D33DA809AC9E8A29CFF272DF60EDA74ADC0BADE20173969CDDEC3D51AB2','hex'), 4294967295, 9223372036854775807);" + if _, err := db.Exec(root); err != nil { + assert.NoError(t, err) + } + // Remove values for down migration + _, err := db.Exec("DELETE FROM sync.claim;") + assert.NoError(t, err) + _, err = db.Exec("DELETE FROM mt.root;") + assert.NoError(t, err) + _, err = db.Exec("DELETE FROM sync.deposit;") + assert.NoError(t, err) + _, err = db.Exec("DELETE FROM sync.token_wrapped;") + assert.NoError(t, err) + _, err = db.Exec("DELETE FROM sync.block WHERE id = 9223372036854775807;") + assert.NoError(t, err) + _, err = db.Exec("DELETE FROM sync.exit_root;") + assert.NoError(t, err) +} + +func (m migrationTest0013) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) { + insertDeposit := "INSERT INTO sync.deposit(leaf_type, network_id, orig_net, orig_addr, amount, dest_net, dest_addr, block_id, deposit_cnt, tx_hash, metadata, id, ready_for_claim) " + + "VALUES(0, 4294967295, 4294967295, decode('0000000000000000000000000000000000000000','hex'), '10000000000000000000', 4294967295, decode('C949254D682D8C9AD5682521675B8F43B102AEC4','hex'), 2, 4294967295, decode('C2D6575EA98EB55E36B5AC6E11196800362594458A4B3143DB50E4995CB2422E','hex'), decode('','hex'), 1, true);" + if _, err := db.Exec(insertDeposit); err != nil { + assert.Error(t, err) + } + insertClaim := "INSERT INTO sync.Claim (network_id, index, orig_net, orig_addr, amount, dest_addr, block_id, tx_hash) VALUES(4294967295, 4294967295, 4294967295, decode('0000000000000000000000000000000000000000','hex'), '300000000000000000', decode('14567C0DCF79C20FE1A21E36EC975D1775A1905C','hex'), 2, decode('A9505DB7D7EDD08947F12F2B1F7898148FFB43D80BCB977B78161EF14173D575','hex'));" + if _, err := db.Exec(insertClaim); err != nil { + assert.Error(t, err) + } + insertTokenWrapped := "INSERT INTO sync.token_wrapped (network_id, orig_net, orig_token_addr, wrapped_token_addr, block_id) " + + "VALUES(4294967295, 4294967295, decode('0000000000000000000000000000000000000000','hex'),decode('0000000000000000000000000000000000000000','hex'), 2);" + if _, err := db.Exec(insertTokenWrapped); err != 
nil { + assert.Error(t, err) + } + block := "INSERT INTO sync.block (id, block_num, block_hash, parent_hash, network_id, received_at) VALUES(5, 4294967295, decode('C2D6575EA98EB55E36B5AC6E11196800362594458A4B3143DB50E4995CB2422E','hex'), decode('C2D6575EA98EB55E36B5AC6E11196800362594458A4B3143DB50E4995CB2422E','hex'), 4294967295, '0001-01-01 01:00:00.000');" + if _, err := db.Exec(block); err != nil { + assert.Error(t, err) + } + exitRoot := "INSERT INTO sync.exit_root(id, block_id, global_exit_root, exit_roots, network_id) VALUES(2, 2, decode('B881611B39DC5EAFF3AF06DCA56A0AB9997EF9F72FA2B34BCC80F1CCDC4242CD','hex'), '{decode(''5C7865386436396433336461383039616339653861323963666632373264663630656461373461646330626164653230313733393639636464656333643531616232'',''hex''),decode(''5C7832376165356261303864373239316339366338636264646363313438626634386136643638633739373462393433353666353337353465663631373164373537'',''hex'')}', 4294967295);" + if _, err := db.Exec(exitRoot); err != nil { + assert.Error(t, err) + } + root := "INSERT INTO mt.root(root, network, deposit_id) VALUES(decode('E8D69D33DA809AC9E8A29CFF272DF60EDA74ADC0BADE20173969CDDEC3D51AB2','hex'), 4294967295, 1);" + if _, err := db.Exec(root); err != nil { + assert.Error(t, err) + } +} + +func TestMigration0013(t *testing.T) { + runMigrationTest(t, 13, migrationTest0013{}) +} diff --git a/db/pgstorage/pgstorage.go b/db/pgstorage/pgstorage.go index 1c541a29..ebcee5e3 100644 --- a/db/pgstorage/pgstorage.go +++ b/db/pgstorage/pgstorage.go @@ -69,7 +69,7 @@ func (p *PostgresStorage) BeginDBTransaction(ctx context.Context) (pgx.Tx, error } // GetLastBlock gets the last block. -func (p *PostgresStorage) GetLastBlock(ctx context.Context, networkID uint, dbTx pgx.Tx) (*etherman.Block, error) { +func (p *PostgresStorage) GetLastBlock(ctx context.Context, networkID uint32, dbTx pgx.Tx) (*etherman.Block, error) { var block etherman.Block const getLastBlockSQL = "SELECT id, block_num, block_hash, parent_hash, network_id, received_at FROM sync.block where network_id = $1 ORDER BY block_num DESC LIMIT 1" @@ -128,7 +128,7 @@ func (p *PostgresStorage) AddClaim(ctx context.Context, claim *etherman.Claim, d } // GetTokenMetadata gets the metadata of the dedicated token. -func (p *PostgresStorage) GetTokenMetadata(ctx context.Context, networkID, destNet uint, originalTokenAddr common.Address, dbTx pgx.Tx) ([]byte, error) { +func (p *PostgresStorage) GetTokenMetadata(ctx context.Context, networkID, destNet uint32, originalTokenAddr common.Address, dbTx pgx.Tx) ([]byte, error) { var metadata []byte const getMetadataSQL = "SELECT metadata from sync.deposit WHERE network_id = $1 AND orig_addr = $2 AND dest_net = $3 AND metadata IS NOT NULL LIMIT 1" e := p.getExecQuerier(dbTx) @@ -161,7 +161,7 @@ func (p *PostgresStorage) AddTokenWrapped(ctx context.Context, tokenWrapped *eth } // Reset resets the state to a block for the given DB tx. -func (p *PostgresStorage) Reset(ctx context.Context, blockNumber uint64, networkID uint, dbTx pgx.Tx) error { +func (p *PostgresStorage) Reset(ctx context.Context, blockNumber uint64, networkID uint32, dbTx pgx.Tx) error { const resetSQL = "DELETE FROM sync.block WHERE block_num > $1 AND network_id = $2" e := p.getExecQuerier(dbTx) _, err := e.Exec(ctx, resetSQL, blockNumber, networkID) @@ -169,7 +169,7 @@ func (p *PostgresStorage) Reset(ctx context.Context, blockNumber uint64, network } // GetPreviousBlock gets the offset previous L1 block respect to latest. 
-func (p *PostgresStorage) GetPreviousBlock(ctx context.Context, networkID uint, offset uint64, dbTx pgx.Tx) (*etherman.Block, error) { +func (p *PostgresStorage) GetPreviousBlock(ctx context.Context, networkID uint32, offset uint64, dbTx pgx.Tx) (*etherman.Block, error) { var block etherman.Block const getPreviousBlockSQL = "SELECT block_num, block_hash, parent_hash, network_id, received_at FROM sync.block WHERE network_id = $1 ORDER BY block_num DESC LIMIT 1 OFFSET $2" e := p.getExecQuerier(dbTx) @@ -181,11 +181,11 @@ func (p *PostgresStorage) GetPreviousBlock(ctx context.Context, networkID uint, } // GetNumberDeposits gets the number of deposits. -func (p *PostgresStorage) GetNumberDeposits(ctx context.Context, networkID uint, blockNumber uint64, dbTx pgx.Tx) (uint64, error) { +func (p *PostgresStorage) GetNumberDeposits(ctx context.Context, networkID uint32, blockNumber uint64, dbTx pgx.Tx) (uint32, error) { var nDeposits int64 const getNumDepositsSQL = "SELECT coalesce(MAX(deposit_cnt), -1) FROM sync.deposit as d INNER JOIN sync.block as b ON d.network_id = b.network_id AND d.block_id = b.id WHERE d.network_id = $1 AND b.block_num <= $2" err := p.getExecQuerier(dbTx).QueryRow(ctx, getNumDepositsSQL, networkID, blockNumber).Scan(&nDeposits) - return uint64(nDeposits + 1), err + return uint32(nDeposits + 1), err } // AddTrustedGlobalExitRoot adds new global exit root which comes from the trusted sequencer. @@ -199,7 +199,7 @@ func (p *PostgresStorage) AddTrustedGlobalExitRoot(ctx context.Context, trustedE } // GetClaim gets a specific claim from the storage. -func (p *PostgresStorage) GetClaim(ctx context.Context, depositCount, originNetworkID, networkID uint, dbTx pgx.Tx) (*etherman.Claim, error) { +func (p *PostgresStorage) GetClaim(ctx context.Context, depositCount, originNetworkID, networkID uint32, dbTx pgx.Tx) (*etherman.Claim, error) { var ( claim etherman.Claim amount string @@ -239,7 +239,7 @@ func (p *PostgresStorage) GetClaim(ctx context.Context, depositCount, originNetw } // GetDeposit gets a specific deposit from the storage. -func (p *PostgresStorage) GetDeposit(ctx context.Context, depositCounterUser uint, networkID uint, dbTx pgx.Tx) (*etherman.Deposit, error) { +func (p *PostgresStorage) GetDeposit(ctx context.Context, depositCounterUser, networkID uint32, dbTx pgx.Tx) (*etherman.Deposit, error) { var ( deposit etherman.Deposit amount string @@ -255,7 +255,7 @@ func (p *PostgresStorage) GetDeposit(ctx context.Context, depositCounterUser uin } // GetLatestExitRoot gets the latest global exit root. -func (p *PostgresStorage) GetLatestExitRoot(ctx context.Context, networkID, destNetwork uint, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) { +func (p *PostgresStorage) GetLatestExitRoot(ctx context.Context, networkID, destNetwork uint32, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) { if networkID == 0 { return p.GetLatestTrustedExitRoot(ctx, destNetwork, dbTx) } @@ -300,7 +300,7 @@ func (p *PostgresStorage) GetExitRootByGER(ctx context.Context, ger common.Hash, } // GetLatestTrustedExitRoot gets the latest trusted global exit root. 
-func (p *PostgresStorage) GetLatestTrustedExitRoot(ctx context.Context, networkID uint, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) { +func (p *PostgresStorage) GetLatestTrustedExitRoot(ctx context.Context, networkID uint32, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) { var ( ger etherman.GlobalExitRoot exitRoots [][]byte @@ -318,7 +318,7 @@ func (p *PostgresStorage) GetLatestTrustedExitRoot(ctx context.Context, networkI } // GetTokenWrapped gets a specific wrapped token. -func (p *PostgresStorage) GetTokenWrapped(ctx context.Context, originalNetwork uint, originalTokenAddress common.Address, dbTx pgx.Tx) (*etherman.TokenWrapped, error) { +func (p *PostgresStorage) GetTokenWrapped(ctx context.Context, originalNetwork uint32, originalTokenAddress common.Address, dbTx pgx.Tx) (*etherman.TokenWrapped, error) { const getWrappedTokenSQL = "SELECT network_id, orig_net, orig_token_addr, wrapped_token_addr, block_id, name, symbol, decimals FROM sync.token_wrapped WHERE orig_net = $1 AND orig_token_addr = $2" var token etherman.TokenWrapped @@ -353,8 +353,8 @@ func (p *PostgresStorage) GetTokenWrapped(ctx context.Context, originalNetwork u } // GetDepositCountByRoot gets the deposit count by the root. -func (p *PostgresStorage) GetDepositCountByRoot(ctx context.Context, root []byte, network uint8, dbTx pgx.Tx) (uint, error) { - var depositCount uint +func (p *PostgresStorage) GetDepositCountByRoot(ctx context.Context, root []byte, network uint32, dbTx pgx.Tx) (uint32, error) { + var depositCount uint32 const getDepositCountByRootSQL = "SELECT sync.deposit.deposit_cnt FROM mt.root INNER JOIN sync.deposit ON sync.deposit.id = mt.root.deposit_id WHERE mt.root.root = $1 AND mt.root.network = $2" err := p.getExecQuerier(dbTx).QueryRow(ctx, getDepositCountByRootSQL, root, network).Scan(&depositCount) if errors.Is(err, pgx.ErrNoRows) { @@ -364,7 +364,7 @@ func (p *PostgresStorage) GetDepositCountByRoot(ctx context.Context, root []byte } // CheckIfRootExists checks that the root exists on the db. -func (p *PostgresStorage) CheckIfRootExists(ctx context.Context, root []byte, network uint8, dbTx pgx.Tx) (bool, error) { +func (p *PostgresStorage) CheckIfRootExists(ctx context.Context, root []byte, network uint32, dbTx pgx.Tx) (bool, error) { var count uint const getDepositCountByRootSQL = "SELECT count(*) FROM mt.root WHERE root = $1 AND network = $2" err := p.getExecQuerier(dbTx).QueryRow(ctx, getDepositCountByRootSQL, root, network).Scan(&count) @@ -378,7 +378,7 @@ func (p *PostgresStorage) CheckIfRootExists(ctx context.Context, root []byte, ne } // GetRoot gets root by the deposit count from the merkle tree. -func (p *PostgresStorage) GetRoot(ctx context.Context, depositCnt uint, network uint, dbTx pgx.Tx) ([]byte, error) { +func (p *PostgresStorage) GetRoot(ctx context.Context, depositCnt, network uint32, dbTx pgx.Tx) ([]byte, error) { var root []byte const getRootByDepositCntSQL = "SELECT root FROM mt.root inner join sync.deposit on mt.root.deposit_id = sync.deposit.id WHERE sync.deposit.deposit_cnt = $1 AND network = $2" err := p.getExecQuerier(dbTx).QueryRow(ctx, getRootByDepositCntSQL, depositCnt, network).Scan(&root) @@ -389,7 +389,7 @@ func (p *PostgresStorage) GetRoot(ctx context.Context, depositCnt uint, network } // SetRoot store the root with deposit count to the storage. 
-func (p *PostgresStorage) SetRoot(ctx context.Context, root []byte, depositID uint64, network uint, dbTx pgx.Tx) error { +func (p *PostgresStorage) SetRoot(ctx context.Context, root []byte, depositID uint64, network uint32, dbTx pgx.Tx) error { const setRootSQL = "INSERT INTO mt.root (root, deposit_id, network) VALUES ($1, $2, $3);" _, err := p.getExecQuerier(dbTx).Exec(ctx, setRootSQL, root, depositID, network) return err @@ -494,17 +494,17 @@ func (p *PostgresStorage) GetLatestRollupExitLeaves(ctx context.Context, dbTx pg } // GetLastDepositCount gets the last deposit count from the merkle tree. -func (p *PostgresStorage) GetLastDepositCount(ctx context.Context, network uint, dbTx pgx.Tx) (uint, error) { +func (p *PostgresStorage) GetLastDepositCount(ctx context.Context, networkID uint32, dbTx pgx.Tx) (uint32, error) { var depositCnt int64 const getLastDepositCountSQL = "SELECT coalesce(MAX(deposit_cnt), -1) FROM sync.deposit WHERE id = (SELECT coalesce(MAX(deposit_id), -1) FROM mt.root WHERE network = $1)" - err := p.getExecQuerier(dbTx).QueryRow(ctx, getLastDepositCountSQL, network).Scan(&depositCnt) + err := p.getExecQuerier(dbTx).QueryRow(ctx, getLastDepositCountSQL, networkID).Scan(&depositCnt) if err != nil { return 0, nil } if depositCnt < 0 { return 0, gerror.ErrStorageNotFound } - return uint(depositCnt), nil + return uint32(depositCnt), nil } // GetClaimCount gets the claim count for the destination address. @@ -519,7 +519,7 @@ func (p *PostgresStorage) GetClaimCount(ctx context.Context, destAddr string, db } // GetClaims gets the claim list which be smaller than index. -func (p *PostgresStorage) GetClaims(ctx context.Context, destAddr string, limit uint, offset uint, dbTx pgx.Tx) ([]*etherman.Claim, error) { +func (p *PostgresStorage) GetClaims(ctx context.Context, destAddr string, limit, offset uint32, dbTx pgx.Tx) ([]*etherman.Claim, error) { const getClaimsSQL = "SELECT index, orig_net, orig_addr, amount, dest_addr, block_id, network_id, tx_hash, rollup_index, mainnet_flag FROM sync.claim WHERE dest_addr = $1 ORDER BY block_id DESC LIMIT $2 OFFSET $3" rows, err := p.getExecQuerier(dbTx).Query(ctx, getClaimsSQL, common.FromHex(destAddr), limit, offset) if err != nil { @@ -543,7 +543,7 @@ func (p *PostgresStorage) GetClaims(ctx context.Context, destAddr string, limit } // GetDeposits gets the deposit list which be smaller than depositCount. -func (p *PostgresStorage) GetDeposits(ctx context.Context, destAddr string, limit uint, offset uint, dbTx pgx.Tx) ([]*etherman.Deposit, error) { +func (p *PostgresStorage) GetDeposits(ctx context.Context, destAddr string, limit, offset uint32, dbTx pgx.Tx) ([]*etherman.Deposit, error) { const getDepositsSQL = "SELECT d.id, leaf_type, orig_net, orig_addr, amount, dest_net, dest_addr, deposit_cnt, block_id, b.block_num, d.network_id, tx_hash, metadata, ready_for_claim FROM sync.deposit as d INNER JOIN sync.block as b ON d.network_id = b.network_id AND d.block_id = b.id WHERE dest_addr = $1 ORDER BY d.block_id DESC, d.deposit_cnt DESC LIMIT $2 OFFSET $3" rows, err := p.getExecQuerier(dbTx).Query(ctx, getDepositsSQL, common.FromHex(destAddr), limit, offset) if err != nil { @@ -577,7 +577,7 @@ func (p *PostgresStorage) GetDepositCount(ctx context.Context, destAddr string, } // UpdateL1DepositsStatus updates the ready_for_claim status of L1 deposits. 
-func (p *PostgresStorage) UpdateL1DepositsStatus(ctx context.Context, exitRoot []byte, destinationNetwork uint, dbTx pgx.Tx) ([]*etherman.Deposit, error) { +func (p *PostgresStorage) UpdateL1DepositsStatus(ctx context.Context, exitRoot []byte, destinationNetwork uint32, dbTx pgx.Tx) ([]*etherman.Deposit, error) { const updateDepositsStatusSQL = `UPDATE sync.deposit SET ready_for_claim = true WHERE deposit_cnt <= (SELECT sync.deposit.deposit_cnt FROM mt.root INNER JOIN sync.deposit ON sync.deposit.id = mt.root.deposit_id WHERE mt.root.root = $1 AND mt.root.network = 0) @@ -605,7 +605,7 @@ func (p *PostgresStorage) UpdateL1DepositsStatus(ctx context.Context, exitRoot [ } // UpdateL2DepositsStatus updates the ready_for_claim status of L2 deposits. -func (p *PostgresStorage) UpdateL2DepositsStatus(ctx context.Context, exitRoot []byte, rollupID, networkID uint, dbTx pgx.Tx) error { +func (p *PostgresStorage) UpdateL2DepositsStatus(ctx context.Context, exitRoot []byte, rollupID, networkID uint32, dbTx pgx.Tx) error { const updateDepositsStatusSQL = `UPDATE sync.deposit SET ready_for_claim = true WHERE deposit_cnt <= (SELECT sync.deposit.deposit_cnt FROM mt.root INNER JOIN sync.deposit ON sync.deposit.id = mt.root.deposit_id WHERE mt.root.root = (select leaf from mt.rollup_exit where root = $1 and rollup_id = $2) AND mt.root.network = $3) @@ -644,7 +644,7 @@ func (p *PostgresStorage) UpdateClaimTx(ctx context.Context, mTx ctmtypes.Monito } // GetClaimTxsByStatus gets the monitored transactions by status. -func (p *PostgresStorage) GetClaimTxsByStatus(ctx context.Context, statuses []ctmtypes.MonitoredTxStatus, rollupID uint, dbTx pgx.Tx) ([]ctmtypes.MonitoredTx, error) { +func (p *PostgresStorage) GetClaimTxsByStatus(ctx context.Context, statuses []ctmtypes.MonitoredTxStatus, rollupID uint32, dbTx pgx.Tx) ([]ctmtypes.MonitoredTx, error) { const getMonitoredTxsSQL = "SELECT deposit_id, from_addr, to_addr, nonce, value, data, gas, status, history, created_at, updated_at, group_id, global_exit_root FROM sync.monitored_txs INNER JOIN sync.deposit ON sync.deposit.id = sync.monitored_txs.deposit_id WHERE status = ANY($1) AND sync.deposit.dest_net = $2 ORDER BY created_at ASC" rows, err := p.getExecQuerier(dbTx).Query(ctx, getMonitoredTxsSQL, pq.Array(statuses), rollupID) if errors.Is(err, pgx.ErrNoRows) { @@ -676,15 +676,15 @@ func (p *PostgresStorage) GetClaimTxsByStatus(ctx context.Context, statuses []ct } // GetPendingDepositsToClaim gets the deposit list which is not claimed in the destination network. 
-func (p *PostgresStorage) GetPendingDepositsToClaim(ctx context.Context, destAddress common.Address, destNetwork uint64, leafType, limit uint32, offset uint64, dbTx pgx.Tx) ([]*etherman.Deposit, uint64, error) { +func (p *PostgresStorage) GetPendingDepositsToClaim(ctx context.Context, destAddress common.Address, destNetwork, leafType, limit, offset uint32, dbTx pgx.Tx) ([]*etherman.Deposit, uint64, error) { desAddrSQL := "" if destAddress != (common.Address{}) { str := strings.TrimPrefix(destAddress.String(), "0x") desAddrSQL = "AND dest_addr = decode('" + str + "','hex')" } getNumberPendingDepositsToClaimSQL := "SELECT count(*) FROM sync.deposit WHERE dest_net = $1 AND ready_for_claim = true AND leaf_type = $2 " + desAddrSQL + " AND deposit_cnt NOT IN (SELECT index FROM sync.claim WHERE sync.claim.network_id = $1)" - var depositCount uint64 - err := p.getExecQuerier(dbTx).QueryRow(ctx, getNumberPendingDepositsToClaimSQL, destNetwork, leafType).Scan(&depositCount) + var totalCount uint64 + err := p.getExecQuerier(dbTx).QueryRow(ctx, getNumberPendingDepositsToClaimSQL, destNetwork, leafType).Scan(&totalCount) if err != nil { return nil, 0, err } @@ -709,7 +709,7 @@ func (p *PostgresStorage) GetPendingDepositsToClaim(ctx context.Context, destAdd deposits = append(deposits, &deposit) } - return deposits, depositCount, nil + return deposits, totalCount, nil } // UpdateDepositsStatusForTesting updates the ready_for_claim status of all deposits for testing. @@ -720,7 +720,7 @@ func (p *PostgresStorage) UpdateDepositsStatusForTesting(ctx context.Context, db } // UpdateBlocksForTesting updates the hash of blocks. -func (p *PostgresStorage) UpdateBlocksForTesting(ctx context.Context, networkID uint, blockNum uint64, dbTx pgx.Tx) error { +func (p *PostgresStorage) UpdateBlocksForTesting(ctx context.Context, networkID uint32, blockNum uint64, dbTx pgx.Tx) error { const updateBlocksSQL = "UPDATE sync.block SET block_hash = SUBSTRING(block_hash FROM 1 FOR LENGTH(block_hash)-1) || '\x61' WHERE network_id = $1 AND block_num >= $2" _, err := p.getExecQuerier(dbTx).Exec(ctx, updateBlocksSQL, networkID, blockNum) return err diff --git a/db/pgstorage/pgstorage_test.go b/db/pgstorage/pgstorage_test.go index 44c08c64..38f428eb 100644 --- a/db/pgstorage/pgstorage_test.go +++ b/db/pgstorage/pgstorage_test.go @@ -66,25 +66,25 @@ func TestGetLeaves(t *testing.T) { assert.Equal(t, "0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f6", leaves[0].Leaf.String()) assert.Equal(t, uint64(5), leaves[0].ID) assert.Equal(t, uint64(2), leaves[0].BlockID) - assert.Equal(t, uint(1), leaves[0].RollupId) + assert.Equal(t, uint32(1), leaves[0].RollupId) assert.Equal(t, "0x42d3339fe8eb57770953423f20a029e778a707e8d58aaf110b40d5eb4dd25722", leaves[0].Root.String()) assert.Equal(t, "0x315fee1aa202bf4a6bd0fde560c89be90b6e6e2aaf92dc5e8d118209abc34110", leaves[1].Leaf.String()) assert.Equal(t, uint64(8), leaves[1].ID) assert.Equal(t, uint64(1), leaves[1].BlockID) - assert.Equal(t, uint(2), leaves[1].RollupId) + assert.Equal(t, uint32(2), leaves[1].RollupId) assert.Equal(t, "0x42d3339fe8eb57770953423f20a029e778a707e8d58aaf110b40d5eb4dd25722", leaves[1].Root.String()) assert.Equal(t, "0xb598ce65aa15c08dda126a2985ba54f0559eaac562bb43ba430c7344261fbc5e", leaves[2].Leaf.String()) assert.Equal(t, uint64(7), leaves[2].ID) assert.Equal(t, uint64(2), leaves[2].BlockID) - assert.Equal(t, uint(3), leaves[2].RollupId) + assert.Equal(t, uint32(3), leaves[2].RollupId) assert.Equal(t, 
"0x42d3339fe8eb57770953423f20a029e778a707e8d58aaf110b40d5eb4dd25722", leaves[2].Root.String()) assert.Equal(t, "0xe6585bdf74b6a46b9ede8b1b877e1232fb79ee93106c4db8ffd49cf1685bf243", leaves[3].Leaf.String()) assert.Equal(t, uint64(6), leaves[3].ID) assert.Equal(t, uint64(2), leaves[3].BlockID) - assert.Equal(t, uint(4), leaves[3].RollupId) + assert.Equal(t, uint32(4), leaves[3].RollupId) assert.Equal(t, "0x42d3339fe8eb57770953423f20a029e778a707e8d58aaf110b40d5eb4dd25722", leaves[3].Root.String()) } @@ -193,50 +193,50 @@ func TestGetPendingDepositsToClaim(t *testing.T) { _, err = store.Exec(ctx, data) require.NoError(t, err) - deposits, depositCount, err := store.GetPendingDepositsToClaim(ctx, common.Address{}, 1, 0, 2, 0, nil) + deposits, totalCount, err := store.GetPendingDepositsToClaim(ctx, common.Address{}, 1, 0, 2, 0, nil) require.NoError(t, err) assert.Equal(t, 2, len(deposits)) - assert.Equal(t, uint64(2), depositCount) + assert.Equal(t, uint64(2), totalCount) assert.Equal(t, uint8(0), deposits[0].LeafType) - assert.Equal(t, uint(0), deposits[0].NetworkID) - assert.Equal(t, uint(0), deposits[0].OriginalNetwork) + assert.Equal(t, uint32(0), deposits[0].NetworkID) + assert.Equal(t, uint32(0), deposits[0].OriginalNetwork) assert.Equal(t, common.Address{}, deposits[0].OriginalAddress) assert.Equal(t, big.NewInt(90000000000000000), deposits[0].Amount) - assert.Equal(t, uint(1), deposits[0].DestinationNetwork) + assert.Equal(t, uint32(1), deposits[0].DestinationNetwork) assert.Equal(t, common.HexToAddress("0xF39FD6E51AAD88F6F4CE6AB8827279CFFFB92266"), deposits[0].DestinationAddress) assert.Equal(t, uint64(1), deposits[0].BlockID) - assert.Equal(t, uint(1), deposits[0].DepositCount) + assert.Equal(t, uint32(1), deposits[0].DepositCount) assert.Equal(t, common.HexToHash("0x6282FACE883070640F802CE8A2C42593AA18D3A691C61BA006EC477D6E5FEE1F"), deposits[0].TxHash) assert.Equal(t, []byte{}, deposits[0].Metadata) assert.Equal(t, uint64(2), deposits[0].Id) assert.Equal(t, true, deposits[0].ReadyForClaim) assert.Equal(t, uint8(0), deposits[1].LeafType) - assert.Equal(t, uint(0), deposits[1].NetworkID) - assert.Equal(t, uint(0), deposits[1].OriginalNetwork) + assert.Equal(t, uint32(0), deposits[1].NetworkID) + assert.Equal(t, uint32(0), deposits[1].OriginalNetwork) assert.Equal(t, common.Address{}, deposits[1].OriginalAddress) assert.Equal(t, big.NewInt(90000000000000000), deposits[1].Amount) - assert.Equal(t, uint(1), deposits[1].DestinationNetwork) + assert.Equal(t, uint32(1), deposits[1].DestinationNetwork) assert.Equal(t, common.HexToAddress("0xF38FD6E51AAD88F6F4CE6AB8827279CFFFB92266"), deposits[1].DestinationAddress) assert.Equal(t, uint64(1), deposits[1].BlockID) - assert.Equal(t, uint(2), deposits[1].DepositCount) + assert.Equal(t, uint32(2), deposits[1].DepositCount) assert.Equal(t, common.HexToHash("0x6282FACE883070640F802CE8A2C42593AA18D3A691C61BA006EC477D6E5FEE1F"), deposits[1].TxHash) assert.Equal(t, []byte{}, deposits[1].Metadata) assert.Equal(t, uint64(3), deposits[1].Id) assert.Equal(t, true, deposits[1].ReadyForClaim) - deposits, depositCount, err = store.GetPendingDepositsToClaim(ctx, common.HexToAddress("0xF39FD6E51AAD88F6F4CE6AB8827279CFFFB92266"), 1, 0, 2, 0, nil) + deposits, totalCount, err = store.GetPendingDepositsToClaim(ctx, common.HexToAddress("0xF39FD6E51AAD88F6F4CE6AB8827279CFFFB92266"), 1, 0, 2, 0, nil) require.NoError(t, err) assert.Equal(t, 1, len(deposits)) - assert.Equal(t, uint64(1), depositCount) + assert.Equal(t, uint64(1), totalCount) assert.Equal(t, uint8(0), 
deposits[0].LeafType) - assert.Equal(t, uint(0), deposits[0].NetworkID) - assert.Equal(t, uint(0), deposits[0].OriginalNetwork) + assert.Equal(t, uint32(0), deposits[0].NetworkID) + assert.Equal(t, uint32(0), deposits[0].OriginalNetwork) assert.Equal(t, common.Address{}, deposits[0].OriginalAddress) assert.Equal(t, big.NewInt(90000000000000000), deposits[0].Amount) - assert.Equal(t, uint(1), deposits[0].DestinationNetwork) + assert.Equal(t, uint32(1), deposits[0].DestinationNetwork) assert.Equal(t, common.HexToAddress("0xF39FD6E51AAD88F6F4CE6AB8827279CFFFB92266"), deposits[0].DestinationAddress) assert.Equal(t, uint64(1), deposits[0].BlockID) - assert.Equal(t, uint(1), deposits[0].DepositCount) + assert.Equal(t, uint32(1), deposits[0].DepositCount) assert.Equal(t, common.HexToHash("0x6282FACE883070640F802CE8A2C42593AA18D3A691C61BA006EC477D6E5FEE1F"), deposits[0].TxHash) assert.Equal(t, []byte{}, deposits[0].Metadata) assert.Equal(t, uint64(2), deposits[0].Id) diff --git a/db/storage_test.go b/db/storage_test.go index 343c1065..2e214adc 100644 --- a/db/storage_test.go +++ b/db/storage_test.go @@ -290,11 +290,11 @@ func TestMTStorage(t *testing.T) { count, err := pg.GetLastDepositCount(ctx, 0, tx) require.NoError(t, err) - require.Equal(t, uint(0), count) + require.Equal(t, uint32(0), count) dCount, err := pg.GetDepositCountByRoot(ctx, root, 0, tx) require.NoError(t, err) - require.Equal(t, uint(0), dCount) + require.Equal(t, uint32(0), dCount) require.NoError(t, tx.Commit(ctx)) } @@ -365,12 +365,12 @@ func TestBSStorage(t *testing.T) { require.NoError(t, err) require.Equal(t, len(rDeposits), 1) - count, err = pg.GetNumberDeposits(ctx, 0, 0, tx) + countND, err := pg.GetNumberDeposits(ctx, 0, 0, tx) require.NoError(t, err) - require.Equal(t, count, uint64(0)) - count, err = pg.GetNumberDeposits(ctx, 0, 1, tx) + require.Equal(t, countND, uint32(0)) + countND, err = pg.GetNumberDeposits(ctx, 0, 1, tx) require.NoError(t, err) - require.Equal(t, count, uint64(2)) + require.Equal(t, countND, uint32(2)) count, err = pg.GetClaimCount(ctx, claim.DestinationAddress.String(), tx) require.NoError(t, err) diff --git a/etherman/etherman.go b/etherman/etherman.go index 98d4f0e5..4d6576dc 100644 --- a/etherman/etherman.go +++ b/etherman/etherman.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "math" "math/big" "time" @@ -450,11 +451,11 @@ func (etherMan *Client) depositEvent(ctx context.Context, vLog types.Log, blocks var deposit Deposit deposit.Amount = d.Amount deposit.BlockNumber = vLog.BlockNumber - deposit.OriginalNetwork = uint(d.OriginNetwork) + deposit.OriginalNetwork = d.OriginNetwork deposit.DestinationAddress = d.DestinationAddress - deposit.DestinationNetwork = uint(d.DestinationNetwork) + deposit.DestinationNetwork = d.DestinationNetwork deposit.OriginalAddress = d.OriginAddress - deposit.DepositCount = uint(d.DepositCount) + deposit.DepositCount = d.DepositCount deposit.TxHash = vLog.TxHash deposit.Metadata = d.Metadata deposit.LeafType = d.LeafType @@ -487,7 +488,7 @@ func (etherMan *Client) oldClaimEvent(ctx context.Context, vLog types.Log, block if err != nil { return err } - return etherMan.claimEvent(ctx, vLog, blocks, blocksOrder, c.Amount, c.DestinationAddress, c.OriginAddress, uint(c.Index), uint(c.OriginNetwork), 0, false) + return etherMan.claimEvent(ctx, vLog, blocks, blocksOrder, c.Amount, c.DestinationAddress, c.OriginAddress, c.Index, c.OriginNetwork, 0, false) } func (etherMan *Client) newClaimEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder 
*map[common.Hash][]Order) error { @@ -500,10 +501,10 @@ func (etherMan *Client) newClaimEvent(ctx context.Context, vLog types.Log, block if err != nil { return err } - return etherMan.claimEvent(ctx, vLog, blocks, blocksOrder, c.Amount, c.DestinationAddress, c.OriginAddress, uint(localExitRootIndex), uint(c.OriginNetwork), rollupIndex, mainnetFlag) + return etherMan.claimEvent(ctx, vLog, blocks, blocksOrder, c.Amount, c.DestinationAddress, c.OriginAddress, localExitRootIndex, c.OriginNetwork, rollupIndex, mainnetFlag) } -func (etherMan *Client) claimEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order, amount *big.Int, destinationAddress, originAddress common.Address, Index uint, originNetwork uint, rollupIndex uint64, mainnetFlag bool) error { +func (etherMan *Client) claimEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order, amount *big.Int, destinationAddress, originAddress common.Address, Index, originNetwork, rollupIndex uint32, mainnetFlag bool) error { var claim Claim claim.Amount = amount claim.DestinationAddress = destinationAddress @@ -544,7 +545,7 @@ func (etherMan *Client) tokenWrappedEvent(ctx context.Context, vLog types.Log, b return err } var tokenWrapped TokenWrapped - tokenWrapped.OriginalNetwork = uint(tw.OriginNetwork) + tokenWrapped.OriginalNetwork = tw.OriginNetwork tokenWrapped.OriginalTokenAddress = tw.OriginTokenAddress tokenWrapped.WrappedTokenAddress = tw.WrappedTokenAddress tokenWrapped.BlockNumber = vLog.BlockNumber @@ -609,8 +610,8 @@ func (etherMan *Client) EthBlockByNumber(ctx context.Context, blockNumber uint64 } // GetNetworkID gets the network ID of the dedicated chain. -func (etherMan *Client) GetNetworkID() uint { - return uint(etherMan.NetworkID) +func (etherMan *Client) GetNetworkID() uint32 { + return etherMan.NetworkID } func (etherMan *Client) verifyBatchesTrustedAggregatorEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { @@ -620,7 +621,7 @@ func (etherMan *Client) verifyBatchesTrustedAggregatorEvent(ctx context.Context, etherMan.logger.Error("error parsing verifyBatchesTrustedAggregator event. Error: ", err) return err } - return etherMan.verifyBatches(ctx, vLog, blocks, blocksOrder, uint(vb.RollupID), vb.NumBatch, vb.StateRoot, vb.ExitRoot, vb.Aggregator) + return etherMan.verifyBatches(ctx, vLog, blocks, blocksOrder, vb.RollupID, vb.NumBatch, vb.StateRoot, vb.ExitRoot, vb.Aggregator) } func (etherMan *Client) verifyBatchesEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error { @@ -630,10 +631,10 @@ func (etherMan *Client) verifyBatchesEvent(ctx context.Context, vLog types.Log, etherMan.logger.Error("error parsing VerifyBatches event. 
Error: ", err) return err } - return etherMan.verifyBatches(ctx, vLog, blocks, blocksOrder, uint(vb.RollupID), vb.NumBatch, vb.StateRoot, vb.ExitRoot, vb.Aggregator) + return etherMan.verifyBatches(ctx, vLog, blocks, blocksOrder, vb.RollupID, vb.NumBatch, vb.StateRoot, vb.ExitRoot, vb.Aggregator) } -func (etherMan *Client) verifyBatches(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order, rollupID uint, batchNum uint64, stateRoot, localExitRoot common.Hash, aggregator common.Address) error { +func (etherMan *Client) verifyBatches(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order, rollupID uint32, batchNum uint64, stateRoot, localExitRoot common.Hash, aggregator common.Address) error { var verifyBatch VerifiedBatch verifyBatch.BlockNumber = vLog.BlockNumber verifyBatch.BatchNumber = batchNum @@ -665,7 +666,7 @@ func (etherMan *Client) verifyBatches(ctx context.Context, vLog types.Log, block return nil } -func DecodeGlobalIndex(globalIndex *big.Int) (bool, uint64, uint64, error) { +func DecodeGlobalIndex(globalIndex *big.Int) (bool, uint32, uint32, error) { const lengthGlobalIndexInBytes = 32 var buf [32]byte gIBytes := globalIndex.FillBytes(buf[:]) @@ -675,10 +676,16 @@ func DecodeGlobalIndex(globalIndex *big.Int) (bool, uint64, uint64, error) { mainnetFlag := big.NewInt(0).SetBytes([]byte{gIBytes[23]}).Uint64() == 1 rollupIndex := big.NewInt(0).SetBytes(gIBytes[24:28]) localRootIndex := big.NewInt(0).SetBytes(gIBytes[29:32]) - return mainnetFlag, rollupIndex.Uint64(), localRootIndex.Uint64(), nil + if rollupIndex.Uint64() > math.MaxUint32 { + return false, 0, 0, fmt.Errorf("invalid rollupIndex length. Should be fit into uint32 type") + } + if localRootIndex.Uint64() > math.MaxUint32 { + return false, 0, 0, fmt.Errorf("invalid localRootIndex length. 
Should be fit into uint32 type") + } + return mainnetFlag, uint32(rollupIndex.Uint64()), uint32(localRootIndex.Uint64()), nil } -func GenerateGlobalIndex(mainnetFlag bool, rollupIndex uint, localExitRootIndex uint) *big.Int { +func GenerateGlobalIndex(mainnetFlag bool, rollupIndex uint32, localExitRootIndex uint32) *big.Int { var ( globalIndexBytes []byte buf [4]byte diff --git a/etherman/etherman_test.go b/etherman/etherman_test.go index b1be6d7c..f3019d93 100644 --- a/etherman/etherman_test.go +++ b/etherman/etherman_test.go @@ -93,7 +93,7 @@ func TestBridgeEvents(t *testing.T) { assert.Equal(t, GlobalExitRootsOrder, order[block[0].BlockHash][1].Name) assert.Equal(t, uint64(8), block[0].BlockNumber) assert.Equal(t, big.NewInt(9000000000000000000), block[0].Deposits[0].Amount) - assert.Equal(t, uint(destNetwork), block[0].Deposits[0].DestinationNetwork) + assert.Equal(t, destNetwork, block[0].Deposits[0].DestinationNetwork) assert.Equal(t, destinationAddr, block[0].Deposits[0].DestinationAddress) assert.Equal(t, 1, len(block[0].GlobalExitRoots)) @@ -125,10 +125,10 @@ func TestBridgeEvents(t *testing.T) { assert.Equal(t, uint64(9), block[0].BlockNumber) assert.NotEqual(t, common.Address{}, block[0].Claims[0].OriginalAddress) assert.Equal(t, auth.From, block[0].Claims[0].DestinationAddress) - assert.Equal(t, uint(34), block[0].Claims[0].Index) - assert.Equal(t, uint64(0), block[0].Claims[0].RollupIndex) + assert.Equal(t, uint32(34), block[0].Claims[0].Index) + assert.Equal(t, uint32(0), block[0].Claims[0].RollupIndex) assert.Equal(t, true, block[0].Claims[0].MainnetFlag) - assert.Equal(t, uint(0), block[0].Claims[0].OriginalNetwork) + assert.Equal(t, uint32(0), block[0].Claims[0].OriginalNetwork) assert.Equal(t, uint64(9), block[0].Claims[0].BlockNumber) } @@ -143,8 +143,8 @@ func TestDecodeGlobalIndex(t *testing.T) { mainnetFlag, rollupIndex, localExitRootIndex, err := DecodeGlobalIndex(globalIndex) require.NoError(t, err) assert.Equal(t, false, mainnetFlag) - assert.Equal(t, uint64(1), rollupIndex) - assert.Equal(t, uint64(11), localExitRootIndex) + assert.Equal(t, uint32(1), rollupIndex) + assert.Equal(t, uint32(11), localExitRootIndex) globalIndex, _ = big.NewInt(0).SetString("8589934604", 0) @@ -155,8 +155,8 @@ func TestDecodeGlobalIndex(t *testing.T) { mainnetFlag, rollupIndex, localExitRootIndex, err = DecodeGlobalIndex(globalIndex) require.NoError(t, err) assert.Equal(t, false, mainnetFlag) - assert.Equal(t, uint64(2), rollupIndex) - assert.Equal(t, uint64(12), localExitRootIndex) + assert.Equal(t, uint32(2), rollupIndex) + assert.Equal(t, uint32(12), localExitRootIndex) globalIndex, _ = big.NewInt(0).SetString("18446744073709551627", 0) @@ -167,8 +167,8 @@ func TestDecodeGlobalIndex(t *testing.T) { mainnetFlag, rollupIndex, localExitRootIndex, err = DecodeGlobalIndex(globalIndex) require.NoError(t, err) assert.Equal(t, true, mainnetFlag) - assert.Equal(t, uint64(0), rollupIndex) - assert.Equal(t, uint64(11), localExitRootIndex) + assert.Equal(t, uint32(0), rollupIndex) + assert.Equal(t, uint32(11), localExitRootIndex) globalIndex, _ = big.NewInt(0).SetString("18446744073709551616", 0) @@ -179,8 +179,8 @@ func TestDecodeGlobalIndex(t *testing.T) { mainnetFlag, rollupIndex, localExitRootIndex, err = DecodeGlobalIndex(globalIndex) require.NoError(t, err) assert.Equal(t, true, mainnetFlag) - assert.Equal(t, uint64(0), rollupIndex) - assert.Equal(t, uint64(0), localExitRootIndex) + assert.Equal(t, uint32(0), rollupIndex) + assert.Equal(t, uint32(0), localExitRootIndex) } func 
TestVerifyBatchEvent(t *testing.T) { @@ -235,7 +235,7 @@ func TestVerifyBatchEvent(t *testing.T) { func TestGenerateGlobalIndex(t *testing.T) { globalIndex, _ := big.NewInt(0).SetString("4294967307", 0) - mainnetFlag, rollupIndex, localExitRootIndex := false, uint(1), uint(11) + mainnetFlag, rollupIndex, localExitRootIndex := false, uint32(1), uint32(11) globalIndexGenerated := GenerateGlobalIndex(mainnetFlag, rollupIndex, localExitRootIndex) t.Log("First test number:") for _, n := range globalIndexGenerated.Bytes() { @@ -244,7 +244,7 @@ func TestGenerateGlobalIndex(t *testing.T) { assert.Equal(t, globalIndex, globalIndexGenerated) globalIndex, _ = big.NewInt(0).SetString("8589934604", 0) - mainnetFlag, rollupIndex, localExitRootIndex = false, uint(2), uint(12) + mainnetFlag, rollupIndex, localExitRootIndex = false, uint32(2), uint32(12) globalIndexGenerated = GenerateGlobalIndex(mainnetFlag, rollupIndex, localExitRootIndex) t.Log("Second test number:") for _, n := range globalIndexGenerated.Bytes() { @@ -253,7 +253,7 @@ func TestGenerateGlobalIndex(t *testing.T) { assert.Equal(t, globalIndex, globalIndexGenerated) globalIndex, _ = big.NewInt(0).SetString("18446744073709551627", 0) - mainnetFlag, rollupIndex, localExitRootIndex = true, uint(0), uint(11) + mainnetFlag, rollupIndex, localExitRootIndex = true, uint32(0), uint32(11) globalIndexGenerated = GenerateGlobalIndex(mainnetFlag, rollupIndex, localExitRootIndex) t.Log("Third test number:") for _, n := range globalIndexGenerated.Bytes() { diff --git a/etherman/types.go b/etherman/types.go index 800f8c9b..54ad6f2f 100644 --- a/etherman/types.go +++ b/etherman/types.go @@ -13,7 +13,7 @@ type Block struct { BlockNumber uint64 BlockHash common.Hash ParentHash common.Hash - NetworkID uint + NetworkID uint32 GlobalExitRoots []GlobalExitRoot Deposits []Deposit Claims []Claim @@ -29,22 +29,22 @@ type GlobalExitRoot struct { BlockNumber uint64 ExitRoots []common.Hash GlobalExitRoot common.Hash - NetworkID uint + NetworkID uint32 } // Deposit struct type Deposit struct { Id uint64 LeafType uint8 - OriginalNetwork uint + OriginalNetwork uint32 OriginalAddress common.Address Amount *big.Int - DestinationNetwork uint + DestinationNetwork uint32 DestinationAddress common.Address - DepositCount uint + DepositCount uint32 BlockID uint64 BlockNumber uint64 - NetworkID uint + NetworkID uint32 TxHash common.Hash Metadata []byte // it is only used for the bridge service @@ -54,27 +54,27 @@ type Deposit struct { // Claim struct type Claim struct { MainnetFlag bool - RollupIndex uint64 - Index uint - OriginalNetwork uint + RollupIndex uint32 + Index uint32 + OriginalNetwork uint32 OriginalAddress common.Address Amount *big.Int DestinationAddress common.Address BlockID uint64 BlockNumber uint64 - NetworkID uint + NetworkID uint32 TxHash common.Hash } // TokenWrapped struct type TokenWrapped struct { TokenMetadata - OriginalNetwork uint + OriginalNetwork uint32 OriginalTokenAddress common.Address WrappedTokenAddress common.Address BlockID uint64 BlockNumber uint64 - NetworkID uint + NetworkID uint32 } // TokenMetadata is a metadata of ERC20 token. 
@@ -87,7 +87,7 @@ type TokenMetadata struct { type VerifiedBatch struct { BlockNumber uint64 BatchNumber uint64 - RollupID uint + RollupID uint32 LocalExitRoot common.Hash TxHash common.Hash StateRoot common.Hash @@ -99,6 +99,6 @@ type RollupExitLeaf struct { ID uint64 BlockID uint64 Leaf common.Hash - RollupId uint + RollupId uint32 Root common.Hash } diff --git a/proto/src/proto/bridge/v1/query.proto b/proto/src/proto/bridge/v1/query.proto index 498f107c..4e3c5c9f 100644 --- a/proto/src/proto/bridge/v1/query.proto +++ b/proto/src/proto/bridge/v1/query.proto @@ -87,7 +87,7 @@ message Deposit { uint32 dest_net = 5; string dest_addr = 6; uint64 block_num = 7; - uint64 deposit_cnt = 8; + uint32 deposit_cnt = 8; uint32 network_id = 9; string tx_hash = 10; string claim_tx_hash = 11; @@ -98,7 +98,7 @@ message Deposit { // Claim message message Claim { - uint64 index = 1; + uint32 index = 1; uint32 orig_net = 2; string orig_addr = 3; string amount = 4; @@ -106,7 +106,7 @@ message Claim { string dest_addr = 6; uint64 block_num = 7; string tx_hash = 8; - uint64 rollup_index = 9; + uint32 rollup_index = 9; bool mainnet_flag = 10; } @@ -124,26 +124,26 @@ message CheckAPIRequest {} message GetBridgesRequest { string dest_addr = 1; - uint64 offset = 2; + uint32 offset = 2; uint32 limit = 3; } message GetPendingBridgesRequest { string dest_addr = 1; - uint64 dest_net = 2; + uint32 dest_net = 2; uint32 leaf_type = 3; - uint64 offset = 4; + uint32 offset = 4; uint32 limit = 5; } message GetProofRequest { uint32 net_id = 1; - uint64 deposit_cnt = 2; + uint32 deposit_cnt = 2; } message GetProofByGERRequest { uint32 net_id = 1; - uint64 deposit_cnt = 2; + uint32 deposit_cnt = 2; string ger = 3; } @@ -154,12 +154,12 @@ message GetTokenWrappedRequest { message GetBridgeRequest { uint32 net_id = 1; - uint64 deposit_cnt = 2; + uint32 deposit_cnt = 2; } message GetClaimsRequest { string dest_addr = 1; - uint64 offset = 2; + uint32 offset = 2; uint32 limit = 3; } diff --git a/server/interfaces.go b/server/interfaces.go index 65da09cd..94a82901 100644 --- a/server/interfaces.go +++ b/server/interfaces.go @@ -10,17 +10,17 @@ import ( type bridgeServiceStorage interface { Get(ctx context.Context, key []byte, dbTx pgx.Tx) ([][]byte, error) - GetRoot(ctx context.Context, depositCnt uint, network uint, dbTx pgx.Tx) ([]byte, error) - GetDepositCountByRoot(ctx context.Context, root []byte, network uint8, dbTx pgx.Tx) (uint, error) - GetLatestExitRoot(ctx context.Context, networkID, destNetwork uint, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) + GetRoot(ctx context.Context, depositCnt, network uint32, dbTx pgx.Tx) ([]byte, error) + GetDepositCountByRoot(ctx context.Context, root []byte, network uint32, dbTx pgx.Tx) (uint32, error) + GetLatestExitRoot(ctx context.Context, networkID, destNetwork uint32, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) GetExitRootByGER(ctx context.Context, ger common.Hash, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) - GetClaim(ctx context.Context, index uint, originNetworkID, networkID uint, dbTx pgx.Tx) (*etherman.Claim, error) - GetClaims(ctx context.Context, destAddr string, limit uint, offset uint, dbTx pgx.Tx) ([]*etherman.Claim, error) + GetClaim(ctx context.Context, index, originNetworkID, networkID uint32, dbTx pgx.Tx) (*etherman.Claim, error) + GetClaims(ctx context.Context, destAddr string, limit, offset uint32, dbTx pgx.Tx) ([]*etherman.Claim, error) GetClaimCount(ctx context.Context, destAddr string, dbTx pgx.Tx) (uint64, error) - GetDeposit(ctx context.Context, depositCnt 
uint, networkID uint, dbTx pgx.Tx) (*etherman.Deposit, error) - GetDeposits(ctx context.Context, destAddr string, limit uint, offset uint, dbTx pgx.Tx) ([]*etherman.Deposit, error) + GetDeposit(ctx context.Context, depositCnt, networkID uint32, dbTx pgx.Tx) (*etherman.Deposit, error) + GetDeposits(ctx context.Context, destAddr string, limit, offset uint32, dbTx pgx.Tx) ([]*etherman.Deposit, error) GetDepositCount(ctx context.Context, destAddr string, dbTx pgx.Tx) (uint64, error) - GetTokenWrapped(ctx context.Context, originalNetwork uint, originalTokenAddress common.Address, dbTx pgx.Tx) (*etherman.TokenWrapped, error) + GetTokenWrapped(ctx context.Context, originalNetwork uint32, originalTokenAddress common.Address, dbTx pgx.Tx) (*etherman.TokenWrapped, error) GetRollupExitLeavesByRoot(ctx context.Context, root common.Hash, dbTx pgx.Tx) ([]etherman.RollupExitLeaf, error) - GetPendingDepositsToClaim(ctx context.Context, destAddress common.Address, destNetwork uint64, leafType, limit uint32, offset uint64, dbTx pgx.Tx) ([]*etherman.Deposit, uint64, error) + GetPendingDepositsToClaim(ctx context.Context, destAddress common.Address, destNetwork, leafType, limit, offset uint32, dbTx pgx.Tx) ([]*etherman.Deposit, uint64, error) } diff --git a/server/mock_bridgeServiceStorage.go b/server/mock_bridgeServiceStorage.go index 8ed35ce2..0241c3c1 100644 --- a/server/mock_bridgeServiceStorage.go +++ b/server/mock_bridgeServiceStorage.go @@ -88,7 +88,7 @@ func (_c *bridgeServiceStorageMock_Get_Call) RunAndReturn(run func(context.Conte } // GetClaim provides a mock function with given fields: ctx, index, originNetworkID, networkID, dbTx -func (_m *bridgeServiceStorageMock) GetClaim(ctx context.Context, index uint, originNetworkID uint, networkID uint, dbTx pgx.Tx) (*etherman.Claim, error) { +func (_m *bridgeServiceStorageMock) GetClaim(ctx context.Context, index uint32, originNetworkID uint32, networkID uint32, dbTx pgx.Tx) (*etherman.Claim, error) { ret := _m.Called(ctx, index, originNetworkID, networkID, dbTx) if len(ret) == 0 { @@ -97,10 +97,10 @@ func (_m *bridgeServiceStorageMock) GetClaim(ctx context.Context, index uint, or var r0 *etherman.Claim var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint, uint, uint, pgx.Tx) (*etherman.Claim, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, uint32, pgx.Tx) (*etherman.Claim, error)); ok { return rf(ctx, index, originNetworkID, networkID, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, uint, uint, uint, pgx.Tx) *etherman.Claim); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, uint32, pgx.Tx) *etherman.Claim); ok { r0 = rf(ctx, index, originNetworkID, networkID, dbTx) } else { if ret.Get(0) != nil { @@ -108,7 +108,7 @@ func (_m *bridgeServiceStorageMock) GetClaim(ctx context.Context, index uint, or } } - if rf, ok := ret.Get(1).(func(context.Context, uint, uint, uint, pgx.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, uint32, uint32, uint32, pgx.Tx) error); ok { r1 = rf(ctx, index, originNetworkID, networkID, dbTx) } else { r1 = ret.Error(1) @@ -124,17 +124,17 @@ type bridgeServiceStorageMock_GetClaim_Call struct { // GetClaim is a helper method to define mock.On call // - ctx context.Context -// - index uint -// - originNetworkID uint -// - networkID uint +// - index uint32 +// - originNetworkID uint32 +// - networkID uint32 // - dbTx pgx.Tx func (_e *bridgeServiceStorageMock_Expecter) GetClaim(ctx interface{}, index interface{}, originNetworkID interface{}, 
networkID interface{}, dbTx interface{}) *bridgeServiceStorageMock_GetClaim_Call { return &bridgeServiceStorageMock_GetClaim_Call{Call: _e.mock.On("GetClaim", ctx, index, originNetworkID, networkID, dbTx)} } -func (_c *bridgeServiceStorageMock_GetClaim_Call) Run(run func(ctx context.Context, index uint, originNetworkID uint, networkID uint, dbTx pgx.Tx)) *bridgeServiceStorageMock_GetClaim_Call { +func (_c *bridgeServiceStorageMock_GetClaim_Call) Run(run func(ctx context.Context, index uint32, originNetworkID uint32, networkID uint32, dbTx pgx.Tx)) *bridgeServiceStorageMock_GetClaim_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint), args[2].(uint), args[3].(uint), args[4].(pgx.Tx)) + run(args[0].(context.Context), args[1].(uint32), args[2].(uint32), args[3].(uint32), args[4].(pgx.Tx)) }) return _c } @@ -144,7 +144,7 @@ func (_c *bridgeServiceStorageMock_GetClaim_Call) Return(_a0 *etherman.Claim, _a return _c } -func (_c *bridgeServiceStorageMock_GetClaim_Call) RunAndReturn(run func(context.Context, uint, uint, uint, pgx.Tx) (*etherman.Claim, error)) *bridgeServiceStorageMock_GetClaim_Call { +func (_c *bridgeServiceStorageMock_GetClaim_Call) RunAndReturn(run func(context.Context, uint32, uint32, uint32, pgx.Tx) (*etherman.Claim, error)) *bridgeServiceStorageMock_GetClaim_Call { _c.Call.Return(run) return _c } @@ -208,7 +208,7 @@ func (_c *bridgeServiceStorageMock_GetClaimCount_Call) RunAndReturn(run func(con } // GetClaims provides a mock function with given fields: ctx, destAddr, limit, offset, dbTx -func (_m *bridgeServiceStorageMock) GetClaims(ctx context.Context, destAddr string, limit uint, offset uint, dbTx pgx.Tx) ([]*etherman.Claim, error) { +func (_m *bridgeServiceStorageMock) GetClaims(ctx context.Context, destAddr string, limit uint32, offset uint32, dbTx pgx.Tx) ([]*etherman.Claim, error) { ret := _m.Called(ctx, destAddr, limit, offset, dbTx) if len(ret) == 0 { @@ -217,10 +217,10 @@ func (_m *bridgeServiceStorageMock) GetClaims(ctx context.Context, destAddr stri var r0 []*etherman.Claim var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, uint, uint, pgx.Tx) ([]*etherman.Claim, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, uint32, uint32, pgx.Tx) ([]*etherman.Claim, error)); ok { return rf(ctx, destAddr, limit, offset, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, string, uint, uint, pgx.Tx) []*etherman.Claim); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, uint32, uint32, pgx.Tx) []*etherman.Claim); ok { r0 = rf(ctx, destAddr, limit, offset, dbTx) } else { if ret.Get(0) != nil { @@ -228,7 +228,7 @@ func (_m *bridgeServiceStorageMock) GetClaims(ctx context.Context, destAddr stri } } - if rf, ok := ret.Get(1).(func(context.Context, string, uint, uint, pgx.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, string, uint32, uint32, pgx.Tx) error); ok { r1 = rf(ctx, destAddr, limit, offset, dbTx) } else { r1 = ret.Error(1) @@ -245,16 +245,16 @@ type bridgeServiceStorageMock_GetClaims_Call struct { // GetClaims is a helper method to define mock.On call // - ctx context.Context // - destAddr string -// - limit uint -// - offset uint +// - limit uint32 +// - offset uint32 // - dbTx pgx.Tx func (_e *bridgeServiceStorageMock_Expecter) GetClaims(ctx interface{}, destAddr interface{}, limit interface{}, offset interface{}, dbTx interface{}) *bridgeServiceStorageMock_GetClaims_Call { return &bridgeServiceStorageMock_GetClaims_Call{Call: _e.mock.On("GetClaims", ctx, 
destAddr, limit, offset, dbTx)} } -func (_c *bridgeServiceStorageMock_GetClaims_Call) Run(run func(ctx context.Context, destAddr string, limit uint, offset uint, dbTx pgx.Tx)) *bridgeServiceStorageMock_GetClaims_Call { +func (_c *bridgeServiceStorageMock_GetClaims_Call) Run(run func(ctx context.Context, destAddr string, limit uint32, offset uint32, dbTx pgx.Tx)) *bridgeServiceStorageMock_GetClaims_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(string), args[2].(uint), args[3].(uint), args[4].(pgx.Tx)) + run(args[0].(context.Context), args[1].(string), args[2].(uint32), args[3].(uint32), args[4].(pgx.Tx)) }) return _c } @@ -264,13 +264,13 @@ func (_c *bridgeServiceStorageMock_GetClaims_Call) Return(_a0 []*etherman.Claim, return _c } -func (_c *bridgeServiceStorageMock_GetClaims_Call) RunAndReturn(run func(context.Context, string, uint, uint, pgx.Tx) ([]*etherman.Claim, error)) *bridgeServiceStorageMock_GetClaims_Call { +func (_c *bridgeServiceStorageMock_GetClaims_Call) RunAndReturn(run func(context.Context, string, uint32, uint32, pgx.Tx) ([]*etherman.Claim, error)) *bridgeServiceStorageMock_GetClaims_Call { _c.Call.Return(run) return _c } // GetDeposit provides a mock function with given fields: ctx, depositCnt, networkID, dbTx -func (_m *bridgeServiceStorageMock) GetDeposit(ctx context.Context, depositCnt uint, networkID uint, dbTx pgx.Tx) (*etherman.Deposit, error) { +func (_m *bridgeServiceStorageMock) GetDeposit(ctx context.Context, depositCnt uint32, networkID uint32, dbTx pgx.Tx) (*etherman.Deposit, error) { ret := _m.Called(ctx, depositCnt, networkID, dbTx) if len(ret) == 0 { @@ -279,10 +279,10 @@ func (_m *bridgeServiceStorageMock) GetDeposit(ctx context.Context, depositCnt u var r0 *etherman.Deposit var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint, uint, pgx.Tx) (*etherman.Deposit, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, pgx.Tx) (*etherman.Deposit, error)); ok { return rf(ctx, depositCnt, networkID, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, uint, uint, pgx.Tx) *etherman.Deposit); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, pgx.Tx) *etherman.Deposit); ok { r0 = rf(ctx, depositCnt, networkID, dbTx) } else { if ret.Get(0) != nil { @@ -290,7 +290,7 @@ func (_m *bridgeServiceStorageMock) GetDeposit(ctx context.Context, depositCnt u } } - if rf, ok := ret.Get(1).(func(context.Context, uint, uint, pgx.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, uint32, uint32, pgx.Tx) error); ok { r1 = rf(ctx, depositCnt, networkID, dbTx) } else { r1 = ret.Error(1) @@ -306,16 +306,16 @@ type bridgeServiceStorageMock_GetDeposit_Call struct { // GetDeposit is a helper method to define mock.On call // - ctx context.Context -// - depositCnt uint -// - networkID uint +// - depositCnt uint32 +// - networkID uint32 // - dbTx pgx.Tx func (_e *bridgeServiceStorageMock_Expecter) GetDeposit(ctx interface{}, depositCnt interface{}, networkID interface{}, dbTx interface{}) *bridgeServiceStorageMock_GetDeposit_Call { return &bridgeServiceStorageMock_GetDeposit_Call{Call: _e.mock.On("GetDeposit", ctx, depositCnt, networkID, dbTx)} } -func (_c *bridgeServiceStorageMock_GetDeposit_Call) Run(run func(ctx context.Context, depositCnt uint, networkID uint, dbTx pgx.Tx)) *bridgeServiceStorageMock_GetDeposit_Call { +func (_c *bridgeServiceStorageMock_GetDeposit_Call) Run(run func(ctx context.Context, depositCnt uint32, networkID uint32, dbTx pgx.Tx)) 
*bridgeServiceStorageMock_GetDeposit_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint), args[2].(uint), args[3].(pgx.Tx)) + run(args[0].(context.Context), args[1].(uint32), args[2].(uint32), args[3].(pgx.Tx)) }) return _c } @@ -325,7 +325,7 @@ func (_c *bridgeServiceStorageMock_GetDeposit_Call) Return(_a0 *etherman.Deposit return _c } -func (_c *bridgeServiceStorageMock_GetDeposit_Call) RunAndReturn(run func(context.Context, uint, uint, pgx.Tx) (*etherman.Deposit, error)) *bridgeServiceStorageMock_GetDeposit_Call { +func (_c *bridgeServiceStorageMock_GetDeposit_Call) RunAndReturn(run func(context.Context, uint32, uint32, pgx.Tx) (*etherman.Deposit, error)) *bridgeServiceStorageMock_GetDeposit_Call { _c.Call.Return(run) return _c } @@ -389,25 +389,25 @@ func (_c *bridgeServiceStorageMock_GetDepositCount_Call) RunAndReturn(run func(c } // GetDepositCountByRoot provides a mock function with given fields: ctx, root, network, dbTx -func (_m *bridgeServiceStorageMock) GetDepositCountByRoot(ctx context.Context, root []byte, network uint8, dbTx pgx.Tx) (uint, error) { +func (_m *bridgeServiceStorageMock) GetDepositCountByRoot(ctx context.Context, root []byte, network uint32, dbTx pgx.Tx) (uint32, error) { ret := _m.Called(ctx, root, network, dbTx) if len(ret) == 0 { panic("no return value specified for GetDepositCountByRoot") } - var r0 uint + var r0 uint32 var r1 error - if rf, ok := ret.Get(0).(func(context.Context, []byte, uint8, pgx.Tx) (uint, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, []byte, uint32, pgx.Tx) (uint32, error)); ok { return rf(ctx, root, network, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, []byte, uint8, pgx.Tx) uint); ok { + if rf, ok := ret.Get(0).(func(context.Context, []byte, uint32, pgx.Tx) uint32); ok { r0 = rf(ctx, root, network, dbTx) } else { - r0 = ret.Get(0).(uint) + r0 = ret.Get(0).(uint32) } - if rf, ok := ret.Get(1).(func(context.Context, []byte, uint8, pgx.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, []byte, uint32, pgx.Tx) error); ok { r1 = rf(ctx, root, network, dbTx) } else { r1 = ret.Error(1) @@ -424,31 +424,31 @@ type bridgeServiceStorageMock_GetDepositCountByRoot_Call struct { // GetDepositCountByRoot is a helper method to define mock.On call // - ctx context.Context // - root []byte -// - network uint8 +// - network uint32 // - dbTx pgx.Tx func (_e *bridgeServiceStorageMock_Expecter) GetDepositCountByRoot(ctx interface{}, root interface{}, network interface{}, dbTx interface{}) *bridgeServiceStorageMock_GetDepositCountByRoot_Call { return &bridgeServiceStorageMock_GetDepositCountByRoot_Call{Call: _e.mock.On("GetDepositCountByRoot", ctx, root, network, dbTx)} } -func (_c *bridgeServiceStorageMock_GetDepositCountByRoot_Call) Run(run func(ctx context.Context, root []byte, network uint8, dbTx pgx.Tx)) *bridgeServiceStorageMock_GetDepositCountByRoot_Call { +func (_c *bridgeServiceStorageMock_GetDepositCountByRoot_Call) Run(run func(ctx context.Context, root []byte, network uint32, dbTx pgx.Tx)) *bridgeServiceStorageMock_GetDepositCountByRoot_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].([]byte), args[2].(uint8), args[3].(pgx.Tx)) + run(args[0].(context.Context), args[1].([]byte), args[2].(uint32), args[3].(pgx.Tx)) }) return _c } -func (_c *bridgeServiceStorageMock_GetDepositCountByRoot_Call) Return(_a0 uint, _a1 error) *bridgeServiceStorageMock_GetDepositCountByRoot_Call { +func (_c 
*bridgeServiceStorageMock_GetDepositCountByRoot_Call) Return(_a0 uint32, _a1 error) *bridgeServiceStorageMock_GetDepositCountByRoot_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *bridgeServiceStorageMock_GetDepositCountByRoot_Call) RunAndReturn(run func(context.Context, []byte, uint8, pgx.Tx) (uint, error)) *bridgeServiceStorageMock_GetDepositCountByRoot_Call { +func (_c *bridgeServiceStorageMock_GetDepositCountByRoot_Call) RunAndReturn(run func(context.Context, []byte, uint32, pgx.Tx) (uint32, error)) *bridgeServiceStorageMock_GetDepositCountByRoot_Call { _c.Call.Return(run) return _c } // GetDeposits provides a mock function with given fields: ctx, destAddr, limit, offset, dbTx -func (_m *bridgeServiceStorageMock) GetDeposits(ctx context.Context, destAddr string, limit uint, offset uint, dbTx pgx.Tx) ([]*etherman.Deposit, error) { +func (_m *bridgeServiceStorageMock) GetDeposits(ctx context.Context, destAddr string, limit uint32, offset uint32, dbTx pgx.Tx) ([]*etherman.Deposit, error) { ret := _m.Called(ctx, destAddr, limit, offset, dbTx) if len(ret) == 0 { @@ -457,10 +457,10 @@ func (_m *bridgeServiceStorageMock) GetDeposits(ctx context.Context, destAddr st var r0 []*etherman.Deposit var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, uint, uint, pgx.Tx) ([]*etherman.Deposit, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, uint32, uint32, pgx.Tx) ([]*etherman.Deposit, error)); ok { return rf(ctx, destAddr, limit, offset, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, string, uint, uint, pgx.Tx) []*etherman.Deposit); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, uint32, uint32, pgx.Tx) []*etherman.Deposit); ok { r0 = rf(ctx, destAddr, limit, offset, dbTx) } else { if ret.Get(0) != nil { @@ -468,7 +468,7 @@ func (_m *bridgeServiceStorageMock) GetDeposits(ctx context.Context, destAddr st } } - if rf, ok := ret.Get(1).(func(context.Context, string, uint, uint, pgx.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, string, uint32, uint32, pgx.Tx) error); ok { r1 = rf(ctx, destAddr, limit, offset, dbTx) } else { r1 = ret.Error(1) @@ -485,16 +485,16 @@ type bridgeServiceStorageMock_GetDeposits_Call struct { // GetDeposits is a helper method to define mock.On call // - ctx context.Context // - destAddr string -// - limit uint -// - offset uint +// - limit uint32 +// - offset uint32 // - dbTx pgx.Tx func (_e *bridgeServiceStorageMock_Expecter) GetDeposits(ctx interface{}, destAddr interface{}, limit interface{}, offset interface{}, dbTx interface{}) *bridgeServiceStorageMock_GetDeposits_Call { return &bridgeServiceStorageMock_GetDeposits_Call{Call: _e.mock.On("GetDeposits", ctx, destAddr, limit, offset, dbTx)} } -func (_c *bridgeServiceStorageMock_GetDeposits_Call) Run(run func(ctx context.Context, destAddr string, limit uint, offset uint, dbTx pgx.Tx)) *bridgeServiceStorageMock_GetDeposits_Call { +func (_c *bridgeServiceStorageMock_GetDeposits_Call) Run(run func(ctx context.Context, destAddr string, limit uint32, offset uint32, dbTx pgx.Tx)) *bridgeServiceStorageMock_GetDeposits_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(string), args[2].(uint), args[3].(uint), args[4].(pgx.Tx)) + run(args[0].(context.Context), args[1].(string), args[2].(uint32), args[3].(uint32), args[4].(pgx.Tx)) }) return _c } @@ -504,7 +504,7 @@ func (_c *bridgeServiceStorageMock_GetDeposits_Call) Return(_a0 []*etherman.Depo return _c } -func (_c *bridgeServiceStorageMock_GetDeposits_Call) 
RunAndReturn(run func(context.Context, string, uint, uint, pgx.Tx) ([]*etherman.Deposit, error)) *bridgeServiceStorageMock_GetDeposits_Call { +func (_c *bridgeServiceStorageMock_GetDeposits_Call) RunAndReturn(run func(context.Context, string, uint32, uint32, pgx.Tx) ([]*etherman.Deposit, error)) *bridgeServiceStorageMock_GetDeposits_Call { _c.Call.Return(run) return _c } @@ -570,7 +570,7 @@ func (_c *bridgeServiceStorageMock_GetExitRootByGER_Call) RunAndReturn(run func( } // GetLatestExitRoot provides a mock function with given fields: ctx, networkID, destNetwork, dbTx -func (_m *bridgeServiceStorageMock) GetLatestExitRoot(ctx context.Context, networkID uint, destNetwork uint, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) { +func (_m *bridgeServiceStorageMock) GetLatestExitRoot(ctx context.Context, networkID uint32, destNetwork uint32, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) { ret := _m.Called(ctx, networkID, destNetwork, dbTx) if len(ret) == 0 { @@ -579,10 +579,10 @@ func (_m *bridgeServiceStorageMock) GetLatestExitRoot(ctx context.Context, netwo var r0 *etherman.GlobalExitRoot var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint, uint, pgx.Tx) (*etherman.GlobalExitRoot, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, pgx.Tx) (*etherman.GlobalExitRoot, error)); ok { return rf(ctx, networkID, destNetwork, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, uint, uint, pgx.Tx) *etherman.GlobalExitRoot); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, pgx.Tx) *etherman.GlobalExitRoot); ok { r0 = rf(ctx, networkID, destNetwork, dbTx) } else { if ret.Get(0) != nil { @@ -590,7 +590,7 @@ func (_m *bridgeServiceStorageMock) GetLatestExitRoot(ctx context.Context, netwo } } - if rf, ok := ret.Get(1).(func(context.Context, uint, uint, pgx.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, uint32, uint32, pgx.Tx) error); ok { r1 = rf(ctx, networkID, destNetwork, dbTx) } else { r1 = ret.Error(1) @@ -606,16 +606,16 @@ type bridgeServiceStorageMock_GetLatestExitRoot_Call struct { // GetLatestExitRoot is a helper method to define mock.On call // - ctx context.Context -// - networkID uint -// - destNetwork uint +// - networkID uint32 +// - destNetwork uint32 // - dbTx pgx.Tx func (_e *bridgeServiceStorageMock_Expecter) GetLatestExitRoot(ctx interface{}, networkID interface{}, destNetwork interface{}, dbTx interface{}) *bridgeServiceStorageMock_GetLatestExitRoot_Call { return &bridgeServiceStorageMock_GetLatestExitRoot_Call{Call: _e.mock.On("GetLatestExitRoot", ctx, networkID, destNetwork, dbTx)} } -func (_c *bridgeServiceStorageMock_GetLatestExitRoot_Call) Run(run func(ctx context.Context, networkID uint, destNetwork uint, dbTx pgx.Tx)) *bridgeServiceStorageMock_GetLatestExitRoot_Call { +func (_c *bridgeServiceStorageMock_GetLatestExitRoot_Call) Run(run func(ctx context.Context, networkID uint32, destNetwork uint32, dbTx pgx.Tx)) *bridgeServiceStorageMock_GetLatestExitRoot_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint), args[2].(uint), args[3].(pgx.Tx)) + run(args[0].(context.Context), args[1].(uint32), args[2].(uint32), args[3].(pgx.Tx)) }) return _c } @@ -625,13 +625,13 @@ func (_c *bridgeServiceStorageMock_GetLatestExitRoot_Call) Return(_a0 *etherman. 
return _c } -func (_c *bridgeServiceStorageMock_GetLatestExitRoot_Call) RunAndReturn(run func(context.Context, uint, uint, pgx.Tx) (*etherman.GlobalExitRoot, error)) *bridgeServiceStorageMock_GetLatestExitRoot_Call { +func (_c *bridgeServiceStorageMock_GetLatestExitRoot_Call) RunAndReturn(run func(context.Context, uint32, uint32, pgx.Tx) (*etherman.GlobalExitRoot, error)) *bridgeServiceStorageMock_GetLatestExitRoot_Call { _c.Call.Return(run) return _c } // GetPendingDepositsToClaim provides a mock function with given fields: ctx, destAddress, destNetwork, leafType, limit, offset, dbTx -func (_m *bridgeServiceStorageMock) GetPendingDepositsToClaim(ctx context.Context, destAddress common.Address, destNetwork uint64, leafType uint32, limit uint32, offset uint64, dbTx pgx.Tx) ([]*etherman.Deposit, uint64, error) { +func (_m *bridgeServiceStorageMock) GetPendingDepositsToClaim(ctx context.Context, destAddress common.Address, destNetwork uint32, leafType uint32, limit uint32, offset uint32, dbTx pgx.Tx) ([]*etherman.Deposit, uint64, error) { ret := _m.Called(ctx, destAddress, destNetwork, leafType, limit, offset, dbTx) if len(ret) == 0 { @@ -641,10 +641,10 @@ func (_m *bridgeServiceStorageMock) GetPendingDepositsToClaim(ctx context.Contex var r0 []*etherman.Deposit var r1 uint64 var r2 error - if rf, ok := ret.Get(0).(func(context.Context, common.Address, uint64, uint32, uint32, uint64, pgx.Tx) ([]*etherman.Deposit, uint64, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, common.Address, uint32, uint32, uint32, uint32, pgx.Tx) ([]*etherman.Deposit, uint64, error)); ok { return rf(ctx, destAddress, destNetwork, leafType, limit, offset, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, common.Address, uint64, uint32, uint32, uint64, pgx.Tx) []*etherman.Deposit); ok { + if rf, ok := ret.Get(0).(func(context.Context, common.Address, uint32, uint32, uint32, uint32, pgx.Tx) []*etherman.Deposit); ok { r0 = rf(ctx, destAddress, destNetwork, leafType, limit, offset, dbTx) } else { if ret.Get(0) != nil { @@ -652,13 +652,13 @@ func (_m *bridgeServiceStorageMock) GetPendingDepositsToClaim(ctx context.Contex } } - if rf, ok := ret.Get(1).(func(context.Context, common.Address, uint64, uint32, uint32, uint64, pgx.Tx) uint64); ok { + if rf, ok := ret.Get(1).(func(context.Context, common.Address, uint32, uint32, uint32, uint32, pgx.Tx) uint64); ok { r1 = rf(ctx, destAddress, destNetwork, leafType, limit, offset, dbTx) } else { r1 = ret.Get(1).(uint64) } - if rf, ok := ret.Get(2).(func(context.Context, common.Address, uint64, uint32, uint32, uint64, pgx.Tx) error); ok { + if rf, ok := ret.Get(2).(func(context.Context, common.Address, uint32, uint32, uint32, uint32, pgx.Tx) error); ok { r2 = rf(ctx, destAddress, destNetwork, leafType, limit, offset, dbTx) } else { r2 = ret.Error(2) @@ -675,18 +675,18 @@ type bridgeServiceStorageMock_GetPendingDepositsToClaim_Call struct { // GetPendingDepositsToClaim is a helper method to define mock.On call // - ctx context.Context // - destAddress common.Address -// - destNetwork uint64 +// - destNetwork uint32 // - leafType uint32 // - limit uint32 -// - offset uint64 +// - offset uint32 // - dbTx pgx.Tx func (_e *bridgeServiceStorageMock_Expecter) GetPendingDepositsToClaim(ctx interface{}, destAddress interface{}, destNetwork interface{}, leafType interface{}, limit interface{}, offset interface{}, dbTx interface{}) *bridgeServiceStorageMock_GetPendingDepositsToClaim_Call { return &bridgeServiceStorageMock_GetPendingDepositsToClaim_Call{Call: 
_e.mock.On("GetPendingDepositsToClaim", ctx, destAddress, destNetwork, leafType, limit, offset, dbTx)} } -func (_c *bridgeServiceStorageMock_GetPendingDepositsToClaim_Call) Run(run func(ctx context.Context, destAddress common.Address, destNetwork uint64, leafType uint32, limit uint32, offset uint64, dbTx pgx.Tx)) *bridgeServiceStorageMock_GetPendingDepositsToClaim_Call { +func (_c *bridgeServiceStorageMock_GetPendingDepositsToClaim_Call) Run(run func(ctx context.Context, destAddress common.Address, destNetwork uint32, leafType uint32, limit uint32, offset uint32, dbTx pgx.Tx)) *bridgeServiceStorageMock_GetPendingDepositsToClaim_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Address), args[2].(uint64), args[3].(uint32), args[4].(uint32), args[5].(uint64), args[6].(pgx.Tx)) + run(args[0].(context.Context), args[1].(common.Address), args[2].(uint32), args[3].(uint32), args[4].(uint32), args[5].(uint32), args[6].(pgx.Tx)) }) return _c } @@ -696,7 +696,7 @@ func (_c *bridgeServiceStorageMock_GetPendingDepositsToClaim_Call) Return(_a0 [] return _c } -func (_c *bridgeServiceStorageMock_GetPendingDepositsToClaim_Call) RunAndReturn(run func(context.Context, common.Address, uint64, uint32, uint32, uint64, pgx.Tx) ([]*etherman.Deposit, uint64, error)) *bridgeServiceStorageMock_GetPendingDepositsToClaim_Call { +func (_c *bridgeServiceStorageMock_GetPendingDepositsToClaim_Call) RunAndReturn(run func(context.Context, common.Address, uint32, uint32, uint32, uint32, pgx.Tx) ([]*etherman.Deposit, uint64, error)) *bridgeServiceStorageMock_GetPendingDepositsToClaim_Call { _c.Call.Return(run) return _c } @@ -762,7 +762,7 @@ func (_c *bridgeServiceStorageMock_GetRollupExitLeavesByRoot_Call) RunAndReturn( } // GetRoot provides a mock function with given fields: ctx, depositCnt, network, dbTx -func (_m *bridgeServiceStorageMock) GetRoot(ctx context.Context, depositCnt uint, network uint, dbTx pgx.Tx) ([]byte, error) { +func (_m *bridgeServiceStorageMock) GetRoot(ctx context.Context, depositCnt uint32, network uint32, dbTx pgx.Tx) ([]byte, error) { ret := _m.Called(ctx, depositCnt, network, dbTx) if len(ret) == 0 { @@ -771,10 +771,10 @@ func (_m *bridgeServiceStorageMock) GetRoot(ctx context.Context, depositCnt uint var r0 []byte var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint, uint, pgx.Tx) ([]byte, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, pgx.Tx) ([]byte, error)); ok { return rf(ctx, depositCnt, network, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, uint, uint, pgx.Tx) []byte); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, pgx.Tx) []byte); ok { r0 = rf(ctx, depositCnt, network, dbTx) } else { if ret.Get(0) != nil { @@ -782,7 +782,7 @@ func (_m *bridgeServiceStorageMock) GetRoot(ctx context.Context, depositCnt uint } } - if rf, ok := ret.Get(1).(func(context.Context, uint, uint, pgx.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, uint32, uint32, pgx.Tx) error); ok { r1 = rf(ctx, depositCnt, network, dbTx) } else { r1 = ret.Error(1) @@ -798,16 +798,16 @@ type bridgeServiceStorageMock_GetRoot_Call struct { // GetRoot is a helper method to define mock.On call // - ctx context.Context -// - depositCnt uint -// - network uint +// - depositCnt uint32 +// - network uint32 // - dbTx pgx.Tx func (_e *bridgeServiceStorageMock_Expecter) GetRoot(ctx interface{}, depositCnt interface{}, network interface{}, dbTx interface{}) *bridgeServiceStorageMock_GetRoot_Call { 
return &bridgeServiceStorageMock_GetRoot_Call{Call: _e.mock.On("GetRoot", ctx, depositCnt, network, dbTx)} } -func (_c *bridgeServiceStorageMock_GetRoot_Call) Run(run func(ctx context.Context, depositCnt uint, network uint, dbTx pgx.Tx)) *bridgeServiceStorageMock_GetRoot_Call { +func (_c *bridgeServiceStorageMock_GetRoot_Call) Run(run func(ctx context.Context, depositCnt uint32, network uint32, dbTx pgx.Tx)) *bridgeServiceStorageMock_GetRoot_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint), args[2].(uint), args[3].(pgx.Tx)) + run(args[0].(context.Context), args[1].(uint32), args[2].(uint32), args[3].(pgx.Tx)) }) return _c } @@ -817,13 +817,13 @@ func (_c *bridgeServiceStorageMock_GetRoot_Call) Return(_a0 []byte, _a1 error) * return _c } -func (_c *bridgeServiceStorageMock_GetRoot_Call) RunAndReturn(run func(context.Context, uint, uint, pgx.Tx) ([]byte, error)) *bridgeServiceStorageMock_GetRoot_Call { +func (_c *bridgeServiceStorageMock_GetRoot_Call) RunAndReturn(run func(context.Context, uint32, uint32, pgx.Tx) ([]byte, error)) *bridgeServiceStorageMock_GetRoot_Call { _c.Call.Return(run) return _c } // GetTokenWrapped provides a mock function with given fields: ctx, originalNetwork, originalTokenAddress, dbTx -func (_m *bridgeServiceStorageMock) GetTokenWrapped(ctx context.Context, originalNetwork uint, originalTokenAddress common.Address, dbTx pgx.Tx) (*etherman.TokenWrapped, error) { +func (_m *bridgeServiceStorageMock) GetTokenWrapped(ctx context.Context, originalNetwork uint32, originalTokenAddress common.Address, dbTx pgx.Tx) (*etherman.TokenWrapped, error) { ret := _m.Called(ctx, originalNetwork, originalTokenAddress, dbTx) if len(ret) == 0 { @@ -832,10 +832,10 @@ func (_m *bridgeServiceStorageMock) GetTokenWrapped(ctx context.Context, origina var r0 *etherman.TokenWrapped var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint, common.Address, pgx.Tx) (*etherman.TokenWrapped, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Address, pgx.Tx) (*etherman.TokenWrapped, error)); ok { return rf(ctx, originalNetwork, originalTokenAddress, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, uint, common.Address, pgx.Tx) *etherman.TokenWrapped); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Address, pgx.Tx) *etherman.TokenWrapped); ok { r0 = rf(ctx, originalNetwork, originalTokenAddress, dbTx) } else { if ret.Get(0) != nil { @@ -843,7 +843,7 @@ func (_m *bridgeServiceStorageMock) GetTokenWrapped(ctx context.Context, origina } } - if rf, ok := ret.Get(1).(func(context.Context, uint, common.Address, pgx.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, uint32, common.Address, pgx.Tx) error); ok { r1 = rf(ctx, originalNetwork, originalTokenAddress, dbTx) } else { r1 = ret.Error(1) @@ -859,16 +859,16 @@ type bridgeServiceStorageMock_GetTokenWrapped_Call struct { // GetTokenWrapped is a helper method to define mock.On call // - ctx context.Context -// - originalNetwork uint +// - originalNetwork uint32 // - originalTokenAddress common.Address // - dbTx pgx.Tx func (_e *bridgeServiceStorageMock_Expecter) GetTokenWrapped(ctx interface{}, originalNetwork interface{}, originalTokenAddress interface{}, dbTx interface{}) *bridgeServiceStorageMock_GetTokenWrapped_Call { return &bridgeServiceStorageMock_GetTokenWrapped_Call{Call: _e.mock.On("GetTokenWrapped", ctx, originalNetwork, originalTokenAddress, dbTx)} } -func (_c *bridgeServiceStorageMock_GetTokenWrapped_Call) Run(run 
func(ctx context.Context, originalNetwork uint, originalTokenAddress common.Address, dbTx pgx.Tx)) *bridgeServiceStorageMock_GetTokenWrapped_Call { +func (_c *bridgeServiceStorageMock_GetTokenWrapped_Call) Run(run func(ctx context.Context, originalNetwork uint32, originalTokenAddress common.Address, dbTx pgx.Tx)) *bridgeServiceStorageMock_GetTokenWrapped_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint), args[2].(common.Address), args[3].(pgx.Tx)) + run(args[0].(context.Context), args[1].(uint32), args[2].(common.Address), args[3].(pgx.Tx)) }) return _c } @@ -878,7 +878,7 @@ func (_c *bridgeServiceStorageMock_GetTokenWrapped_Call) Return(_a0 *etherman.To return _c } -func (_c *bridgeServiceStorageMock_GetTokenWrapped_Call) RunAndReturn(run func(context.Context, uint, common.Address, pgx.Tx) (*etherman.TokenWrapped, error)) *bridgeServiceStorageMock_GetTokenWrapped_Call { +func (_c *bridgeServiceStorageMock_GetTokenWrapped_Call) RunAndReturn(run func(context.Context, uint32, common.Address, pgx.Tx) (*etherman.TokenWrapped, error)) *bridgeServiceStorageMock_GetTokenWrapped_Call { _c.Call.Return(run) return _c } diff --git a/server/service.go b/server/service.go index b13e32ef..d24e8a5e 100644 --- a/server/service.go +++ b/server/service.go @@ -17,7 +17,7 @@ import ( type bridgeService struct { storage bridgeServiceStorage - networkIDs map[uint]uint8 + networkIDs map[uint32]uint8 height uint8 defaultPageLimit uint32 maxPageLimit uint32 @@ -27,8 +27,8 @@ type bridgeService struct { } // NewBridgeService creates new bridge service. -func NewBridgeService(cfg Config, height uint8, networks []uint, storage interface{}) *bridgeService { - var networkIDs = make(map[uint]uint8) +func NewBridgeService(cfg Config, height uint8, networks []uint32, storage interface{}) *bridgeService { + var networkIDs = make(map[uint32]uint8) for i, network := range networks { networkIDs[network] = uint8(i) } @@ -64,7 +64,7 @@ func (s *bridgeService) getNode(ctx context.Context, parentHash [bridgectrl.KeyL } // getProof returns the merkle proof for a given index and root. -func (s *bridgeService) getProof(index uint, root [bridgectrl.KeyLen]byte, dbTx pgx.Tx) ([][bridgectrl.KeyLen]byte, error) { +func (s *bridgeService) getProof(index uint32, root [bridgectrl.KeyLen]byte, dbTx pgx.Tx) ([][bridgectrl.KeyLen]byte, error) { var siblings [][bridgectrl.KeyLen]byte cur := root @@ -115,7 +115,7 @@ func (s *bridgeService) getProof(index uint, root [bridgectrl.KeyLen]byte, dbTx } // getRollupExitProof returns the merkle proof for the zkevm leaf. -func (s *bridgeService) getRollupExitProof(rollupIndex uint, root common.Hash, dbTx pgx.Tx) ([][bridgectrl.KeyLen]byte, common.Hash, error) { +func (s *bridgeService) getRollupExitProof(rollupIndex uint32, root common.Hash, dbTx pgx.Tx) ([][bridgectrl.KeyLen]byte, common.Hash, error) { ctx := context.Background() // Get leaves given the root @@ -148,7 +148,7 @@ func (s *bridgeService) getRollupExitProof(rollupIndex uint, root common.Hash, d } // GetClaimProof returns the merkle proof to claim the given deposit. 
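The regenerated mock helpers above type-assert every argument in their Run callbacks (args[1].(uint32), args[2].(uint32), and so on), so any test that still records expectations with plain uint values will stop matching after this migration. A minimal sketch of the adjusted pattern, assuming it sits in the same package as server/service_test.go and reuses that file's testify imports (the test name is illustrative):

// Sketch only: expectation arguments must now be uint32-typed. A value
// declared as plain uint would no longer satisfy testify's argument
// matching nor the args[n].(uint32) assertions in the generated helpers.
func TestGetDepositExpectsUint32(t *testing.T) {
    mockStorage := newBridgeServiceStorageMock(t)
    deposit := &etherman.Deposit{}
    mockStorage.EXPECT().
        GetDeposit(mock.Anything, uint32(0), uint32(0), mock.Anything).
        Return(deposit, nil)
    got, err := mockStorage.GetDeposit(context.Background(), 0, 0, nil)
    require.NoError(t, err)
    require.Same(t, deposit, got)
}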
-func (s *bridgeService) GetClaimProof(depositCnt, networkID uint, dbTx pgx.Tx) (*etherman.GlobalExitRoot, [][bridgectrl.KeyLen]byte, [][bridgectrl.KeyLen]byte, error) { +func (s *bridgeService) GetClaimProof(depositCnt, networkID uint32, dbTx pgx.Tx) (*etherman.GlobalExitRoot, [][bridgectrl.KeyLen]byte, [][bridgectrl.KeyLen]byte, error) { ctx := context.Background() deposit, err := s.storage.GetDeposit(ctx, depositCnt, networkID, dbTx) @@ -194,7 +194,7 @@ func (s *bridgeService) GetClaimProof(depositCnt, networkID uint, dbTx pgx.Tx) ( } // GetClaimProofbyGER returns the merkle proof to claim the given deposit. -func (s *bridgeService) GetClaimProofbyGER(depositCnt, networkID uint, GER common.Hash, dbTx pgx.Tx) (*etherman.GlobalExitRoot, [][bridgectrl.KeyLen]byte, [][bridgectrl.KeyLen]byte, error) { +func (s *bridgeService) GetClaimProofbyGER(depositCnt, networkID uint32, GER common.Hash, dbTx pgx.Tx) (*etherman.GlobalExitRoot, [][bridgectrl.KeyLen]byte, [][bridgectrl.KeyLen]byte, error) { ctx := context.Background() if dbTx == nil { // if the call comes from the rest API @@ -245,7 +245,7 @@ func (s *bridgeService) GetClaimProofbyGER(depositCnt, networkID uint, GER commo } // GetClaimProofForCompressed returns the merkle proof to claim the given deposit. -func (s *bridgeService) GetClaimProofForCompressed(ger common.Hash, depositCnt, networkID uint, dbTx pgx.Tx) (*etherman.GlobalExitRoot, [][bridgectrl.KeyLen]byte, [][bridgectrl.KeyLen]byte, error) { +func (s *bridgeService) GetClaimProofForCompressed(ger common.Hash, depositCnt, networkID uint32, dbTx pgx.Tx) (*etherman.GlobalExitRoot, [][bridgectrl.KeyLen]byte, [][bridgectrl.KeyLen]byte, error) { ctx := context.Background() if dbTx == nil { // if the call comes from the rest API @@ -301,7 +301,7 @@ func emptyProof() [][bridgectrl.KeyLen]byte { } // GetDepositStatus returns deposit with ready_for_claim status. 
-func (s *bridgeService) GetDepositStatus(ctx context.Context, depositCount uint, originNetworkID, destNetworkID uint) (string, error) { +func (s *bridgeService) GetDepositStatus(ctx context.Context, depositCount, originNetworkID, destNetworkID uint32) (string, error) { var ( claimTxHash string ) @@ -339,7 +339,7 @@ func (s *bridgeService) GetBridges(ctx context.Context, req *pb.GetBridgesReques if err != nil { return nil, err } - deposits, err := s.storage.GetDeposits(ctx, req.DestAddr, uint(limit), uint(req.Offset), nil) + deposits, err := s.storage.GetDeposits(ctx, req.DestAddr, limit, req.Offset, nil) if err != nil { return nil, err } @@ -351,7 +351,7 @@ func (s *bridgeService) GetBridges(ctx context.Context, req *pb.GetBridgesReques return nil, err } mainnetFlag := deposit.NetworkID == 0 - var rollupIndex uint + var rollupIndex uint32 if !mainnetFlag { rollupIndex = deposit.NetworkID - 1 } @@ -359,14 +359,14 @@ func (s *bridgeService) GetBridges(ctx context.Context, req *pb.GetBridgesReques pbDeposits = append( pbDeposits, &pb.Deposit{ LeafType: uint32(deposit.LeafType), - OrigNet: uint32(deposit.OriginalNetwork), + OrigNet: deposit.OriginalNetwork, OrigAddr: deposit.OriginalAddress.Hex(), Amount: deposit.Amount.String(), - DestNet: uint32(deposit.DestinationNetwork), + DestNet: deposit.DestinationNetwork, DestAddr: deposit.DestinationAddress.Hex(), BlockNum: deposit.BlockNumber, - DepositCnt: uint64(deposit.DepositCount), - NetworkId: uint32(deposit.NetworkID), + DepositCnt: deposit.DepositCount, + NetworkId: deposit.NetworkID, TxHash: deposit.TxHash.String(), ClaimTxHash: claimTxHash, Metadata: "0x" + hex.EncodeToString(deposit.Metadata), @@ -396,7 +396,7 @@ func (s *bridgeService) GetClaims(ctx context.Context, req *pb.GetClaimsRequest) if err != nil { return nil, err } - claims, err := s.storage.GetClaims(ctx, req.DestAddr, uint(limit), uint(req.Offset), nil) //nolint:gomnd + claims, err := s.storage.GetClaims(ctx, req.DestAddr, limit, req.Offset, nil) //nolint:gomnd if err != nil { return nil, err } @@ -404,11 +404,11 @@ func (s *bridgeService) GetClaims(ctx context.Context, req *pb.GetClaimsRequest) var pbClaims []*pb.Claim for _, claim := range claims { pbClaims = append(pbClaims, &pb.Claim{ - Index: uint64(claim.Index), - OrigNet: uint32(claim.OriginalNetwork), + Index: claim.Index, + OrigNet: claim.OriginalNetwork, OrigAddr: claim.OriginalAddress.Hex(), Amount: claim.Amount.String(), - NetworkId: uint32(claim.NetworkID), + NetworkId: claim.NetworkID, DestAddr: claim.DestinationAddress.Hex(), BlockNum: claim.BlockNumber, TxHash: claim.TxHash.String(), @@ -426,7 +426,7 @@ func (s *bridgeService) GetClaims(ctx context.Context, req *pb.GetClaimsRequest) // GetProof returns the merkle proof for the given deposit. // Bridge rest API endpoint func (s *bridgeService) GetProof(ctx context.Context, req *pb.GetProofRequest) (*pb.GetProofResponse, error) { - globalExitRoot, merkleProof, rollupMerkleProof, err := s.GetClaimProof(uint(req.DepositCnt), uint(req.NetId), nil) + globalExitRoot, merkleProof, rollupMerkleProof, err := s.GetClaimProof(req.DepositCnt, req.NetId, nil) if err != nil { return nil, err } @@ -455,12 +455,12 @@ func (s *bridgeService) GetProof(ctx context.Context, req *pb.GetProofRequest) ( // GetBridge returns the bridge with status whether it is able to send a claim transaction or not. 
// Bridge rest API endpoint func (s *bridgeService) GetBridge(ctx context.Context, req *pb.GetBridgeRequest) (*pb.GetBridgeResponse, error) { - deposit, err := s.storage.GetDeposit(ctx, uint(req.DepositCnt), uint(req.NetId), nil) + deposit, err := s.storage.GetDeposit(ctx, req.DepositCnt, req.NetId, nil) if err != nil { return nil, err } - claimTxHash, err := s.GetDepositStatus(ctx, uint(req.DepositCnt), deposit.NetworkID, deposit.DestinationNetwork) + claimTxHash, err := s.GetDepositStatus(ctx, req.DepositCnt, deposit.NetworkID, deposit.DestinationNetwork) if err != nil { return nil, err } @@ -468,14 +468,14 @@ func (s *bridgeService) GetBridge(ctx context.Context, req *pb.GetBridgeRequest) return &pb.GetBridgeResponse{ Deposit: &pb.Deposit{ LeafType: uint32(deposit.LeafType), - OrigNet: uint32(deposit.OriginalNetwork), + OrigNet: deposit.OriginalNetwork, OrigAddr: deposit.OriginalAddress.Hex(), Amount: deposit.Amount.String(), - DestNet: uint32(deposit.DestinationNetwork), + DestNet: deposit.DestinationNetwork, DestAddr: deposit.DestinationAddress.Hex(), BlockNum: deposit.BlockNumber, - DepositCnt: uint64(deposit.DepositCount), - NetworkId: uint32(deposit.NetworkID), + DepositCnt: deposit.DepositCount, + NetworkId: deposit.NetworkID, TxHash: deposit.TxHash.String(), ClaimTxHash: claimTxHash, Metadata: "0x" + hex.EncodeToString(deposit.Metadata), @@ -487,7 +487,7 @@ func (s *bridgeService) GetBridge(ctx context.Context, req *pb.GetBridgeRequest) // GetTokenWrapped returns the token wrapped created for a specific network // Bridge rest API endpoint func (s *bridgeService) GetTokenWrapped(ctx context.Context, req *pb.GetTokenWrappedRequest) (*pb.GetTokenWrappedResponse, error) { - tokenWrapped, err := s.storage.GetTokenWrapped(ctx, uint(req.OrigNet), common.HexToAddress(req.OrigTokenAddr), nil) + tokenWrapped, err := s.storage.GetTokenWrapped(ctx, req.OrigNet, common.HexToAddress(req.OrigTokenAddr), nil) if err != nil { return nil, err } @@ -506,7 +506,7 @@ func (s *bridgeService) GetTokenWrapped(ctx context.Context, req *pb.GetTokenWra func (s *bridgeService) GetProofByGER(ctx context.Context, req *pb.GetProofByGERRequest) (*pb.GetProofResponse, error) { ger := common.HexToHash(req.Ger) - globalExitRoot, merkleProof, rollupMerkleProof, err := s.GetClaimProofbyGER(uint(req.DepositCnt), uint(req.NetId), ger, nil) + globalExitRoot, merkleProof, rollupMerkleProof, err := s.GetClaimProofbyGER(req.DepositCnt, req.NetId, ger, nil) if err != nil { return nil, err } @@ -551,7 +551,7 @@ func (s *bridgeService) GetPendingBridgesToClaim(ctx context.Context, req *pb.Ge var pbDeposits []*pb.Deposit for _, deposit := range deposits { mainnetFlag := deposit.NetworkID == 0 - var rollupIndex uint + var rollupIndex uint32 if !mainnetFlag { rollupIndex = deposit.NetworkID - 1 } @@ -559,14 +559,14 @@ func (s *bridgeService) GetPendingBridgesToClaim(ctx context.Context, req *pb.Ge pbDeposits = append( pbDeposits, &pb.Deposit{ LeafType: uint32(deposit.LeafType), - OrigNet: uint32(deposit.OriginalNetwork), + OrigNet: deposit.OriginalNetwork, OrigAddr: deposit.OriginalAddress.Hex(), Amount: deposit.Amount.String(), - DestNet: uint32(deposit.DestinationNetwork), + DestNet: deposit.DestinationNetwork, DestAddr: deposit.DestinationAddress.Hex(), BlockNum: deposit.BlockNumber, - DepositCnt: uint64(deposit.DepositCount), - NetworkId: uint32(deposit.NetworkID), + DepositCnt: deposit.DepositCount, + NetworkId: deposit.NetworkID, TxHash: deposit.TxHash.String(), ClaimTxHash: "", Metadata: "0x" + 
hex.EncodeToString(deposit.Metadata), diff --git a/server/service_test.go b/server/service_test.go index c8c4afc0..8ec0be91 100644 --- a/server/service_test.go +++ b/server/service_test.go @@ -14,9 +14,11 @@ func TestGetClaimProofbyGER(t *testing.T) { CacheSize: 32, } mockStorage := newBridgeServiceStorageMock(t) - sut := NewBridgeService(cfg, 32, []uint{0, 1}, mockStorage) - depositCnt := uint(0) - networkID := uint(0) + sut := NewBridgeService(cfg, 32, []uint32{0, 1}, mockStorage) + var ( + depositCnt uint32 + networkID uint32 + ) GER := common.Hash{} deposit := &etherman.Deposit{} mockStorage.EXPECT().GetDeposit(mock.Anything, depositCnt, networkID, mock.Anything).Return(deposit, nil) diff --git a/synchronizer/interfaces.go b/synchronizer/interfaces.go index 488311d4..efe9f5b1 100644 --- a/synchronizer/interfaces.go +++ b/synchronizer/interfaces.go @@ -16,11 +16,11 @@ type ethermanInterface interface { HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) GetRollupInfoByBlockRange(ctx context.Context, fromBlock uint64, toBlock *uint64) ([]etherman.Block, map[common.Hash][]etherman.Order, error) EthBlockByNumber(ctx context.Context, blockNumber uint64) (*types.Block, error) - GetNetworkID() uint + GetNetworkID() uint32 } type storageInterface interface { - GetLastBlock(ctx context.Context, networkID uint, dbTx pgx.Tx) (*etherman.Block, error) + GetLastBlock(ctx context.Context, networkID uint32, dbTx pgx.Tx) (*etherman.Block, error) Rollback(ctx context.Context, dbTx pgx.Tx) error BeginDBTransaction(ctx context.Context) (pgx.Tx, error) Commit(ctx context.Context, dbTx pgx.Tx) error @@ -29,18 +29,17 @@ type storageInterface interface { AddDeposit(ctx context.Context, deposit *etherman.Deposit, dbTx pgx.Tx) (uint64, error) AddClaim(ctx context.Context, claim *etherman.Claim, dbTx pgx.Tx) error AddTokenWrapped(ctx context.Context, tokenWrapped *etherman.TokenWrapped, dbTx pgx.Tx) error - Reset(ctx context.Context, blockNumber uint64, networkID uint, dbTx pgx.Tx) error - GetPreviousBlock(ctx context.Context, networkID uint, offset uint64, dbTx pgx.Tx) (*etherman.Block, error) - GetNumberDeposits(ctx context.Context, origNetworkID uint, blockNumber uint64, dbTx pgx.Tx) (uint64, error) + Reset(ctx context.Context, blockNumber uint64, networkID uint32, dbTx pgx.Tx) error + GetPreviousBlock(ctx context.Context, networkID uint32, offset uint64, dbTx pgx.Tx) (*etherman.Block, error) + GetNumberDeposits(ctx context.Context, origNetworkID uint32, blockNumber uint64, dbTx pgx.Tx) (uint32, error) AddTrustedGlobalExitRoot(ctx context.Context, trustedExitRoot *etherman.GlobalExitRoot, dbTx pgx.Tx) (bool, error) GetLatestL1SyncedExitRoot(ctx context.Context, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) - CheckIfRootExists(ctx context.Context, root []byte, network uint8, dbTx pgx.Tx) (bool, error) + CheckIfRootExists(ctx context.Context, root []byte, network uint32, dbTx pgx.Tx) (bool, error) } type bridgectrlInterface interface { AddDeposit(ctx context.Context, deposit *etherman.Deposit, depositID uint64, dbTx pgx.Tx) error - ReorgMT(ctx context.Context, depositCount, networkID uint, dbTx pgx.Tx) error - GetNetworkID(networkID uint) (uint8, error) + ReorgMT(ctx context.Context, depositCount, networkID uint32, dbTx pgx.Tx) error AddRollupExitLeaf(ctx context.Context, rollupLeaf etherman.RollupExitLeaf, dbTx pgx.Tx) error } diff --git a/synchronizer/mock_bridgectrl.go b/synchronizer/mock_bridgectrl.go index 6f5943d4..a1f85858 100644 --- a/synchronizer/mock_bridgectrl.go +++ 
b/synchronizer/mock_bridgectrl.go @@ -121,64 +121,8 @@ func (_c *bridgectrlMock_AddRollupExitLeaf_Call) RunAndReturn(run func(context.C return _c } -// GetNetworkID provides a mock function with given fields: networkID -func (_m *bridgectrlMock) GetNetworkID(networkID uint) (uint8, error) { - ret := _m.Called(networkID) - - if len(ret) == 0 { - panic("no return value specified for GetNetworkID") - } - - var r0 uint8 - var r1 error - if rf, ok := ret.Get(0).(func(uint) (uint8, error)); ok { - return rf(networkID) - } - if rf, ok := ret.Get(0).(func(uint) uint8); ok { - r0 = rf(networkID) - } else { - r0 = ret.Get(0).(uint8) - } - - if rf, ok := ret.Get(1).(func(uint) error); ok { - r1 = rf(networkID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// bridgectrlMock_GetNetworkID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNetworkID' -type bridgectrlMock_GetNetworkID_Call struct { - *mock.Call -} - -// GetNetworkID is a helper method to define mock.On call -// - networkID uint -func (_e *bridgectrlMock_Expecter) GetNetworkID(networkID interface{}) *bridgectrlMock_GetNetworkID_Call { - return &bridgectrlMock_GetNetworkID_Call{Call: _e.mock.On("GetNetworkID", networkID)} -} - -func (_c *bridgectrlMock_GetNetworkID_Call) Run(run func(networkID uint)) *bridgectrlMock_GetNetworkID_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(uint)) - }) - return _c -} - -func (_c *bridgectrlMock_GetNetworkID_Call) Return(_a0 uint8, _a1 error) *bridgectrlMock_GetNetworkID_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *bridgectrlMock_GetNetworkID_Call) RunAndReturn(run func(uint) (uint8, error)) *bridgectrlMock_GetNetworkID_Call { - _c.Call.Return(run) - return _c -} - // ReorgMT provides a mock function with given fields: ctx, depositCount, networkID, dbTx -func (_m *bridgectrlMock) ReorgMT(ctx context.Context, depositCount uint, networkID uint, dbTx pgx.Tx) error { +func (_m *bridgectrlMock) ReorgMT(ctx context.Context, depositCount uint32, networkID uint32, dbTx pgx.Tx) error { ret := _m.Called(ctx, depositCount, networkID, dbTx) if len(ret) == 0 { @@ -186,7 +130,7 @@ func (_m *bridgectrlMock) ReorgMT(ctx context.Context, depositCount uint, networ } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, uint, uint, pgx.Tx) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint32, pgx.Tx) error); ok { r0 = rf(ctx, depositCount, networkID, dbTx) } else { r0 = ret.Error(0) @@ -202,16 +146,16 @@ type bridgectrlMock_ReorgMT_Call struct { // ReorgMT is a helper method to define mock.On call // - ctx context.Context -// - depositCount uint -// - networkID uint +// - depositCount uint32 +// - networkID uint32 // - dbTx pgx.Tx func (_e *bridgectrlMock_Expecter) ReorgMT(ctx interface{}, depositCount interface{}, networkID interface{}, dbTx interface{}) *bridgectrlMock_ReorgMT_Call { return &bridgectrlMock_ReorgMT_Call{Call: _e.mock.On("ReorgMT", ctx, depositCount, networkID, dbTx)} } -func (_c *bridgectrlMock_ReorgMT_Call) Run(run func(ctx context.Context, depositCount uint, networkID uint, dbTx pgx.Tx)) *bridgectrlMock_ReorgMT_Call { +func (_c *bridgectrlMock_ReorgMT_Call) Run(run func(ctx context.Context, depositCount uint32, networkID uint32, dbTx pgx.Tx)) *bridgectrlMock_ReorgMT_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint), args[2].(uint), args[3].(pgx.Tx)) + run(args[0].(context.Context), args[1].(uint32), args[2].(uint32), args[3].(pgx.Tx)) }) 
return _c } @@ -221,7 +165,7 @@ func (_c *bridgectrlMock_ReorgMT_Call) Return(_a0 error) *bridgectrlMock_ReorgMT return _c } -func (_c *bridgectrlMock_ReorgMT_Call) RunAndReturn(run func(context.Context, uint, uint, pgx.Tx) error) *bridgectrlMock_ReorgMT_Call { +func (_c *bridgectrlMock_ReorgMT_Call) RunAndReturn(run func(context.Context, uint32, uint32, pgx.Tx) error) *bridgectrlMock_ReorgMT_Call { _c.Call.Return(run) return _c } diff --git a/synchronizer/mock_etherman.go b/synchronizer/mock_etherman.go index 6b13f61d..32f6c35c 100644 --- a/synchronizer/mock_etherman.go +++ b/synchronizer/mock_etherman.go @@ -88,18 +88,18 @@ func (_c *ethermanMock_EthBlockByNumber_Call) RunAndReturn(run func(context.Cont } // GetNetworkID provides a mock function with given fields: -func (_m *ethermanMock) GetNetworkID() uint { +func (_m *ethermanMock) GetNetworkID() uint32 { ret := _m.Called() if len(ret) == 0 { panic("no return value specified for GetNetworkID") } - var r0 uint - if rf, ok := ret.Get(0).(func() uint); ok { + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { r0 = rf() } else { - r0 = ret.Get(0).(uint) + r0 = ret.Get(0).(uint32) } return r0 @@ -122,12 +122,12 @@ func (_c *ethermanMock_GetNetworkID_Call) Run(run func()) *ethermanMock_GetNetwo return _c } -func (_c *ethermanMock_GetNetworkID_Call) Return(_a0 uint) *ethermanMock_GetNetworkID_Call { +func (_c *ethermanMock_GetNetworkID_Call) Return(_a0 uint32) *ethermanMock_GetNetworkID_Call { _c.Call.Return(_a0) return _c } -func (_c *ethermanMock_GetNetworkID_Call) RunAndReturn(run func() uint) *ethermanMock_GetNetworkID_Call { +func (_c *ethermanMock_GetNetworkID_Call) RunAndReturn(run func() uint32) *ethermanMock_GetNetworkID_Call { _c.Call.Return(run) return _c } diff --git a/synchronizer/mock_storage.go b/synchronizer/mock_storage.go index 89468d74..06bac9b0 100644 --- a/synchronizer/mock_storage.go +++ b/synchronizer/mock_storage.go @@ -401,7 +401,7 @@ func (_c *storageMock_BeginDBTransaction_Call) RunAndReturn(run func(context.Con } // CheckIfRootExists provides a mock function with given fields: ctx, root, network, dbTx -func (_m *storageMock) CheckIfRootExists(ctx context.Context, root []byte, network uint8, dbTx pgx.Tx) (bool, error) { +func (_m *storageMock) CheckIfRootExists(ctx context.Context, root []byte, network uint32, dbTx pgx.Tx) (bool, error) { ret := _m.Called(ctx, root, network, dbTx) if len(ret) == 0 { @@ -410,16 +410,16 @@ func (_m *storageMock) CheckIfRootExists(ctx context.Context, root []byte, netwo var r0 bool var r1 error - if rf, ok := ret.Get(0).(func(context.Context, []byte, uint8, pgx.Tx) (bool, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, []byte, uint32, pgx.Tx) (bool, error)); ok { return rf(ctx, root, network, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, []byte, uint8, pgx.Tx) bool); ok { + if rf, ok := ret.Get(0).(func(context.Context, []byte, uint32, pgx.Tx) bool); ok { r0 = rf(ctx, root, network, dbTx) } else { r0 = ret.Get(0).(bool) } - if rf, ok := ret.Get(1).(func(context.Context, []byte, uint8, pgx.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, []byte, uint32, pgx.Tx) error); ok { r1 = rf(ctx, root, network, dbTx) } else { r1 = ret.Error(1) @@ -436,15 +436,15 @@ type storageMock_CheckIfRootExists_Call struct { // CheckIfRootExists is a helper method to define mock.On call // - ctx context.Context // - root []byte -// - network uint8 +// - network uint32 // - dbTx pgx.Tx func (_e *storageMock_Expecter) CheckIfRootExists(ctx interface{}, 
root interface{}, network interface{}, dbTx interface{}) *storageMock_CheckIfRootExists_Call { return &storageMock_CheckIfRootExists_Call{Call: _e.mock.On("CheckIfRootExists", ctx, root, network, dbTx)} } -func (_c *storageMock_CheckIfRootExists_Call) Run(run func(ctx context.Context, root []byte, network uint8, dbTx pgx.Tx)) *storageMock_CheckIfRootExists_Call { +func (_c *storageMock_CheckIfRootExists_Call) Run(run func(ctx context.Context, root []byte, network uint32, dbTx pgx.Tx)) *storageMock_CheckIfRootExists_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].([]byte), args[2].(uint8), args[3].(pgx.Tx)) + run(args[0].(context.Context), args[1].([]byte), args[2].(uint32), args[3].(pgx.Tx)) }) return _c } @@ -454,7 +454,7 @@ func (_c *storageMock_CheckIfRootExists_Call) Return(_a0 bool, _a1 error) *stora return _c } -func (_c *storageMock_CheckIfRootExists_Call) RunAndReturn(run func(context.Context, []byte, uint8, pgx.Tx) (bool, error)) *storageMock_CheckIfRootExists_Call { +func (_c *storageMock_CheckIfRootExists_Call) RunAndReturn(run func(context.Context, []byte, uint32, pgx.Tx) (bool, error)) *storageMock_CheckIfRootExists_Call { _c.Call.Return(run) return _c } @@ -507,7 +507,7 @@ func (_c *storageMock_Commit_Call) RunAndReturn(run func(context.Context, pgx.Tx } // GetLastBlock provides a mock function with given fields: ctx, networkID, dbTx -func (_m *storageMock) GetLastBlock(ctx context.Context, networkID uint, dbTx pgx.Tx) (*etherman.Block, error) { +func (_m *storageMock) GetLastBlock(ctx context.Context, networkID uint32, dbTx pgx.Tx) (*etherman.Block, error) { ret := _m.Called(ctx, networkID, dbTx) if len(ret) == 0 { @@ -516,10 +516,10 @@ func (_m *storageMock) GetLastBlock(ctx context.Context, networkID uint, dbTx pg var r0 *etherman.Block var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint, pgx.Tx) (*etherman.Block, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, pgx.Tx) (*etherman.Block, error)); ok { return rf(ctx, networkID, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, uint, pgx.Tx) *etherman.Block); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, pgx.Tx) *etherman.Block); ok { r0 = rf(ctx, networkID, dbTx) } else { if ret.Get(0) != nil { @@ -527,7 +527,7 @@ func (_m *storageMock) GetLastBlock(ctx context.Context, networkID uint, dbTx pg } } - if rf, ok := ret.Get(1).(func(context.Context, uint, pgx.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, uint32, pgx.Tx) error); ok { r1 = rf(ctx, networkID, dbTx) } else { r1 = ret.Error(1) @@ -543,15 +543,15 @@ type storageMock_GetLastBlock_Call struct { // GetLastBlock is a helper method to define mock.On call // - ctx context.Context -// - networkID uint +// - networkID uint32 // - dbTx pgx.Tx func (_e *storageMock_Expecter) GetLastBlock(ctx interface{}, networkID interface{}, dbTx interface{}) *storageMock_GetLastBlock_Call { return &storageMock_GetLastBlock_Call{Call: _e.mock.On("GetLastBlock", ctx, networkID, dbTx)} } -func (_c *storageMock_GetLastBlock_Call) Run(run func(ctx context.Context, networkID uint, dbTx pgx.Tx)) *storageMock_GetLastBlock_Call { +func (_c *storageMock_GetLastBlock_Call) Run(run func(ctx context.Context, networkID uint32, dbTx pgx.Tx)) *storageMock_GetLastBlock_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint), args[2].(pgx.Tx)) + run(args[0].(context.Context), args[1].(uint32), args[2].(pgx.Tx)) }) return _c } @@ -561,7 +561,7 @@ func 
(_c *storageMock_GetLastBlock_Call) Return(_a0 *etherman.Block, _a1 error) return _c } -func (_c *storageMock_GetLastBlock_Call) RunAndReturn(run func(context.Context, uint, pgx.Tx) (*etherman.Block, error)) *storageMock_GetLastBlock_Call { +func (_c *storageMock_GetLastBlock_Call) RunAndReturn(run func(context.Context, uint32, pgx.Tx) (*etherman.Block, error)) *storageMock_GetLastBlock_Call { _c.Call.Return(run) return _c } @@ -626,25 +626,25 @@ func (_c *storageMock_GetLatestL1SyncedExitRoot_Call) RunAndReturn(run func(cont } // GetNumberDeposits provides a mock function with given fields: ctx, origNetworkID, blockNumber, dbTx -func (_m *storageMock) GetNumberDeposits(ctx context.Context, origNetworkID uint, blockNumber uint64, dbTx pgx.Tx) (uint64, error) { +func (_m *storageMock) GetNumberDeposits(ctx context.Context, origNetworkID uint32, blockNumber uint64, dbTx pgx.Tx) (uint32, error) { ret := _m.Called(ctx, origNetworkID, blockNumber, dbTx) if len(ret) == 0 { panic("no return value specified for GetNumberDeposits") } - var r0 uint64 + var r0 uint32 var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint, uint64, pgx.Tx) (uint64, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint64, pgx.Tx) (uint32, error)); ok { return rf(ctx, origNetworkID, blockNumber, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, uint, uint64, pgx.Tx) uint64); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint64, pgx.Tx) uint32); ok { r0 = rf(ctx, origNetworkID, blockNumber, dbTx) } else { - r0 = ret.Get(0).(uint64) + r0 = ret.Get(0).(uint32) } - if rf, ok := ret.Get(1).(func(context.Context, uint, uint64, pgx.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, uint32, uint64, pgx.Tx) error); ok { r1 = rf(ctx, origNetworkID, blockNumber, dbTx) } else { r1 = ret.Error(1) @@ -660,32 +660,32 @@ type storageMock_GetNumberDeposits_Call struct { // GetNumberDeposits is a helper method to define mock.On call // - ctx context.Context -// - origNetworkID uint +// - origNetworkID uint32 // - blockNumber uint64 // - dbTx pgx.Tx func (_e *storageMock_Expecter) GetNumberDeposits(ctx interface{}, origNetworkID interface{}, blockNumber interface{}, dbTx interface{}) *storageMock_GetNumberDeposits_Call { return &storageMock_GetNumberDeposits_Call{Call: _e.mock.On("GetNumberDeposits", ctx, origNetworkID, blockNumber, dbTx)} } -func (_c *storageMock_GetNumberDeposits_Call) Run(run func(ctx context.Context, origNetworkID uint, blockNumber uint64, dbTx pgx.Tx)) *storageMock_GetNumberDeposits_Call { +func (_c *storageMock_GetNumberDeposits_Call) Run(run func(ctx context.Context, origNetworkID uint32, blockNumber uint64, dbTx pgx.Tx)) *storageMock_GetNumberDeposits_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint), args[2].(uint64), args[3].(pgx.Tx)) + run(args[0].(context.Context), args[1].(uint32), args[2].(uint64), args[3].(pgx.Tx)) }) return _c } -func (_c *storageMock_GetNumberDeposits_Call) Return(_a0 uint64, _a1 error) *storageMock_GetNumberDeposits_Call { +func (_c *storageMock_GetNumberDeposits_Call) Return(_a0 uint32, _a1 error) *storageMock_GetNumberDeposits_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *storageMock_GetNumberDeposits_Call) RunAndReturn(run func(context.Context, uint, uint64, pgx.Tx) (uint64, error)) *storageMock_GetNumberDeposits_Call { +func (_c *storageMock_GetNumberDeposits_Call) RunAndReturn(run func(context.Context, uint32, uint64, pgx.Tx) (uint32, error)) 
*storageMock_GetNumberDeposits_Call { _c.Call.Return(run) return _c } // GetPreviousBlock provides a mock function with given fields: ctx, networkID, offset, dbTx -func (_m *storageMock) GetPreviousBlock(ctx context.Context, networkID uint, offset uint64, dbTx pgx.Tx) (*etherman.Block, error) { +func (_m *storageMock) GetPreviousBlock(ctx context.Context, networkID uint32, offset uint64, dbTx pgx.Tx) (*etherman.Block, error) { ret := _m.Called(ctx, networkID, offset, dbTx) if len(ret) == 0 { @@ -694,10 +694,10 @@ func (_m *storageMock) GetPreviousBlock(ctx context.Context, networkID uint, off var r0 *etherman.Block var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint, uint64, pgx.Tx) (*etherman.Block, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint64, pgx.Tx) (*etherman.Block, error)); ok { return rf(ctx, networkID, offset, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, uint, uint64, pgx.Tx) *etherman.Block); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint32, uint64, pgx.Tx) *etherman.Block); ok { r0 = rf(ctx, networkID, offset, dbTx) } else { if ret.Get(0) != nil { @@ -705,7 +705,7 @@ func (_m *storageMock) GetPreviousBlock(ctx context.Context, networkID uint, off } } - if rf, ok := ret.Get(1).(func(context.Context, uint, uint64, pgx.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, uint32, uint64, pgx.Tx) error); ok { r1 = rf(ctx, networkID, offset, dbTx) } else { r1 = ret.Error(1) @@ -721,16 +721,16 @@ type storageMock_GetPreviousBlock_Call struct { // GetPreviousBlock is a helper method to define mock.On call // - ctx context.Context -// - networkID uint +// - networkID uint32 // - offset uint64 // - dbTx pgx.Tx func (_e *storageMock_Expecter) GetPreviousBlock(ctx interface{}, networkID interface{}, offset interface{}, dbTx interface{}) *storageMock_GetPreviousBlock_Call { return &storageMock_GetPreviousBlock_Call{Call: _e.mock.On("GetPreviousBlock", ctx, networkID, offset, dbTx)} } -func (_c *storageMock_GetPreviousBlock_Call) Run(run func(ctx context.Context, networkID uint, offset uint64, dbTx pgx.Tx)) *storageMock_GetPreviousBlock_Call { +func (_c *storageMock_GetPreviousBlock_Call) Run(run func(ctx context.Context, networkID uint32, offset uint64, dbTx pgx.Tx)) *storageMock_GetPreviousBlock_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint), args[2].(uint64), args[3].(pgx.Tx)) + run(args[0].(context.Context), args[1].(uint32), args[2].(uint64), args[3].(pgx.Tx)) }) return _c } @@ -740,13 +740,13 @@ func (_c *storageMock_GetPreviousBlock_Call) Return(_a0 *etherman.Block, _a1 err return _c } -func (_c *storageMock_GetPreviousBlock_Call) RunAndReturn(run func(context.Context, uint, uint64, pgx.Tx) (*etherman.Block, error)) *storageMock_GetPreviousBlock_Call { +func (_c *storageMock_GetPreviousBlock_Call) RunAndReturn(run func(context.Context, uint32, uint64, pgx.Tx) (*etherman.Block, error)) *storageMock_GetPreviousBlock_Call { _c.Call.Return(run) return _c } // Reset provides a mock function with given fields: ctx, blockNumber, networkID, dbTx -func (_m *storageMock) Reset(ctx context.Context, blockNumber uint64, networkID uint, dbTx pgx.Tx) error { +func (_m *storageMock) Reset(ctx context.Context, blockNumber uint64, networkID uint32, dbTx pgx.Tx) error { ret := _m.Called(ctx, blockNumber, networkID, dbTx) if len(ret) == 0 { @@ -754,7 +754,7 @@ func (_m *storageMock) Reset(ctx context.Context, blockNumber uint64, networkID } var r0 error - if rf, ok := 
ret.Get(0).(func(context.Context, uint64, uint, pgx.Tx) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint32, pgx.Tx) error); ok { r0 = rf(ctx, blockNumber, networkID, dbTx) } else { r0 = ret.Error(0) @@ -771,15 +771,15 @@ type storageMock_Reset_Call struct { // Reset is a helper method to define mock.On call // - ctx context.Context // - blockNumber uint64 -// - networkID uint +// - networkID uint32 // - dbTx pgx.Tx func (_e *storageMock_Expecter) Reset(ctx interface{}, blockNumber interface{}, networkID interface{}, dbTx interface{}) *storageMock_Reset_Call { return &storageMock_Reset_Call{Call: _e.mock.On("Reset", ctx, blockNumber, networkID, dbTx)} } -func (_c *storageMock_Reset_Call) Run(run func(ctx context.Context, blockNumber uint64, networkID uint, dbTx pgx.Tx)) *storageMock_Reset_Call { +func (_c *storageMock_Reset_Call) Run(run func(ctx context.Context, blockNumber uint64, networkID uint32, dbTx pgx.Tx)) *storageMock_Reset_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint64), args[2].(uint), args[3].(pgx.Tx)) + run(args[0].(context.Context), args[1].(uint64), args[2].(uint32), args[3].(pgx.Tx)) }) return _c } @@ -789,7 +789,7 @@ func (_c *storageMock_Reset_Call) Return(_a0 error) *storageMock_Reset_Call { return _c } -func (_c *storageMock_Reset_Call) RunAndReturn(run func(context.Context, uint64, uint, pgx.Tx) error) *storageMock_Reset_Call { +func (_c *storageMock_Reset_Call) RunAndReturn(run func(context.Context, uint64, uint32, pgx.Tx) error) *storageMock_Reset_Call { _c.Call.Return(run) return _c } diff --git a/synchronizer/synchronizer.go b/synchronizer/synchronizer.go index 5763c35b..0be6dc05 100644 --- a/synchronizer/synchronizer.go +++ b/synchronizer/synchronizer.go @@ -29,14 +29,14 @@ type ClientSynchronizer struct { cancelCtx context.CancelFunc genBlockNumber uint64 cfg Config - networkID uint + networkID uint32 chExitRootEventL2 chan *etherman.GlobalExitRoot chsExitRootEvent []chan *etherman.GlobalExitRoot - chSynced chan uint + chSynced chan uint32 zkEVMClient zkEVMClientInterface synced bool l1RollupExitRoot common.Hash - allNetworkIDs []uint + allNetworkIDs []uint32 } // NewSynchronizer creates and initializes an instance of Synchronizer @@ -49,9 +49,9 @@ func NewSynchronizer( genBlockNumber uint64, chExitRootEventL2 chan *etherman.GlobalExitRoot, chsExitRootEvent []chan *etherman.GlobalExitRoot, - chSynced chan uint, + chSynced chan uint32, cfg Config, - allNetworkIDs []uint) (Synchronizer, error) { + allNetworkIDs []uint32) (Synchronizer, error) { ctx, cancel := context.WithCancel(parentCtx) networkID := ethMan.GetNetworkID() ger, err := storage.(storageInterface).GetLatestL1SyncedExitRoot(ctx, nil) @@ -498,7 +498,7 @@ func (s *ClientSynchronizer) resetState(blockNumber uint64) error { return err } - err = s.bridgeCtrl.ReorgMT(s.ctx, uint(depositCnt), s.networkID, dbTx) + err = s.bridgeCtrl.ReorgMT(s.ctx, depositCnt, s.networkID, dbTx) if err != nil { log.Error("networkID: %d, error resetting ReorgMT the state. 
Error: %v", s.networkID, err) rollbackErr := s.storage.Rollback(s.ctx, dbTx) @@ -611,7 +611,7 @@ func (s *ClientSynchronizer) processVerifyBatch(verifyBatch etherman.VerifiedBat } if isRollupSyncing { // Just check that the calculated RollupExitRoot is fine - ok, err := s.storage.CheckIfRootExists(s.ctx, verifyBatch.LocalExitRoot.Bytes(), uint8(verifyBatch.RollupID), dbTx) + ok, err := s.storage.CheckIfRootExists(s.ctx, verifyBatch.LocalExitRoot.Bytes(), verifyBatch.RollupID, dbTx) if err != nil { log.Errorf("networkID: %d, error Checking if root exists. Error: %v", s.networkID, err) rollbackErr := s.storage.Rollback(s.ctx, dbTx) diff --git a/synchronizer/synchronizer_test.go b/synchronizer/synchronizer_test.go index 9710ae28..9daaf9a1 100644 --- a/synchronizer/synchronizer_test.go +++ b/synchronizer/synchronizer_test.go @@ -34,7 +34,7 @@ func NewSynchronizerTest( genBlockNumber uint64, chExitRootEventL2 chan *etherman.GlobalExitRoot, chsExitRootEvent []chan *etherman.GlobalExitRoot, - chSynced chan uint, + chSynced chan uint32, cfg Config) (Synchronizer, error) { ctx, cancel := context.WithCancel(parentCtx) networkID := ethMan.GetNetworkID() @@ -87,10 +87,10 @@ func TestSyncGer(t *testing.T) { SyncChunkSize: 10, } ctx := mock.MatchedBy(func(ctx context.Context) bool { return ctx != nil }) - m.Etherman.On("GetNetworkID").Return(uint(0)) + m.Etherman.On("GetNetworkID").Return(uint32(0)) m.Storage.On("GetLatestL1SyncedExitRoot", ctx, nil).Return(ðerman.GlobalExitRoot{}, gerror.ErrStorageNotFound).Once() chEvent := make(chan *etherman.GlobalExitRoot) - chSynced := make(chan uint) + chSynced := make(chan uint32) parentCtx := context.Background() sync, err := NewSynchronizerTest(parentCtx, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, []chan *etherman.GlobalExitRoot{chEvent}, chSynced, cfg) require.NoError(t, err) @@ -115,7 +115,7 @@ func TestSyncGer(t *testing.T) { ethBlock0 := types.NewBlockWithHeader(ethHeader0) ethBlock1 := types.NewBlockWithHeader(ethHeader1) lastBlock := ðerman.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64()} - var networkID uint = 0 + var networkID uint32 = 0 m.Storage. On("GetLastBlock", ctx, networkID, nil). @@ -223,10 +223,10 @@ func TestSyncTrustedGer(t *testing.T) { SyncChunkSize: 10, } ctx := mock.MatchedBy(func(ctx context.Context) bool { return ctx != nil }) - m.Etherman.On("GetNetworkID").Return(uint(1)) + m.Etherman.On("GetNetworkID").Return(uint32(1)) m.Storage.On("GetLatestL1SyncedExitRoot", ctx, nil).Return(ðerman.GlobalExitRoot{}, gerror.ErrStorageNotFound).Once() chEvent := make(chan *etherman.GlobalExitRoot) - chSynced := make(chan uint) + chSynced := make(chan uint32) parentCtx := context.Background() sync, err := NewSynchronizerTest(parentCtx, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, []chan *etherman.GlobalExitRoot{chEvent}, chSynced, cfg) require.NoError(t, err) @@ -251,7 +251,7 @@ func TestSyncTrustedGer(t *testing.T) { ethBlock0 := types.NewBlockWithHeader(ethHeader0) ethBlock1 := types.NewBlockWithHeader(ethHeader1) lastBlock := ðerman.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64()} - var networkID uint = 1 + var networkID uint32 = 1 m.Storage. On("GetLastBlock", ctx, networkID, nil). 
@@ -365,10 +365,10 @@ func TestReorg(t *testing.T) { } ctx := mock.MatchedBy(func(ctx context.Context) bool { return ctx != nil }) parentContext := context.Background() - m.Etherman.On("GetNetworkID").Return(uint(0)) + m.Etherman.On("GetNetworkID").Return(uint32(0)) m.Storage.On("GetLatestL1SyncedExitRoot", ctx, nil).Return(ðerman.GlobalExitRoot{}, gerror.ErrStorageNotFound).Once() chEvent := make(chan *etherman.GlobalExitRoot) - chSynced := make(chan uint) + chSynced := make(chan uint32) sync, err := NewSynchronizerTest(parentContext, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, []chan *etherman.GlobalExitRoot{chEvent}, chSynced, cfg) require.NoError(t, err) @@ -403,7 +403,7 @@ func TestReorg(t *testing.T) { lastBlock0 := ðerman.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()} lastBlock1 := ðerman.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()} - var networkID uint = 0 + var networkID uint32 = 0 m.Storage. On("GetLastBlock", ctx, networkID, nil). @@ -475,14 +475,14 @@ func TestReorg(t *testing.T) { Return(nil). Once() - depositCnt := 1 + var depositCnt uint32 = 1 m.Storage. On("GetNumberDeposits", ctx, networkID, ethBlock0.NumberU64(), m.DbTx). - Return(uint64(depositCnt), nil). + Return(depositCnt, nil). Once() m.BridgeCtrl. - On("ReorgMT", ctx, uint(depositCnt), networkID, m.DbTx). + On("ReorgMT", ctx, depositCnt, networkID, m.DbTx). Return(nil). Once() @@ -613,10 +613,10 @@ func TestLatestSyncedBlockEmpty(t *testing.T) { } ctx := mock.MatchedBy(func(ctx context.Context) bool { return ctx != nil }) parentContext := context.Background() - m.Etherman.On("GetNetworkID").Return(uint(0)) + m.Etherman.On("GetNetworkID").Return(uint32(0)) m.Storage.On("GetLatestL1SyncedExitRoot", ctx, nil).Return(ðerman.GlobalExitRoot{}, gerror.ErrStorageNotFound).Once() chEvent := make(chan *etherman.GlobalExitRoot) - chSynced := make(chan uint) + chSynced := make(chan uint32) sync, err := NewSynchronizerTest(parentContext, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, []chan *etherman.GlobalExitRoot{chEvent}, chSynced, cfg) require.NoError(t, err) @@ -645,7 +645,7 @@ func TestLatestSyncedBlockEmpty(t *testing.T) { lastBlock0 := ðerman.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()} lastBlock1 := ðerman.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()} - var networkID uint = 0 + var networkID uint32 = 0 m.Storage. On("GetLastBlock", ctx, networkID, nil). @@ -704,14 +704,14 @@ func TestLatestSyncedBlockEmpty(t *testing.T) { Return(nil). Once() - depositCnt := 1 + var depositCnt uint32 = 1 m.Storage. On("GetNumberDeposits", ctx, networkID, ethBlock0.NumberU64(), m.DbTx). - Return(uint64(depositCnt), nil). + Return(depositCnt, nil). Once() m.BridgeCtrl. - On("ReorgMT", ctx, uint(depositCnt), networkID, m.DbTx). + On("ReorgMT", ctx, depositCnt, networkID, m.DbTx). Return(nil). 
Once() @@ -773,10 +773,10 @@ func TestRegularReorg(t *testing.T) { } ctx := mock.MatchedBy(func(ctx context.Context) bool { return ctx != nil }) parentContext := context.Background() - m.Etherman.On("GetNetworkID").Return(uint(0)) + m.Etherman.On("GetNetworkID").Return(uint32(0)) m.Storage.On("GetLatestL1SyncedExitRoot", ctx, nil).Return(ðerman.GlobalExitRoot{}, gerror.ErrStorageNotFound).Once() chEvent := make(chan *etherman.GlobalExitRoot) - chSynced := make(chan uint) + chSynced := make(chan uint32) sync, err := NewSynchronizerTest(parentContext, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, []chan *etherman.GlobalExitRoot{chEvent}, chSynced, cfg) require.NoError(t, err) @@ -807,7 +807,7 @@ func TestRegularReorg(t *testing.T) { lastBlock0 := ðerman.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()} lastBlock1 := ðerman.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()} - var networkID uint = 0 + var networkID uint32 = 0 m.Storage. On("GetLastBlock", ctx, networkID, nil). @@ -854,14 +854,14 @@ func TestRegularReorg(t *testing.T) { Return(nil). Once() - depositCnt := 1 + var depositCnt uint32 = 1 m.Storage. On("GetNumberDeposits", ctx, networkID, ethBlock0.NumberU64(), m.DbTx). - Return(uint64(depositCnt), nil). + Return(depositCnt, nil). Once() m.BridgeCtrl. - On("ReorgMT", ctx, uint(depositCnt), networkID, m.DbTx). + On("ReorgMT", ctx, depositCnt, networkID, m.DbTx). Return(nil). Once() @@ -983,10 +983,10 @@ func TestLatestSyncedBlockEmptyWithExtraReorg(t *testing.T) { } ctx := mock.MatchedBy(func(ctx context.Context) bool { return ctx != nil }) parentContext := context.Background() - m.Etherman.On("GetNetworkID").Return(uint(0)) + m.Etherman.On("GetNetworkID").Return(uint32(0)) m.Storage.On("GetLatestL1SyncedExitRoot", ctx, nil).Return(ðerman.GlobalExitRoot{}, gerror.ErrStorageNotFound).Once() chEvent := make(chan *etherman.GlobalExitRoot) - chSynced := make(chan uint) + chSynced := make(chan uint32) sync, err := NewSynchronizerTest(parentContext, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, []chan *etherman.GlobalExitRoot{chEvent}, chSynced, cfg) require.NoError(t, err) @@ -1018,7 +1018,7 @@ func TestLatestSyncedBlockEmptyWithExtraReorg(t *testing.T) { lastBlock0 := ðerman.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()} lastBlock1 := ðerman.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()} lastBlock2 := ðerman.Block{BlockHash: ethBlock2.Hash(), BlockNumber: ethBlock2.Number().Uint64(), ParentHash: ethBlock2.ParentHash()} - var networkID uint = 0 + var networkID uint32 = 0 m.Storage. On("GetLastBlock", ctx, networkID, nil). @@ -1093,14 +1093,14 @@ func TestLatestSyncedBlockEmptyWithExtraReorg(t *testing.T) { Return(nil). Once() - depositCnt := 1 + var depositCnt uint32 = 1 m.Storage. On("GetNumberDeposits", ctx, networkID, ethBlock0.NumberU64(), m.DbTx). - Return(uint64(depositCnt), nil). + Return(depositCnt, nil). Once() m.BridgeCtrl. - On("ReorgMT", ctx, uint(depositCnt), networkID, m.DbTx). + On("ReorgMT", ctx, depositCnt, networkID, m.DbTx). Return(nil). 
Once() @@ -1189,10 +1189,10 @@ func TestCallFromEmptyBlockAndReorg(t *testing.T) { } ctx := mock.MatchedBy(func(ctx context.Context) bool { return ctx != nil }) parentContext := context.Background() - m.Etherman.On("GetNetworkID").Return(uint(0)) + m.Etherman.On("GetNetworkID").Return(uint32(0)) m.Storage.On("GetLatestL1SyncedExitRoot", ctx, nil).Return(ðerman.GlobalExitRoot{}, gerror.ErrStorageNotFound).Once() chEvent := make(chan *etherman.GlobalExitRoot) - chSynced := make(chan uint) + chSynced := make(chan uint32) sync, err := NewSynchronizerTest(parentContext, m.Storage, m.BridgeCtrl, m.Etherman, m.ZkEVMClient, genBlockNumber, chEvent, []chan *etherman.GlobalExitRoot{chEvent}, chSynced, cfg) require.NoError(t, err) @@ -1223,7 +1223,7 @@ func TestCallFromEmptyBlockAndReorg(t *testing.T) { lastBlock0 := ðerman.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()} lastBlock1 := ðerman.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()} - var networkID uint = 0 + var networkID uint32 = 0 m.Storage. On("GetLastBlock", ctx, networkID, nil). @@ -1295,14 +1295,14 @@ func TestCallFromEmptyBlockAndReorg(t *testing.T) { Return(nil). Once() - depositCnt := 1 + var depositCnt uint32 = 1 m.Storage. On("GetNumberDeposits", ctx, networkID, ethBlock0.NumberU64(), m.DbTx). - Return(uint64(depositCnt), nil). + Return(depositCnt, nil). Once() m.BridgeCtrl. - On("ReorgMT", ctx, uint(depositCnt), networkID, m.DbTx). + On("ReorgMT", ctx, depositCnt, networkID, m.DbTx). Return(nil). Once() diff --git a/test/benchmark/api_test.go b/test/benchmark/api_test.go index 02590308..8aed493c 100644 --- a/test/benchmark/api_test.go +++ b/test/benchmark/api_test.go @@ -18,8 +18,8 @@ import ( ) var ( - networks []uint = []uint{0, 1} - addresses = initAddresses(10) + networks = []uint32{0, 1} + addresses = initAddresses(10) ) func init() { @@ -51,7 +51,7 @@ func initAddresses(count int) []common.Address { return res } -func randDeposit(r *rand.Rand, depositCnt uint, blockID uint64, networkID int) *etherman.Deposit { +func randDeposit(r *rand.Rand, depositCnt uint32, blockID uint64, networkID uint32) *etherman.Deposit { return ðerman.Deposit{ LeafType: 0, OriginalNetwork: networks[0], @@ -73,9 +73,9 @@ func initServer(b *testing.B, bench benchmark) *bridgectrl.BridgeController { require.NoError(b, err) b.StartTimer() ctx := context.Background() - counts := []uint{0, 0} + counts := []uint32{0, 0} for i := 0; i < bench.initSize+bench.postSize; i++ { - networkID := rand.Intn(2) //nolint: gosec + networkID := uint32(rand.Intn(2)) //nolint: gosec dbTx, err := store.BeginDBTransaction(context.Background()) require.NoError(b, err) id, err := store.AddBlock(context.TODO(), ðerman.Block{ @@ -145,7 +145,7 @@ func addDeposit(b *testing.B, bench benchmark) { depositIDs []uint64 ) for i := 0; i < bench.initSize; i++ { - deposit := randDeposit(r, uint(i), 0, 0) + deposit := randDeposit(r, uint32(i), 0, 0) depositID, err := store.AddDeposit(context.TODO(), deposit, nil) require.NoError(b, err) deposits = append(deposits, deposit) diff --git a/test/client/client.go b/test/client/client.go index 2225b7d2..940b8a77 100644 --- a/test/client/client.go +++ b/test/client/client.go @@ -152,7 +152,7 @@ func (c RestClient) GetClaims(destAddr string, offset, limit uint) ([]*pb.Claim, } // GetMerkleProof returns the merkle proof for the specific bridge transaction. 
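In the REST test client below, GetMerkleProof narrows depositCnt from uint64 to uint32 to match the service; both parameters still render with %d, so the query string it builds is unchanged. A hedged usage sketch (the client value c and the illustrative values are assumptions, not shown in this diff):

    // Assumed: c is a RestClient already configured with the bridge service URL.
    var networkID, depositCnt uint32 = 1, 42 // illustrative values
    proof, err := c.GetMerkleProof(networkID, depositCnt)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("merkle proof for deposit %d on network %d: %+v\n", depositCnt, networkID, proof)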
-func (c RestClient) GetMerkleProof(networkID uint32, depositCnt uint64) (*pb.Proof, error) { +func (c RestClient) GetMerkleProof(networkID uint32, depositCnt uint32) (*pb.Proof, error) { resp, err := http.Get(fmt.Sprintf("%s%s?net_id=%d&deposit_cnt=%d", c.bridgeURL, "/merkle-proof", networkID, depositCnt)) if err != nil { return nil, err diff --git a/test/e2e/bridge_network_eth_test.go b/test/e2e/bridge_network_eth_test.go index 09d372c6..73c2eb98 100644 --- a/test/e2e/bridge_network_eth_test.go +++ b/test/e2e/bridge_network_eth_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestCLaimAlreadyClaimedDepositL2toL1(t *testing.T) { +func TestClaimAlreadyClaimedDepositL2toL1(t *testing.T) { if testing.Short() { t.Skip() } @@ -33,7 +33,7 @@ func TestCLaimAlreadyClaimedDepositL2toL1(t *testing.T) { require.NoError(t, err) fmt.Println("Deposit: ", deposit) - err = manualClaimDeposit(ctx, testData, deposit) + err = manualClaimDeposit(ctx, testData, deposit, true) if !isAlreadyClaimedError(err) { require.NoError(t, err) } @@ -93,8 +93,8 @@ func TestEthTransferL2toL1(t *testing.T) { fmt.Println("ETH Balance ", ethInitialBalances.String()) amount := big.NewInt(12344321) txAssetHash := assetEthL2ToL1(ctx, testData, t, amount) - deposit, err := waitDepositToBeReadyToClaim(ctx, testData, txAssetHash, maxTimeToClaimReady) + deposit, err := waitDepositToBeReadyToClaim(ctx, testData, txAssetHash, maxTimeToClaimReady, "") require.NoError(t, err) - err = manualClaimDeposit(ctx, testData, deposit) + err = manualClaimDeposit(ctx, testData, deposit, true) require.NoError(t, err) } diff --git a/test/e2e/bridge_network_shared.go b/test/e2e/bridge_network_shared.go index e749574c..30545a78 100644 --- a/test/e2e/bridge_network_shared.go +++ b/test/e2e/bridge_network_shared.go @@ -243,10 +243,10 @@ func manualClaimDeposit(ctx context.Context, testData *bridge2e2TestData, deposi return err } -func generateGlobalIndex(deposit *pb.Deposit, rollupID uint) *big.Int { +func generateGlobalIndex(deposit *pb.Deposit, rollupID uint32) *big.Int { mainnetFlag := deposit.NetworkId == 0 rollupIndex := rollupID - 1 - localExitRootIndex := uint(deposit.DepositCnt) + localExitRootIndex := deposit.DepositCnt globalIndex := etherman.GenerateGlobalIndex(mainnetFlag, rollupIndex, localExitRootIndex) return globalIndex } diff --git a/test/e2e/bridge_test.go b/test/e2e/bridge_test.go index 7e8f10f7..8fe6b3a1 100644 --- a/test/e2e/bridge_test.go +++ b/test/e2e/bridge_test.go @@ -83,7 +83,7 @@ func TestE2E(t *testing.T) { require.NoError(t, err) // Check globalExitRoot - globalExitRoot2, err := opsman.GetTrustedGlobalExitRootSynced(ctx, uint(destNetwork)) + globalExitRoot2, err := opsman.GetTrustedGlobalExitRootSynced(ctx, destNetwork) require.NoError(t, err) log.Debugf("Before deposit global exit root: %v", globalExitRootSMC) log.Debugf("After deposit global exit root: %v", globalExitRoot2) @@ -129,7 +129,7 @@ func TestE2E(t *testing.T) { require.NoError(t, err) require.Equal(t, 0, big.NewInt(0).Cmp(balance)) // Get the claim data - smtProof, smtRollupProof, globaExitRoot, err := opsman.GetClaimData(ctx, uint(deposits[0].NetworkId), uint(deposits[0].DepositCnt)) + smtProof, smtRollupProof, globaExitRoot, err := opsman.GetClaimData(ctx, deposits[0].NetworkId, deposits[0].DepositCnt) require.NoError(t, err) // Claim funds in L1 @@ -189,7 +189,7 @@ func TestE2E(t *testing.T) { err = opsman.SendL1Deposit(ctx, tokenAddr, amount1, destNetwork, &destAddr) require.NoError(t, err) // Check globalExitRoot - 
globalExitRoot2, err := opsman.GetTrustedGlobalExitRootSynced(ctx, uint(destNetwork)) + globalExitRoot2, err := opsman.GetTrustedGlobalExitRootSynced(ctx, destNetwork) require.NoError(t, err) log.Debugf("Before deposits global exit root: %v", globalExitRootSMC) log.Debugf("After deposits global exit root: %v", globalExitRoot2) @@ -204,7 +204,7 @@ func TestE2E(t *testing.T) { deposits, err = opsman.GetBridgeInfoByDestAddr(ctx, &origAddr) require.NoError(t, err) t.Log("deposit: ", deposits[0]) - smtProof, smtRollupProof, globaExitRoot, err := opsman.GetClaimData(ctx, uint(deposits[0].NetworkId), uint(deposits[0].DepositCnt)) + smtProof, smtRollupProof, globaExitRoot, err := opsman.GetClaimData(ctx, deposits[0].NetworkId, deposits[0].DepositCnt) require.NoError(t, err) // Claim funds in L1 err = opsman.SendL1Claim(ctx, deposits[0], smtProof, smtRollupProof, globaExitRoot) @@ -267,7 +267,7 @@ func TestE2E(t *testing.T) { t.Log("Deposit: ", deposits[0]) t.Log("Before getClaimData: ", deposits[0].NetworkId, deposits[0].DepositCnt) // Get the claim data - smtProof, smtRollupProof, globaExitRoot, err := opsman.GetClaimData(ctx, uint(deposits[0].NetworkId), uint(deposits[0].DepositCnt)) + smtProof, smtRollupProof, globaExitRoot, err := opsman.GetClaimData(ctx, deposits[0].NetworkId, deposits[0].DepositCnt) require.NoError(t, err) // Claim funds in L1 log.Debugf("globalExitRoot: %+v", globaExitRoot) @@ -303,7 +303,7 @@ func TestE2E(t *testing.T) { require.NoError(t, err) time.Sleep(2 * time.Second) // Check globalExitRoot - globalExitRoot4, err := opsman.GetTrustedGlobalExitRootSynced(ctx, uint(destNetwork)) + globalExitRoot4, err := opsman.GetTrustedGlobalExitRootSynced(ctx, destNetwork) require.NoError(t, err) log.Debugf("Global3 %+v: ", globalExitRoot3) log.Debugf("Global4 %+v: ", globalExitRoot4) @@ -339,7 +339,7 @@ func TestE2E(t *testing.T) { err = opsman.SendL1Deposit(ctx, tokenAddr, amount, destNetwork, &destAddr) require.NoError(t, err) // Check globalExitRoot - globalExitRoot2, err := opsman.GetTrustedGlobalExitRootSynced(ctx, uint(destNetwork)) + globalExitRoot2, err := opsman.GetTrustedGlobalExitRootSynced(ctx, destNetwork) require.NoError(t, err) log.Debugf("Before deposit global exit root: %v", globalExitRootSMC) log.Debugf("After deposit global exit root: %v", globalExitRoot2) @@ -387,7 +387,7 @@ func TestE2E(t *testing.T) { require.NoError(t, err) require.Equal(t, 0, big.NewInt(0).Cmp(balance)) // Get the claim data - smtProof, smtRollupProof, globaExitRoot, err := opsman.GetClaimData(ctx, uint(deposits[0].NetworkId), uint(deposits[0].DepositCnt)) + smtProof, smtRollupProof, globaExitRoot, err := opsman.GetClaimData(ctx, deposits[0].NetworkId, deposits[0].DepositCnt) require.NoError(t, err) // Claim funds in L1 err = opsman.SendL1Claim(ctx, deposits[0], smtProof, smtRollupProof, globaExitRoot) @@ -439,7 +439,7 @@ func TestE2E(t *testing.T) { err = opsman.SendL1Deposit(ctx, tokenAddr, amount3, destNetwork, &destAddr) require.NoError(t, err) // Check globalExitRoot - globalExitRoot2, err := opsman.GetTrustedGlobalExitRootSynced(ctx, uint(destNetwork)) + globalExitRoot2, err := opsman.GetTrustedGlobalExitRootSynced(ctx, destNetwork) require.NoError(t, err) log.Debugf("Before deposits global exit root: %v", globalExitRootSMC) log.Debugf("After deposits global exit root: %v", globalExitRoot2) @@ -490,7 +490,7 @@ func TestE2E(t *testing.T) { deposits, err := opsman.GetBridgeInfoByDestAddr(ctx, &destAddr) require.NoError(t, err) // Get the claim data - smtProof, smtRollupProof, 
globaExitRoot, err := opsman.GetClaimData(ctx, uint(deposits[0].NetworkId), uint(deposits[0].DepositCnt)) + smtProof, smtRollupProof, globaExitRoot, err := opsman.GetClaimData(ctx, deposits[0].NetworkId, deposits[0].DepositCnt) require.NoError(t, err) // Check the claim tx err = opsman.SendL2Claim(ctx, deposits[0], smtProof, smtRollupProof, globaExitRoot, operations.L2) @@ -510,7 +510,7 @@ func TestE2E(t *testing.T) { deposits, err = opsman.GetBridgeInfoByDestAddr(ctx, &destAddr) require.NoError(t, err) // Get the claim data - smtProof, smtRollupProof, globaExitRoot, err = opsman.GetClaimData(ctx, uint(deposits[0].NetworkId), uint(deposits[0].DepositCnt)) + smtProof, smtRollupProof, globaExitRoot, err = opsman.GetClaimData(ctx, deposits[0].NetworkId, deposits[0].DepositCnt) require.NoError(t, err) // Claim a bridge message in L1 log.Debugf("globalExitRoot: %+v", globaExitRoot) @@ -553,7 +553,7 @@ func TestE2E(t *testing.T) { deposits, err = opsman.GetBridgeInfoByDestAddr(ctx, &destAddr) require.NoError(t, err) // Get the claim data - smtProof, smtRollupProof, globaExitRoot, err := opsman.GetClaimData(ctx, uint(deposits[0].NetworkId), uint(deposits[0].DepositCnt)) + smtProof, smtRollupProof, globaExitRoot, err := opsman.GetClaimData(ctx, deposits[0].NetworkId, deposits[0].DepositCnt) require.NoError(t, err) // Claim a bridge message in L1 log.Debugf("globalExitRoot: %+v", globaExitRoot) @@ -573,14 +573,14 @@ func TestE2E(t *testing.T) { deposits, err := opsman.GetBridgeInfoByDestAddr(ctx, &destAddr) require.NoError(t, err) // Get the claim data - smtProof, smtRollupProof, globaExitRoot, err := opsman.GetClaimData(ctx, uint(deposits[0].NetworkId), uint(deposits[0].DepositCnt)) + smtProof, smtRollupProof, globaExitRoot, err := opsman.GetClaimData(ctx, deposits[0].NetworkId, deposits[0].DepositCnt) require.NoError(t, err) var ger common.Hash = bridgectrl.Hash(globaExitRoot.ExitRoots[0], globaExitRoot.ExitRoots[1]) t.Logf("GetClaimDataByGER: network: %d deposit_cnt:%d GER:%s", deposits[0].NetworkId, deposits[0].DepositCnt, ger.String()) t.Logf("Checking same claim as GetClaim") - smtProofByGer, smtRollupProofByGer, globaExitRootByGer, err := opsman.GetClaimDataByGER(ctx, uint(deposits[0].NetworkId), uint(deposits[0].DepositCnt), ger) + smtProofByGer, smtRollupProofByGer, globaExitRootByGer, err := opsman.GetClaimDataByGER(ctx, deposits[0].NetworkId, deposits[0].DepositCnt, ger) require.NoError(t, err) require.Equal(t, ger, globaExitRootByGer.GlobalExitRoot) require.Equal(t, globaExitRoot.ExitRoots[1], globaExitRootByGer.ExitRoots[1]) diff --git a/test/e2e/edge_test.go b/test/e2e/edge_test.go index a746b8dd..b81f2a88 100644 --- a/test/e2e/edge_test.go +++ b/test/e2e/edge_test.go @@ -47,7 +47,7 @@ func depositFromL2(ctx context.Context, opsman *operations.Manager, t *testing.T require.NoError(t, err) // Check globalExitRoot // Get the claim data - smtProof, smtRollupProof, globalExitRoot, err := opsman.GetClaimData(ctx, uint(deposits[0].NetworkId), uint(deposits[0].DepositCnt)) + smtProof, smtRollupProof, globalExitRoot, err := opsman.GetClaimData(ctx, deposits[0].NetworkId, deposits[0].DepositCnt) require.NoError(t, err) // Claim funds in L1 err = opsman.SendL1Claim(ctx, deposits[0], smtProof, smtRollupProof, globalExitRoot) diff --git a/test/e2e/l2_l2_test.go b/test/e2e/l2_l2_test.go index a8b806de..f32e347d 100644 --- a/test/e2e/l2_l2_test.go +++ b/test/e2e/l2_l2_test.go @@ -64,7 +64,7 @@ func TestL2L2(t *testing.T) { t.Log("balance: ", balance) require.Equal(t, 0, v.Cmp(balance)) // Get the 
claim data - smtProof, smtRollupProof, globaExitRoot, err := opsman1.GetClaimData(ctx, uint(deposits[0].NetworkId), uint(deposits[0].DepositCnt)) + smtProof, smtRollupProof, globaExitRoot, err := opsman1.GetClaimData(ctx, deposits[0].NetworkId, deposits[0].DepositCnt) require.NoError(t, err) time.Sleep(5 * time.Second) // Claim funds in destination L2 diff --git a/test/e2e/multiplerollups_test.go b/test/e2e/multiplerollups_test.go index cc9c781b..3a37d1e5 100644 --- a/test/e2e/multiplerollups_test.go +++ b/test/e2e/multiplerollups_test.go @@ -20,9 +20,9 @@ func TestMultipleRollups(t *testing.T) { t.Skip() } const ( - mainnetID uint = 0 - rollup1ID uint = 1 - rollup2ID uint = 2 + mainnetID uint32 = 0 + rollup1ID uint32 = 1 + rollup2ID uint32 = 2 ) ctx := context.Background() opsman1, err := operations.GetOpsman(ctx, "http://localhost:8123", "test_db", "8080", "9090", "5435", 1) @@ -163,9 +163,9 @@ func TestMultipleRollups(t *testing.T) { } type bridgeData struct { - originNet uint - destNet uint - originTokenNet uint + originNet uint32 + destNet uint32 + originTokenNet uint32 originTokenAddr common.Address amount *big.Int } @@ -218,7 +218,7 @@ func bridge( log.Debug("deposit claimed on L2") } else { log.Debug("getting proof to perform claim from bridge service...") - smtProof, smtRollupProof, globaExitRoot, err := opsman.GetClaimData(ctx, uint(deposit.NetworkId), uint(deposit.DepositCnt)) + smtProof, smtRollupProof, globaExitRoot, err := opsman.GetClaimData(ctx, deposit.NetworkId, deposit.DepositCnt) require.NoError(t, err) log.Debug("sending claim tx to L1") err = opsman.SendL1Claim(ctx, deposit, smtProof, smtRollupProof, globaExitRoot) diff --git a/test/operations/interfaces.go b/test/operations/interfaces.go index 1f0f81b3..d213e219 100644 --- a/test/operations/interfaces.go +++ b/test/operations/interfaces.go @@ -11,15 +11,15 @@ import ( // StorageInterface is a storage interface. 
type StorageInterface interface { - GetLastBlock(ctx context.Context, networkID uint, dbTx pgx.Tx) (*etherman.Block, error) - GetLatestExitRoot(ctx context.Context, networkID, destNetwork uint, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) + GetLastBlock(ctx context.Context, networkID uint32, dbTx pgx.Tx) (*etherman.Block, error) + GetLatestExitRoot(ctx context.Context, networkID, destNetwork uint32, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) GetLatestL1SyncedExitRoot(ctx context.Context, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) - GetLatestTrustedExitRoot(ctx context.Context, networkID uint, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) - GetTokenWrapped(ctx context.Context, originalNetwork uint, originalTokenAddress common.Address, dbTx pgx.Tx) (*etherman.TokenWrapped, error) - GetDepositCountByRoot(ctx context.Context, root []byte, network uint8, dbTx pgx.Tx) (uint, error) - UpdateBlocksForTesting(ctx context.Context, networkID uint, blockNum uint64, dbTx pgx.Tx) error - GetClaim(ctx context.Context, depositCount, origNetworkID, networkID uint, dbTx pgx.Tx) (*etherman.Claim, error) - GetClaims(ctx context.Context, destAddr string, limit uint, offset uint, dbTx pgx.Tx) ([]*etherman.Claim, error) + GetLatestTrustedExitRoot(ctx context.Context, networkID uint32, dbTx pgx.Tx) (*etherman.GlobalExitRoot, error) + GetTokenWrapped(ctx context.Context, originalNetwork uint32, originalTokenAddress common.Address, dbTx pgx.Tx) (*etherman.TokenWrapped, error) + GetDepositCountByRoot(ctx context.Context, root []byte, network uint32, dbTx pgx.Tx) (uint32, error) + UpdateBlocksForTesting(ctx context.Context, networkID uint32, blockNum uint64, dbTx pgx.Tx) error + GetClaim(ctx context.Context, depositCount, origNetworkID, networkID uint32, dbTx pgx.Tx) (*etherman.Claim, error) + GetClaims(ctx context.Context, destAddr string, limit uint32, offset uint32, dbTx pgx.Tx) ([]*etherman.Claim, error) UpdateDepositsStatusForTesting(ctx context.Context, dbTx pgx.Tx) error GetLatestMonitoredTxGroupID(ctx context.Context, dbTx pgx.Tx) (uint64, error) // synchronizer diff --git a/test/operations/manager.go b/test/operations/manager.go index 09d79d61..fc372d52 100644 --- a/test/operations/manager.go +++ b/test/operations/manager.go @@ -68,7 +68,7 @@ var accHexPrivateKeys = map[NetworkSID]string{ type Config struct { L1NetworkURL string L2NetworkURL string - L2NetworkID uint + L2NetworkID uint32 Storage db.Config BT bridgectrl.Config BS server.Config @@ -110,7 +110,7 @@ func NewManager(ctx context.Context, cfg *Config) (*Manager, error) { if err != nil { return nil, err } - bt, err := bridgectrl.NewBridgeController(ctx, cfg.BT, []uint{0, cfg.L2NetworkID}, pgst) + bt, err := bridgectrl.NewBridgeController(ctx, cfg.BT, []uint32{0, cfg.L2NetworkID}, pgst) if err != nil { return nil, err } @@ -122,7 +122,7 @@ func NewManager(ctx context.Context, cfg *Config) (*Manager, error) { if err != nil { return nil, err } - bService := server.NewBridgeService(cfg.BS, cfg.BT.Height, []uint{0, cfg.L2NetworkID}, pgst) + bService := server.NewBridgeService(cfg.BS, cfg.BT.Height, []uint32{0, cfg.L2NetworkID}, pgst) opsman.storage = st.(StorageInterface) opsman.bridgetree = bt opsman.bridgeService = bService @@ -209,7 +209,7 @@ func (m *Manager) CustomCheckClaim(ctx context.Context, deposit *pb.Deposit, int // GetNumberClaims get the number of claim events synced func (m *Manager) GetNumberClaims(ctx context.Context, destAddr string) (int, error) { - const limit = 100 + const limit uint32 = 100 claims, err := 
m.storage.GetClaims(ctx, destAddr, limit, 0, nil) if err != nil { return 0, err @@ -225,7 +225,7 @@ func (m *Manager) SendL1Deposit(ctx context.Context, tokenAddr common.Address, a return err } - orgExitRoot, err := m.storage.GetLatestExitRoot(ctx, 0, uint(destNetwork), nil) + orgExitRoot, err := m.storage.GetLatestExitRoot(ctx, 0, destNetwork, nil) if err != nil && err != gerror.ErrStorageNotFound { return err } @@ -236,7 +236,7 @@ func (m *Manager) SendL1Deposit(ctx context.Context, tokenAddr common.Address, a } // sync for new exit root - return m.WaitExitRootToBeSynced(ctx, orgExitRoot, 0, uint(destNetwork)) + return m.WaitExitRootToBeSynced(ctx, orgExitRoot, 0, destNetwork) } // SendMultipleL1Deposit sends a deposit from l1 to l2. @@ -275,7 +275,7 @@ func (m *Manager) SendL2Deposit(ctx context.Context, tokenAddr common.Address, a log.Error("error getting networkID: ", networkID) return err } - orgExitRoot, err := m.storage.GetLatestExitRoot(ctx, uint(networkID), uint(destNetwork), nil) + orgExitRoot, err := m.storage.GetLatestExitRoot(ctx, networkID, destNetwork, nil) if err != nil && err != gerror.ErrStorageNotFound { return err } @@ -286,7 +286,7 @@ func (m *Manager) SendL2Deposit(ctx context.Context, tokenAddr common.Address, a } // sync for new exit root - return m.WaitExitRootToBeSynced(ctx, orgExitRoot, uint(networkID), uint(destNetwork)) + return m.WaitExitRootToBeSynced(ctx, orgExitRoot, networkID, destNetwork) } // SendL1BridgeMessage bridges a message from l1 to l2. @@ -303,7 +303,7 @@ func (m *Manager) SendL1BridgeMessage(ctx context.Context, destAddr common.Addre } } - orgExitRoot, err := m.storage.GetLatestExitRoot(ctx, 0, uint(destNetwork), nil) + orgExitRoot, err := m.storage.GetLatestExitRoot(ctx, 0, destNetwork, nil) if err != nil && err != gerror.ErrStorageNotFound { return err } @@ -315,7 +315,7 @@ func (m *Manager) SendL1BridgeMessage(ctx context.Context, destAddr common.Addre } // sync for new exit root - return m.WaitExitRootToBeSynced(ctx, orgExitRoot, 0, uint(destNetwork)) + return m.WaitExitRootToBeSynced(ctx, orgExitRoot, 0, destNetwork) } // SendL2BridgeMessage bridges a message from l2 to l1. 
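The uint(...) casts dropped from the e2e tests above work because operations.Manager now takes uint32 all the way down and forwards the values into the proto request unchanged; NetId and DepositCnt on pb.GetProofRequest are now uint32 as well (the proto change itself is outside this excerpt). The core of GetClaimData after the change, trimmed to the request-building step:

    // networkID and depositCount are the uint32 parameters of GetClaimData;
    // no uint32(...)/uint64(...) conversions remain at the proto boundary.
    res, err := m.bridgeService.GetProof(context.Background(), &pb.GetProofRequest{
        NetId:      networkID,
        DepositCnt: depositCount,
    })
    if err != nil {
        return [MtHeight][32]byte{}, [MtHeight][32]byte{}, nil, err
    }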
@@ -332,7 +332,7 @@ func (m *Manager) SendL2BridgeMessage(ctx context.Context, destAddr common.Addre return err } - orgExitRoot, err := m.storage.GetLatestExitRoot(ctx, uint(networkID), uint(destNetwork), nil) + orgExitRoot, err := m.storage.GetLatestExitRoot(ctx, networkID, destNetwork, nil) if err != nil && err != gerror.ErrStorageNotFound { return err } @@ -344,7 +344,7 @@ func (m *Manager) SendL2BridgeMessage(ctx context.Context, destAddr common.Addre } // sync for new exit root - return m.WaitExitRootToBeSynced(ctx, orgExitRoot, uint(networkID), uint(destNetwork)) + return m.WaitExitRootToBeSynced(ctx, orgExitRoot, networkID, destNetwork) } // Setup creates all the required components and initializes them according to @@ -605,10 +605,10 @@ func (m *Manager) CheckAccountTokenBalance(ctx context.Context, network NetworkS } // GetClaimDataByGER gets the claim data by ger -func (m *Manager) GetClaimDataByGER(ctx context.Context, networkID, depositCount uint, ger common.Hash) ([MtHeight][bridgectrl.KeyLen]byte, [MtHeight][bridgectrl.KeyLen]byte, *etherman.GlobalExitRoot, error) { +func (m *Manager) GetClaimDataByGER(ctx context.Context, networkID, depositCount uint32, ger common.Hash) ([MtHeight][bridgectrl.KeyLen]byte, [MtHeight][bridgectrl.KeyLen]byte, *etherman.GlobalExitRoot, error) { res, err := m.bridgeService.GetProofByGER(context.Background(), &pb.GetProofByGERRequest{ - NetId: uint32(networkID), - DepositCnt: uint64(depositCount), + NetId: networkID, + DepositCnt: depositCount, Ger: ger.String(), }) if err != nil { @@ -634,10 +634,10 @@ func (m *Manager) GetClaimDataByGER(ctx context.Context, networkID, depositCount } // GetClaimData gets the claim data -func (m *Manager) GetClaimData(ctx context.Context, networkID, depositCount uint) ([MtHeight][bridgectrl.KeyLen]byte, [MtHeight][bridgectrl.KeyLen]byte, *etherman.GlobalExitRoot, error) { +func (m *Manager) GetClaimData(ctx context.Context, networkID, depositCount uint32) ([MtHeight][bridgectrl.KeyLen]byte, [MtHeight][bridgectrl.KeyLen]byte, *etherman.GlobalExitRoot, error) { res, err := m.bridgeService.GetProof(context.Background(), &pb.GetProofRequest{ - NetId: uint32(networkID), - DepositCnt: uint64(depositCount), + NetId: networkID, + DepositCnt: depositCount, }) if err != nil { return [MtHeight][32]byte{}, [MtHeight][32]byte{}, nil, err @@ -703,7 +703,7 @@ func (m *Manager) SendL2Claim(ctx context.Context, deposit *pb.Deposit, smtProof } // GetTrustedGlobalExitRootSynced reads the latest globalexitroot of a batch proposal from db -func (m *Manager) GetTrustedGlobalExitRootSynced(ctx context.Context, networkID uint) (*etherman.GlobalExitRoot, error) { +func (m *Manager) GetTrustedGlobalExitRootSynced(ctx context.Context, networkID uint32) (*etherman.GlobalExitRoot, error) { return m.storage.GetLatestTrustedExitRoot(ctx, networkID, nil) } @@ -795,7 +795,7 @@ func (m *Manager) ApproveERC20(ctx context.Context, erc20Addr, bridgeAddr common } // GetTokenWrapped get token wrapped info -func (m *Manager) GetTokenWrapped(ctx context.Context, originNetwork uint, originalTokenAddr common.Address, isCreated bool) (*etherman.TokenWrapped, error) { +func (m *Manager) GetTokenWrapped(ctx context.Context, originNetwork uint32, originalTokenAddr common.Address, isCreated bool) (*etherman.TokenWrapped, error) { if isCreated { if err := operations.Poll(defaultInterval, defaultDeadline, func() (bool, error) { wrappedToken, err := m.storage.GetTokenWrapped(ctx, originNetwork, originalTokenAddr, nil) @@ -811,12 +811,12 @@ func (m *Manager) 
GetTokenWrapped(ctx context.Context, originNetwork uint, origi } // UpdateBlocksForTesting updates the hash of blocks. -func (m *Manager) UpdateBlocksForTesting(ctx context.Context, networkID uint, blockNum uint64) error { +func (m *Manager) UpdateBlocksForTesting(ctx context.Context, networkID uint32, blockNum uint64) error { return m.storage.UpdateBlocksForTesting(ctx, networkID, blockNum, nil) } // WaitExitRootToBeSynced waits until new exit root is synced. -func (m *Manager) WaitExitRootToBeSynced(ctx context.Context, orgExitRoot *etherman.GlobalExitRoot, networkID, destNetwork uint) error { +func (m *Manager) WaitExitRootToBeSynced(ctx context.Context, orgExitRoot *etherman.GlobalExitRoot, networkID, destNetwork uint32) error { log.Debugf("WaitExitRootToBeSynced: %+v", orgExitRoot) if orgExitRoot == nil { orgExitRoot = ðerman.GlobalExitRoot{ @@ -875,13 +875,13 @@ func (m *Manager) ERC20Transfer(ctx context.Context, erc20Addr, to common.Addres return client.ERC20Transfer(ctx, erc20Addr, to, amount, auth) } -func (m *Manager) GetTokenAddress(ctx context.Context, network NetworkSID, originalNetwork uint, originalTokenAddr common.Address) (common.Address, error) { +func (m *Manager) GetTokenAddress(ctx context.Context, network NetworkSID, originalNetwork uint32, originalTokenAddr common.Address) (common.Address, error) { zeroAddr := common.Address{} if network == L1 { if originalNetwork == 0 { return originalTokenAddr, nil } - token, err := m.storage.GetTokenWrapped(ctx, uint(originalNetwork), originalTokenAddr, nil) + token, err := m.storage.GetTokenWrapped(ctx, originalNetwork, originalTokenAddr, nil) if err != nil { return common.Address{}, err } @@ -894,10 +894,10 @@ func (m *Manager) GetTokenAddress(ctx context.Context, network NetworkSID, origi if err != nil { return common.Address{}, err } - if originalNetwork == uint(networkID) { + if originalNetwork == networkID { return originalTokenAddr, nil } - token, err := m.storage.GetTokenWrapped(ctx, uint(originalNetwork), originalTokenAddr, nil) + token, err := m.storage.GetTokenWrapped(ctx, originalNetwork, originalTokenAddr, nil) if err != nil { return common.Address{}, err } @@ -925,7 +925,7 @@ func (m *Manager) GetL1Balance(ctx context.Context, originalNetwork uint32, orig return m.CheckAccountTokenBalance(ctx, L1, originalTokenAddr, &holder) } } else { - token, err := m.storage.GetTokenWrapped(ctx, uint(originalNetwork), originalTokenAddr, nil) + token, err := m.storage.GetTokenWrapped(ctx, originalNetwork, originalTokenAddr, nil) if err == gerror.ErrStorageNotFound { return big.NewInt(0), nil } else if err != nil { @@ -950,7 +950,7 @@ func (m *Manager) GetL2Balance(ctx context.Context, originalNetwork uint32, orig } else { // If the token is not created on L1 or in this rollup, it's needed to calculate // the addr of the token on the rollup - token, err := m.storage.GetTokenWrapped(ctx, uint(originalNetwork), originalTokenAddr, nil) + token, err := m.storage.GetTokenWrapped(ctx, originalNetwork, originalTokenAddr, nil) if err == gerror.ErrStorageNotFound { return big.NewInt(0), nil } else if err != nil { @@ -961,7 +961,7 @@ func (m *Manager) GetL2Balance(ctx context.Context, originalNetwork uint32, orig return m.CheckAccountTokenBalance(ctx, L2, rollupAddr, &holder) } -func GetOpsman(ctx context.Context, l2NetworkURL, dbName, bridgeServiceHTTPPort, bridgeServiceGRPCPort, port string, networkID uint) (*Manager, error) { +func GetOpsman(ctx context.Context, l2NetworkURL, dbName, bridgeServiceHTTPPort, bridgeServiceGRPCPort, port 
string, networkID uint32) (*Manager, error) { //nolint:gomnd opsCfg := &Config{ L1NetworkURL: "http://localhost:8545", diff --git a/test/operations/mockserver.go b/test/operations/mockserver.go index 2152b8f2..544c8f08 100644 --- a/test/operations/mockserver.go +++ b/test/operations/mockserver.go @@ -10,7 +10,7 @@ import ( ) // RunMockServer runs mock server -func RunMockServer(dbType string, height uint8, networks []uint) (*bridgectrl.BridgeController, StorageInterface, error) { +func RunMockServer(dbType string, height uint8, networks []uint32) (*bridgectrl.BridgeController, StorageInterface, error) { if dbType != "postgres" { return nil, nil, fmt.Errorf("not registered database") } diff --git a/test/scripts/initialClaim/main.go b/test/scripts/initialClaim/main.go index 736830fa..3bcf204a 100644 --- a/test/scripts/initialClaim/main.go +++ b/test/scripts/initialClaim/main.go @@ -92,14 +92,14 @@ func main() { } e := etherman.Deposit{ LeafType: uint8(bridgeData.LeafType), - OriginalNetwork: uint(bridgeData.OrigNet), + OriginalNetwork: bridgeData.OrigNet, OriginalAddress: common.HexToAddress(bridgeData.OrigAddr), Amount: a, - DestinationNetwork: uint(bridgeData.DestNet), + DestinationNetwork: bridgeData.DestNet, DestinationAddress: common.HexToAddress(bridgeData.DestAddr), - DepositCount: uint(bridgeData.DepositCnt), + DepositCount: bridgeData.DepositCnt, BlockNumber: bridgeData.BlockNum, - NetworkID: uint(bridgeData.NetworkId), + NetworkID: bridgeData.NetworkId, TxHash: common.HexToHash(bridgeData.TxHash), Metadata: metadata, ReadyForClaim: bridgeData.ReadyForClaim, @@ -119,7 +119,7 @@ func main() { if err != nil { log.Fatal("Error: ", err) } - tx, err := c.BuildSendClaim(ctx, &e, smtProof, smtRollupProof, globalExitRoot, 0, 0, l2GasLimit, uint(rollupID), auth) + tx, err := c.BuildSendClaim(ctx, &e, smtProof, smtRollupProof, globalExitRoot, 0, 0, l2GasLimit, rollupID, auth) if err != nil { log.Fatal("error: ", err) } diff --git a/test/vectors/vectors.go b/test/vectors/vectors.go index 43ebdc27..4381494e 100644 --- a/test/vectors/vectors.go +++ b/test/vectors/vectors.go @@ -2,10 +2,10 @@ package vectors // DepositVectorRaw represents the deposit vector type DepositVectorRaw struct { - OriginalNetwork uint `json:"originNetwork"` + OriginalNetwork uint32 `json:"originNetwork"` TokenAddress string `json:"tokenAddress"` Amount string `json:"amount"` - DestinationNetwork uint `json:"destinationNetwork"` + DestinationNetwork uint32 `json:"destinationNetwork"` DestinationAddress string `json:"destinationAddress"` ExpectedHash string `json:"leafValue"` CurrentHash string `json:"currentLeafValue"` @@ -23,18 +23,18 @@ type MTRootVectorRaw struct { // MTClaimVectorRaw represents the merkle proof type MTClaimVectorRaw struct { Deposits []DepositVectorRaw `json:"leafs"` - Index uint `json:"index"` + Index uint32 `json:"index"` MerkleProof []string `json:"proof"` ExpectedRoot string `json:"root"` } // ClaimVectorRaw represents the claim vector type ClaimVectorRaw struct { - Index uint `json:"index"` - OriginalNetwork uint `json:"originNetwork"` + Index uint32 `json:"index"` + OriginalNetwork uint32 `json:"originNetwork"` Token string `json:"token"` Amount string `json:"amount"` - DestinationNetwork uint `json:"destNetwork"` + DestinationNetwork uint32 `json:"destNetwork"` DestinationAddress string `json:"destAddress"` BlockNumber uint64 `json:"blockNumber"` } @@ -44,5 +44,5 @@ type BlockVectorRaw struct { BlockNumber uint64 `json:"blockNumber"` BlockHash string `json:"blockHash"` ParentHash string 
`json:"parentHash"` - NetworkID uint `json:"networkID"` + NetworkID uint32 `json:"networkID"` } diff --git a/utils/client.go b/utils/client.go index e598f0b9..5ca7bf17 100644 --- a/utils/client.go +++ b/utils/client.go @@ -32,9 +32,9 @@ import ( const ( // LeafTypeAsset represents a bridge asset - LeafTypeAsset uint32 = 0 + LeafTypeAsset uint8 = 0 // LeafTypeMessage represents a bridge message - LeafTypeMessage uint32 = 1 + LeafTypeMessage uint8 = 1 mtHeight = 32 keyLen = 32 @@ -206,7 +206,7 @@ func (c *Client) SendBridgeMessage(ctx context.Context, destNetwork uint32, dest } // BuildSendClaim builds a tx data to be sent to the bridge method SendClaim. -func (c *Client) BuildSendClaim(ctx context.Context, deposit *etherman.Deposit, smtProof [mtHeight][keyLen]byte, smtRollupProof [mtHeight][keyLen]byte, globalExitRoot *etherman.GlobalExitRoot, nonce, gasPrice int64, gasLimit uint64, rollupID uint, auth *bind.TransactOpts) (*types.Transaction, error) { +func (c *Client) BuildSendClaim(ctx context.Context, deposit *etherman.Deposit, smtProof [mtHeight][keyLen]byte, smtRollupProof [mtHeight][keyLen]byte, globalExitRoot *etherman.GlobalExitRoot, nonce, gasPrice int64, gasLimit uint64, rollupID uint32, auth *bind.TransactOpts) (*types.Transaction, error) { opts := *auth opts.NoSend = true // force nonce, gas limit and gas price to avoid querying it from the chain @@ -222,10 +222,10 @@ func (c *Client) BuildSendClaim(ctx context.Context, deposit *etherman.Deposit, rollupIndex := rollupID - 1 localExitRootIndex := deposit.DepositCount globalIndex := etherman.GenerateGlobalIndex(mainnetFlag, rollupIndex, localExitRootIndex) - if deposit.LeafType == uint8(LeafTypeAsset) { + if deposit.LeafType == LeafTypeAsset { tx, err = c.Bridge.ClaimAsset(&opts, smtProof, smtRollupProof, globalIndex, globalExitRoot.ExitRoots[0], globalExitRoot.ExitRoots[1], uint32(deposit.OriginalNetwork), deposit.OriginalAddress, uint32(deposit.DestinationNetwork), deposit.DestinationAddress, deposit.Amount, deposit.Metadata) - } else if deposit.LeafType == uint8(LeafTypeMessage) { + } else if deposit.LeafType == LeafTypeMessage { tx, err = c.Bridge.ClaimMessage(&opts, smtProof, smtRollupProof, globalIndex, globalExitRoot.ExitRoots[0], globalExitRoot.ExitRoots[1], uint32(deposit.OriginalNetwork), deposit.OriginalAddress, uint32(deposit.DestinationNetwork), deposit.DestinationAddress, deposit.Amount, deposit.Metadata) } if err != nil { @@ -248,7 +248,7 @@ func (c *Client) SendClaim(ctx context.Context, deposit *pb.Deposit, smtProof [m err error ) globalIndex, _ := big.NewInt(0).SetString(deposit.GlobalIndex, 0) - if deposit.LeafType == LeafTypeAsset { + if deposit.LeafType == uint32(LeafTypeAsset) { tx, err = c.Bridge.ClaimAsset(auth, smtProof, smtRollupProof, globalIndex, globalExitRoot.ExitRoots[0], globalExitRoot.ExitRoots[1], deposit.OrigNet, common.HexToAddress(deposit.OrigAddr), deposit.DestNet, common.HexToAddress(deposit.DestAddr), amount, common.FromHex(deposit.Metadata)) if err != nil { a, _ := polygonzkevmbridge.PolygonzkevmbridgeMetaData.GetAbi() @@ -266,7 +266,7 @@ func (c *Client) SendClaim(ctx context.Context, deposit *pb.Deposit, smtProof [m "id": 1 }'`, auth.From, "", common.Bytes2Hex(input)) } - } else if deposit.LeafType == LeafTypeMessage { + } else if deposit.LeafType == uint32(LeafTypeMessage) { tx, err = c.Bridge.ClaimMessage(auth, smtProof, smtRollupProof, globalIndex, globalExitRoot.ExitRoots[0], globalExitRoot.ExitRoots[1], deposit.OrigNet, common.HexToAddress(deposit.OrigAddr), deposit.DestNet, 
common.HexToAddress(deposit.DestAddr), amount, common.FromHex(deposit.Metadata)) } if err != nil {
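One subtlety in utils/client.go above: LeafTypeAsset and LeafTypeMessage switch from uint32 to uint8, which matches etherman.Deposit.LeafType but not the proto deposit, whose LeafType stays uint32. The comparisons therefore move in opposite directions, roughly (pbDeposit is an illustrative name for a *pb.Deposit):

    // BuildSendClaim works on etherman.Deposit (LeafType uint8): the casts disappear.
    if deposit.LeafType == LeafTypeAsset {
        // claim an asset
    }
    // SendClaim works on *pb.Deposit (LeafType uint32): the uint8 constant is widened instead.
    if pbDeposit.LeafType == uint32(LeafTypeMessage) {
        // claim a message
    }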