Commit b49ce68: node store v2

ian-shim committed Nov 12, 2024
1 parent ab183e3 commit b49ce68

Showing 9 changed files with 341 additions and 43 deletions.
core/v2/core_test.go (6 changes: 2 additions, 4 deletions)
@@ -205,14 +205,12 @@ func checkBatchByUniversalVerifier(
cst core.IndexedChainState,
packagedBlobs map[core.OperatorID][]*corev2.BlobShard,
pool common.WorkerPool,
referenceBlockNumber uint64,
) error {

ctx := context.Background()

quorums := []core.QuorumID{0, 1}
state, _ := cst.GetIndexedOperatorState(context.Background(), 0, quorums)
// numBlob := len(encodedBlobs)

var errList *multierror.Error

@@ -222,7 +220,7 @@ func checkBatchByUniversalVerifier(

blobs := packagedBlobs[id]

err := val.ValidateBlobs(ctx, blobs, pool, referenceBlockNumber)
err := val.ValidateBlobs(ctx, blobs, pool, state.OperatorState)
if err != nil {
errList = multierror.Append(errList, err)
}
@@ -268,7 +266,7 @@ func TestValidationSucceeds(t *testing.T) {
packagedBlobs, cst := prepareBlobs(t, operatorCount, headers, blobs, bn)

t.Run(fmt.Sprintf("universal verifier operatorCount=%v over %v blobs", operatorCount, len(blobs)), func(t *testing.T) {
err := checkBatchByUniversalVerifier(cst, packagedBlobs, pool, bn)
err := checkBatchByUniversalVerifier(cst, packagedBlobs, pool)
assert.NoError(t, err)
})

core/v2/serialization.go (48 changes: 48 additions, 0 deletions)
@@ -1,6 +1,8 @@
package v2

import (
"bytes"
"encoding/gob"
"fmt"
"math/big"

@@ -218,6 +220,19 @@ func (c *BlobCertificate) Hash() ([32]byte, error) {
return blobCertHash, nil
}

func (c *BlobCertificate) Serialize() ([]byte, error) {
return encode(c)
}

func DeserializeBlobCertificate(data []byte) (*BlobCertificate, error) {
var c BlobCertificate
err := decode(data, &c)
if err != nil {
return nil, err
}
return &c, nil
}

// GetBatchHeaderHash returns the hash of the batch header
func (h BatchHeader) Hash() ([32]byte, error) {
var headerHash [32]byte
@@ -263,3 +278,36 @@ func (h BatchHeader) Hash() ([32]byte, error) {

return headerHash, nil
}

func (h BatchHeader) Serialize() ([]byte, error) {
return encode(h)
}

func DeserializeBatchHeader(data []byte) (*BatchHeader, error) {
var h BatchHeader
err := decode(data, &h)
if err != nil {
return nil, err
}
return &h, nil
}

func encode(obj any) ([]byte, error) {
var buf bytes.Buffer
enc := gob.NewEncoder(&buf)
err := enc.Encode(obj)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}

func decode(data []byte, obj any) error {
buf := bytes.NewBuffer(data)
dec := gob.NewDecoder(buf)
err := dec.Decode(obj)
if err != nil {
return err
}
return nil
}
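
Note: the Serialize/Deserialize helpers added above are thin wrappers around encoding/gob. Below is a minimal standalone sketch of the same round-trip pattern, using a stand-in batchHeader struct rather than the repo's core/v2 types:

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// batchHeader is a stand-in with the same shape as core/v2's BatchHeader,
// used here only to illustrate the gob round trip.
type batchHeader struct {
	BatchRoot            [32]byte
	ReferenceBlockNumber uint64
}

func encode(obj any) ([]byte, error) {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(obj); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func decode(data []byte, obj any) error {
	return gob.NewDecoder(bytes.NewBuffer(data)).Decode(obj)
}

func main() {
	in := batchHeader{ReferenceBlockNumber: 1000}
	copy(in.BatchRoot[:], []byte("batchRoot"))

	data, err := encode(in)
	if err != nil {
		panic(err)
	}

	var out batchHeader
	if err := decode(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(out == in) // true: gob round-trips the struct losslessly
}

One consequence of using gob: fields present in the encoded data but missing from the receiving struct are silently ignored during decode, so struct changes do not produce decode errors.
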
core/v2/serialization_test.go (44 changes: 44 additions, 0 deletions)
@@ -70,6 +70,21 @@ func TestBatchHeaderHash(t *testing.T) {
assert.Equal(t, "891d0936da4627f445ef193aad63afb173409af9e775e292e4e35aff790a45e2", hex.EncodeToString(hash[:]))
}

func TestBatchHeaderSerialization(t *testing.T) {
batchRoot := [32]byte{}
copy(batchRoot[:], []byte("batchRoot"))
batchHeader := &v2.BatchHeader{
ReferenceBlockNumber: 1000,
BatchRoot: batchRoot,
}

serialized, err := batchHeader.Serialize()
assert.NoError(t, err)
deserialized, err := v2.DeserializeBatchHeader(serialized)
assert.NoError(t, err)
assert.Equal(t, batchHeader, deserialized)
}

func TestBlobCertHash(t *testing.T) {
data := codec.ConvertByPaddingEmptyByte(GETTYSBURG_ADDRESS_BYTES)
commitments, err := p.GetCommitments(data)
@@ -97,3 +112,32 @@ func TestBlobCertHash(t *testing.T) {
// 0xc4512b8702f69cb837fff50a93d3d28aada535b1f151b64db45859c3f5bb096a verified in solidity
assert.Equal(t, "c4512b8702f69cb837fff50a93d3d28aada535b1f151b64db45859c3f5bb096a", hex.EncodeToString(hash[:]))
}

func TestBlobCertSerialization(t *testing.T) {
data := codec.ConvertByPaddingEmptyByte(GETTYSBURG_ADDRESS_BYTES)
commitments, err := p.GetCommitments(data)
if err != nil {
t.Fatal(err)
}

blobCert := &v2.BlobCertificate{
BlobHeader: &v2.BlobHeader{
BlobVersion: 0,
BlobCommitments: commitments,
QuorumNumbers: []core.QuorumID{0, 1},
PaymentMetadata: core.PaymentMetadata{
AccountID: "0x123",
BinIndex: 5,
CumulativePayment: big.NewInt(100),
},
Signature: []byte{1, 2, 3},
},
RelayKeys: []v2.RelayKey{4, 5, 6},
}

serialized, err := blobCert.Serialize()
assert.NoError(t, err)
deserialized, err := v2.DeserializeBlobCertificate(serialized)
assert.NoError(t, err)
assert.Equal(t, blobCert, deserialized)
}
core/v2/types.go (4 changes: 3 additions, 1 deletion)
@@ -202,7 +202,9 @@ func (c *BlobCertificate) ToProtobuf() (*commonpb.BlobCertificate, error) {
}

type BatchHeader struct {
BatchRoot [32]byte
// BatchRoot is the root of a Merkle tree whose leaves are the keys of the blobs in the batch
BatchRoot [32]byte
// ReferenceBlockNumber is the block number at which all operator information (stakes, indexes, etc.) is taken from
ReferenceBlockNumber uint64
}

core/v2/validator.go (10 changes: 5 additions, 5 deletions)
@@ -72,7 +72,7 @@ func (v *ShardValidator) validateBlobQuorum(quorum core.QuorumID, blob *BlobShar
return chunks, &assignment, nil
}

func (v *ShardValidator) ValidateBlobs(ctx context.Context, blobs []*BlobShard, pool common.WorkerPool, referenceBlockNumber uint64) error {
func (v *ShardValidator) ValidateBlobs(ctx context.Context, blobs []*BlobShard, pool common.WorkerPool, state *core.OperatorState) error {
var err error
subBatchMap := make(map[encoding.EncodingParams]*encoding.SubBatch)
blobCommitmentList := make([]encoding.BlobCommitments, len(blobs))
@@ -82,10 +82,10 @@ func (v *ShardValidator) ValidateBlobs(ctx context.Context, blobs []*BlobShard,
return fmt.Errorf("number of bundles (%d) does not match number of quorums (%d)", len(blob.Bundles), len(blob.BlobHeader.QuorumNumbers))
}

state, err := v.chainState.GetOperatorState(ctx, uint(referenceBlockNumber), blob.BlobHeader.QuorumNumbers)
if err != nil {
return err
}
// state, err := v.chainState.GetOperatorState(ctx, uint(referenceBlockNumber), blob.BlobHeader.QuorumNumbers)
// if err != nil {
// return err
// }

// Saved for the blob length validation
blobCommitmentList[k] = blob.BlobHeader.BlobCommitments
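
For context, the caller-side shape this change implies (mirroring the updated core_test.go above): the operator state is resolved once per batch and handed to ValidateBlobs, instead of the validator fetching it per blob from a reference block number. Below is a hedged sketch using hypothetical stand-in types, not the repo's actual interfaces:

package main

import (
	"context"
	"errors"
	"fmt"
)

// operatorState and blobShard are hypothetical stand-ins for core.OperatorState
// and core/v2.BlobShard; only the call pattern is the point here.
type operatorState struct {
	BlockNumber uint
}

type blobShard struct {
	Name string
}

// getOperatorState stands in for a chain-state lookup keyed by the batch's
// reference block number.
func getOperatorState(ctx context.Context, referenceBlockNumber uint) (*operatorState, error) {
	return &operatorState{BlockNumber: referenceBlockNumber}, nil
}

// validateBlobs mirrors the new signature: the state is supplied by the caller,
// so every blob in the batch is checked against the same snapshot.
func validateBlobs(ctx context.Context, blobs []*blobShard, state *operatorState) error {
	if state == nil {
		return errors.New("operator state is required")
	}
	for _, b := range blobs {
		// per-blob checks would consult the shared state here
		_ = b
	}
	return nil
}

func main() {
	ctx := context.Background()
	blobs := []*blobShard{{Name: "blob0"}, {Name: "blob1"}}

	// Resolve state once per batch, then validate all blobs against it.
	state, err := getOperatorState(ctx, 100)
	if err != nil {
		panic(err)
	}
	if err := validateBlobs(ctx, blobs, state); err != nil {
		panic(err)
	}
	fmt.Println("validated", len(blobs), "blobs at block", state.BlockNumber)
}

Compared with the previous per-blob GetOperatorState calls, this moves the chain lookup out of the per-blob loop and lets the caller decide how the state is obtained.
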
node/grpc/server_v2.go (4 changes: 0 additions, 4 deletions)
@@ -58,7 +58,3 @@ func (s *ServerV2) NodeInfo(ctx context.Context, in *pb.NodeInfoRequest) (*pb.No
func (s *ServerV2) StoreChunks(ctx context.Context, in *pb.StoreChunksRequest) (*pb.StoreChunksReply, error) {
return &pb.StoreChunksReply{}, api.NewErrorUnimplemented()
}

func (s *ServerV2) GetChunks(context.Context, *pb.GetChunksRequest) (*pb.GetChunksReply, error) {
return &pb.GetChunksReply{}, api.NewErrorUnimplemented()
}
node/node_v2_test.go (63 changes: 34 additions, 29 deletions)
@@ -16,9 +16,7 @@ import (
"github.com/stretchr/testify/require"
)

func TestDownloadBatch(t *testing.T) {
c := newComponents(t)
ctx := context.Background()
func mockBatch(t *testing.T) ([]v2.BlobKey, *v2.Batch, []map[core.QuorumID]core.Bundle) {
commitments := mockCommitment(t)
bh0 := &v2.BlobHeader{
BlobVersion: 0,
@@ -157,63 +155,70 @@ func TestDownloadBatch(t *testing.T) {
},
}

batch := &v2.Batch{
return []v2.BlobKey{blobKey0, blobKey1, blobKey2}, &v2.Batch{
BatchHeader: &v2.BatchHeader{
BatchRoot: [32]byte{1, 1, 1},
ReferenceBlockNumber: 100,
},
BlobCertificates: []*v2.BlobCertificate{blobCert0, blobCert1, blobCert2},
}
}, []map[core.QuorumID]core.Bundle{bundles0, bundles1, bundles2}
}

func TestDownloadBatch(t *testing.T) {
c := newComponents(t)
ctx := context.Background()
blobKeys, batch, bundles := mockBatch(t)
blobCerts := batch.BlobCertificates

bundles00Bytes, err := bundles0[0].Serialize()
bundles00Bytes, err := bundles[0][0].Serialize()
require.NoError(t, err)
bundles01Bytes, err := bundles0[1].Serialize()
bundles01Bytes, err := bundles[0][1].Serialize()
require.NoError(t, err)
bundles10Bytes, err := bundles1[0].Serialize()
bundles10Bytes, err := bundles[1][0].Serialize()
require.NoError(t, err)
bundles11Bytes, err := bundles1[1].Serialize()
bundles11Bytes, err := bundles[1][1].Serialize()
require.NoError(t, err)
bundles21Bytes, err := bundles2[1].Serialize()
bundles21Bytes, err := bundles[2][1].Serialize()
require.NoError(t, err)
bundles22Bytes, err := bundles2[2].Serialize()
bundles22Bytes, err := bundles[2][2].Serialize()
require.NoError(t, err)
c.relayClient.On("GetChunksByRange", mock.Anything, v2.RelayKey(0), mock.Anything).Return([][]byte{bundles00Bytes, bundles01Bytes, bundles21Bytes, bundles22Bytes}, nil).Run(func(args mock.Arguments) {
requests := args.Get(2).([]*clients.ChunkRequestByRange)
require.Len(t, requests, 4)
require.Equal(t, blobKey0, requests[0].BlobKey)
require.Equal(t, blobKey0, requests[1].BlobKey)
require.Equal(t, blobKey2, requests[2].BlobKey)
require.Equal(t, blobKey2, requests[3].BlobKey)
require.Equal(t, blobKeys[0], requests[0].BlobKey)
require.Equal(t, blobKeys[0], requests[1].BlobKey)
require.Equal(t, blobKeys[2], requests[2].BlobKey)
require.Equal(t, blobKeys[2], requests[3].BlobKey)
})
c.relayClient.On("GetChunksByRange", mock.Anything, v2.RelayKey(1), mock.Anything).Return([][]byte{bundles10Bytes, bundles11Bytes}, nil).Run(func(args mock.Arguments) {
requests := args.Get(2).([]*clients.ChunkRequestByRange)
require.Len(t, requests, 2)
require.Equal(t, blobKey1, requests[0].BlobKey)
require.Equal(t, blobKey1, requests[1].BlobKey)
require.Equal(t, blobKeys[1], requests[0].BlobKey)
require.Equal(t, blobKeys[1], requests[1].BlobKey)
})
blobShards, rawBundles, err := c.node.DownloadBatch(ctx, batch)
require.NoError(t, err)
require.Len(t, blobShards, 3)
require.Equal(t, blobCert0, blobShards[0].BlobCertificate)
require.Equal(t, blobCert1, blobShards[1].BlobCertificate)
require.Equal(t, blobCert2, blobShards[2].BlobCertificate)
require.Equal(t, blobCerts[0], blobShards[0].BlobCertificate)
require.Equal(t, blobCerts[1], blobShards[1].BlobCertificate)
require.Equal(t, blobCerts[2], blobShards[2].BlobCertificate)
require.Contains(t, blobShards[0].Bundles, core.QuorumID(0))
require.Contains(t, blobShards[0].Bundles, core.QuorumID(1))
require.Contains(t, blobShards[1].Bundles, core.QuorumID(0))
require.Contains(t, blobShards[1].Bundles, core.QuorumID(1))
require.Contains(t, blobShards[2].Bundles, core.QuorumID(1))
require.Contains(t, blobShards[2].Bundles, core.QuorumID(2))
bundleEqual(t, bundles0[0], blobShards[0].Bundles[0])
bundleEqual(t, bundles0[1], blobShards[0].Bundles[1])
bundleEqual(t, bundles1[0], blobShards[1].Bundles[0])
bundleEqual(t, bundles1[1], blobShards[1].Bundles[1])
bundleEqual(t, bundles2[1], blobShards[2].Bundles[1])
bundleEqual(t, bundles2[2], blobShards[2].Bundles[2])
bundleEqual(t, bundles[0][0], blobShards[0].Bundles[0])
bundleEqual(t, bundles[0][1], blobShards[0].Bundles[1])
bundleEqual(t, bundles[1][0], blobShards[1].Bundles[0])
bundleEqual(t, bundles[1][1], blobShards[1].Bundles[1])
bundleEqual(t, bundles[2][1], blobShards[2].Bundles[1])
bundleEqual(t, bundles[2][2], blobShards[2].Bundles[2])

require.Len(t, rawBundles, 3)
require.Equal(t, blobCert0, rawBundles[0].BlobCertificate)
require.Equal(t, blobCert1, rawBundles[1].BlobCertificate)
require.Equal(t, blobCert2, rawBundles[2].BlobCertificate)
require.Equal(t, blobCerts[0], rawBundles[0].BlobCertificate)
require.Equal(t, blobCerts[1], rawBundles[1].BlobCertificate)
require.Equal(t, blobCerts[2], rawBundles[2].BlobCertificate)
require.Contains(t, rawBundles[0].Bundles, core.QuorumID(0))
require.Contains(t, rawBundles[0].Bundles, core.QuorumID(1))
require.Contains(t, rawBundles[1].Bundles, core.QuorumID(0))