Skip to content

Commit

Permalink
Build out core encoder component
Browse files Browse the repository at this point in the history
  • Loading branch information
mooselumph committed May 31, 2023
1 parent 065492c commit 44a5ccc
Show file tree
Hide file tree
Showing 12 changed files with 170 additions and 54 deletions.
26 changes: 12 additions & 14 deletions core/assignment.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,10 +15,9 @@ type OperatorIndex uint

type ChunkIndex uint

type Assignments struct {
type AssignmentInfo struct {
StakeThreshold uint
NumChunks uint
Assignments []Assignment
TotalChunks uint
}

type Assignment struct {
Expand All @@ -35,9 +34,9 @@ func (c *Assignment) GetIndices() []ChunkIndex {
}

type AssignmentCoordinator interface {
GetAssignments(state OperatorState, quorum QuorumParams) (Assignments, error)
GetAssignments(state OperatorState, quorum QuorumParams) ([]Assignment, AssignmentInfo, error)

GetOperatorAssignment(state OperatorState, quorum QuorumParams, id OperatorId) (Assignment, error)
GetOperatorAssignment(state OperatorState, quorum QuorumParams, id OperatorId) (Assignment, AssignmentInfo, error)

ValidateConglomerateChunkSize(state OperatorState, headers []BlobHeader, chunkSize uint) error

Expand Down Expand Up @@ -77,7 +76,7 @@ func getStakeThreshold(state OperatorState, quorum QuorumParams) uint {
return uint(stakeThreshold.Uint64())
}

func (c *BasicAssignmentCoordinator) GetAssignments(state OperatorState, quorum QuorumParams) (Assignments, error) {
func (c *BasicAssignmentCoordinator) GetAssignments(state OperatorState, quorum QuorumParams) ([]Assignment, AssignmentInfo, error) {

numOperators := len(state.Operators)
numOperatorsBig := new(big.Int).SetUint64(uint64(numOperators))
Expand Down Expand Up @@ -118,10 +117,9 @@ func (c *BasicAssignmentCoordinator) GetAssignments(state OperatorState, quorum

stakeThreshold := getStakeThreshold(state, quorum)

return Assignments{
return assignments, AssignmentInfo{
StakeThreshold: stakeThreshold,
NumChunks: numChunks,
Assignments: assignments,
TotalChunks: numChunks,
}, nil

}
Expand All @@ -142,19 +140,19 @@ func GetOperatorAtIndex(headerHash [32]byte, index, numOperators int) int {
return int(operatorIndex.Uint64())
}

func (c *BasicAssignmentCoordinator) GetOperatorAssignment(state OperatorState, quorum QuorumParams, id OperatorId) (Assignment, error) {
func (c *BasicAssignmentCoordinator) GetOperatorAssignment(state OperatorState, quorum QuorumParams, id OperatorId) (Assignment, AssignmentInfo, error) {

assignments, err := c.GetAssignments(state, quorum)
assignments, info, err := c.GetAssignments(state, quorum)
if err != nil {
return Assignment{}, err
return Assignment{}, AssignmentInfo{}, err
}

operator, ok := state.OperatorMap[id]
if !ok {
return Assignment{}, ErrNotFound
return Assignment{}, AssignmentInfo{}, ErrNotFound
}

return assignments.Assignments[operator.Index], nil
return assignments[operator.Index], info, nil
}

func (c *BasicAssignmentCoordinator) ValidateConglomerateChunkSize(state OperatorState, headers []BlobHeader, chunkSize uint) error {
Expand Down
32 changes: 22 additions & 10 deletions core/data.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,19 +27,36 @@ type BlobHeader struct {
// Conglomerate

type Conglomerate interface {
Header() ConglomerateHeader
Data() [][]byte
ProveInclusion(header BlobHeader, index uint) error
}

type ConglomerateCommitments struct {
Commitment Commitment
DegreeProof Commitment
Degree uint
}

type ConglomerateHeader struct {
Commitment Commitment
DegreeProof Commitment
Degree uint
ChunkSize uint
NumOperators uint
NumBlobs uint
Quorum QuorumParams
ReferenceBlockNumber uint
}

func (h ConglomerateHeader) Commitments() ConglomerateCommitments {
return ConglomerateCommitments{
Commitment: h.Commitment,
DegreeProof: h.DegreeProof,
Degree: h.Degree,
}
}

// Batch

type Batch struct {
Expand All @@ -53,20 +70,15 @@ type HeaderBatch struct {
// Chunks

type Chunk struct {
Header ConglomerateHeader
Data ChunkData
Proof Proof
Data ChunkData
Proof Proof
}

type ChunkData interface {
}
type ChunkData interface{}

type Proof interface {
Verify(data ChunkData, commitment Commitment, indices []ChunkIndex) error
}
type Proof interface{}

type Commitment interface {
}
type Commitment interface{}

type ChunkBatch struct {
Chunks []Chunk
Expand Down
7 changes: 2 additions & 5 deletions core/encoding.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,10 +7,7 @@ type EncodingParams struct {
NumChunks uint
}

type EncodingGroup interface {
GetEncoder(params EncodingParams) Encoder
}

type Encoder interface {
Encode(conglom Conglomerate) ([]ChunkBatch, error)
Encode(data [][]byte, params EncodingParams) (ConglomerateCommitments, []ChunkBatch, error)
VerifyChunks(chunks []Chunk, indices []ChunkIndex, commitments ConglomerateCommitments, params EncodingParams) error
}
Empty file removed core/encoding/.keep
Empty file.
108 changes: 108 additions & 0 deletions core/encoding/encoder.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,108 @@
package encoding

import (
"errors"

"github.com/Layr-Labs/eigenda/core"
enc "github.com/Layr-Labs/eigenda/pkg/encoding/encoder"
kzgEnc "github.com/Layr-Labs/eigenda/pkg/encoding/kzgEncoder"
"github.com/Layr-Labs/eigenda/pkg/kzg/bn254"
)

var (
	// ErrConglomFeatureNotSupported is returned when the encoder receives a
	// multi-blob (conglomerate) payload; only single-blob encoding is
	// currently implemented (see Encoder.Encode).
	ErrConglomFeatureNotSupported = errors.New("conglomerate feature not supported")
)

// Conglomerate bundles a set of raw blob payloads together with the
// core.ConglomerateHeader describing them. It backs the core.Conglomerate
// interface via the Data and Header accessors below.
type Conglomerate struct {
	// data holds the raw blob payloads, one []byte per blob.
	data [][]byte
	// header carries commitments and batching metadata for the payloads.
	header core.ConglomerateHeader
}

// Data returns the raw blob payloads held by this conglomerate.
// The underlying slices are not copied; callers must not mutate them.
func (c *Conglomerate) Data() [][]byte {
	blobs := c.data
	return blobs
}

// Header returns the metadata header describing this conglomerate's
// payloads (commitments, chunk sizing, quorum, and reference block).
func (c *Conglomerate) Header() core.ConglomerateHeader {
	hdr := c.header
	return hdr
}

// toEncParams translates core-level encoding parameters into the encoder
// package's representation, widening the uint fields to uint64.
func toEncParams(params core.EncodingParams) enc.EncodingParams {
	var converted enc.EncodingParams
	converted.NumChunks = uint64(params.NumChunks)
	converted.ChunkLen = uint64(params.ChunkSize)
	return converted
}

// Encoder implements the core.Encoder interface on top of a KZG encoder
// group, which supplies per-parameter KZG encoders and verifiers.
type Encoder struct {
	// EncoderGroup creates/caches KZG encoders and verifiers keyed by
	// encoding parameters.
	EncoderGroup kzgEnc.KzgEncoderGroup
}

// Encode erasure-encodes the payload into per-operator chunks and computes
// the KZG commitment, low-degree proof, and degree for the encoded data.
//
// Only single-blob payloads are currently supported; any other input length
// yields ErrConglomFeatureNotSupported.
//
// NOTE(review): the core.Encoder interface declares []ChunkBatch as the
// second result while this method returns []core.Chunk — confirm which
// signature is intended.
func (e *Encoder) Encode(data [][]byte, params core.EncodingParams) (core.ConglomerateCommitments, []core.Chunk, error) {

	// Reject unsupported multi-blob input before doing any encoder setup.
	if len(data) != 1 {
		return core.ConglomerateCommitments{}, nil, ErrConglomFeatureNotSupported
	}

	encParams := toEncParams(params)

	encoder, err := e.EncoderGroup.GetKzgEncoder(encParams)
	if err != nil {
		return core.ConglomerateCommitments{}, nil, err
	}

	blob := data[0]

	commit, lowDegreeProof, kzgFrames, _, err := encoder.EncodeBytes(blob)
	if err != nil {
		// Propagate the underlying encoding error rather than masking it
		// with the unrelated ErrConglomFeatureNotSupported sentinel.
		return core.ConglomerateCommitments{}, nil, err
	}

	// Wrap each KZG frame as a core.Chunk carrying its coefficients and
	// opening proof.
	chunks := make([]core.Chunk, len(kzgFrames))
	for ind, frame := range kzgFrames {
		chunks[ind] = core.Chunk{
			Data:  frame.Coeffs,
			Proof: frame.Proof,
		}
	}

	// Degree is the number of field elements the blob maps to.
	degree := uint(len(enc.ToFrArray(blob)))
	commitments := core.ConglomerateCommitments{
		Commitment:  commit,
		DegreeProof: lowDegreeProof,
		Degree:      degree,
	}

	return commitments, chunks, nil
}

// VerifyChunks checks the low-degree proof against the commitment and then
// verifies each chunk's KZG opening proof at its assigned index. It returns
// nil only if every check passes.
func (e *Encoder) VerifyChunks(chunks []core.Chunk, indices []core.ChunkIndex, commitments core.ConglomerateCommitments, params core.EncodingParams) error {

	// Guard against mismatched inputs; indexing indices[ind] below would
	// otherwise panic on a short indices slice.
	if len(chunks) != len(indices) {
		return errors.New("number of chunks does not match number of indices")
	}

	encParams := toEncParams(params)

	verifier, err := e.EncoderGroup.GetKzgVerifier(encParams)
	if err != nil {
		return err
	}

	// Use checked type assertions so malformed commitments yield an error
	// instead of a runtime panic.
	commitment, ok := commitments.Commitment.(*bn254.G1Point)
	if !ok {
		return errors.New("commitment is not a bn254 G1 point")
	}
	degreeProof, ok := commitments.DegreeProof.(*bn254.G1Point)
	if !ok {
		return errors.New("degree proof is not a bn254 G1 point")
	}

	if err := verifier.VerifyCommit(commitment, degreeProof, uint64(commitments.Degree)); err != nil {
		return err
	}

	for ind := range chunks {
		// NOTE(review): Encode stores frame.Coeffs in Chunk.Data, but this
		// assertion expects a *kzgEnc.Frame — confirm the intended payload
		// type for Chunk.Data.
		frame, ok := chunks[ind].Data.(*kzgEnc.Frame)
		if !ok {
			return errors.New("chunk data is not a kzg frame")
		}
		if err := verifier.VerifyFrame(commitment, frame, uint64(indices[ind])); err != nil {
			return err
		}
	}

	return nil
}
14 changes: 11 additions & 3 deletions node/core/validator.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import (
)

type ChunkValidator struct {
Encoder core.Encoder
Assignment core.AssignmentCoordinator
ChainData core.ChainData
Id core.OperatorId
Expand All @@ -14,12 +15,19 @@ func NewChunkValidator() *ChunkValidator {
return &ChunkValidator{}
}

func (v *ChunkValidator) ValidateChunk(chunk core.Chunk, header core.ConglomerateHeader) error {
func (v *ChunkValidator) ValidateChunks(chunks []core.Chunk, header core.ConglomerateHeader) error {

operatorState, _ := v.ChainData.GetOperatorState(header.ReferenceBlockNumber)
assignment, _ := v.Assignment.GetOperatorAssignment(operatorState, header.Quorum, v.Id)
assignment, info, _ := v.Assignment.GetOperatorAssignment(operatorState, header.Quorum, v.Id)

params := core.EncodingParams{
ChunkSize: header.ChunkSize,
NumChunks: info.TotalChunks,
}

err := v.Encoder.VerifyChunks(chunks, assignment.GetIndices(), header.Commitments(), params)

// TODO Check that chunk length is equal to that of header

return chunk.Proof.Verify(chunk.Data, chunk.Header.Commitment, assignment.GetIndices())
return err
}
2 changes: 1 addition & 1 deletion node/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,5 +9,5 @@ type ChunkServer interface {
}

type ChunkValidator interface {
ValidateChunk(chunk core.Chunk, header core.ConglomerateHeader) error
ValidateChunks(chunks []core.Chunk, header core.ConglomerateHeader) error
}
9 changes: 1 addition & 8 deletions pkg/encoding/encoder/encode.go
Original file line number Diff line number Diff line change
Expand Up @@ -80,8 +80,6 @@ func (g *Encoder) MakeFrames(
defer log.Println("Exiting MakeFrames function")
}

numFrame := g.NumChunks

// reverse dataFr making easier to sample points
err := rb.ReverseBitOrderFr(polyEvals)
if err != nil {
Expand All @@ -90,15 +88,10 @@ func (g *Encoder) MakeFrames(
k := uint64(0)

indices := make([]uint32, 0)
frames := make([]Frame, numFrame)
frames := make([]Frame, g.NumChunks)

for i := uint64(0); i < uint64(g.NumChunks); i++ {

// if collect sufficient chunks
if k == numFrame {
return frames, indices, nil
}

// finds out which coset leader i-th node is having
j := rb.ReverseBitsLimited(uint32(g.NumChunks), uint32(i))

Expand Down
3 changes: 1 addition & 2 deletions pkg/encoding/kzgEncoder/encoder.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
package kzgEncoder

import (
"context"
"errors"
"fmt"
"log"
Expand Down Expand Up @@ -131,7 +130,7 @@ func (g *KzgEncoderGroup) NewKzgEncoder(params rs.EncodingParams) (*KzgEncoder,
}

// just a wrapper to take bytes not Fr Element
func (g *KzgEncoder) EncodeBytes(ctx context.Context, inputBytes []byte) (*bls.G1Point, *bls.G1Point, []Frame, []uint32, error) {
func (g *KzgEncoder) EncodeBytes(inputBytes []byte) (*bls.G1Point, *bls.G1Point, []Frame, []uint32, error) {
if g.Verbose {
log.Println("Entering EncodeBytes function")
defer log.Println("Exiting EncodeBytes function")
Expand Down
3 changes: 1 addition & 2 deletions pkg/encoding/kzgEncoder/encoder_fuzz_test.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
package kzgEncoder_test

import (
"context"
"testing"

rs "github.com/Layr-Labs/eigenda/pkg/encoding/encoder"
Expand All @@ -23,7 +22,7 @@ func FuzzOnlySystematic(f *testing.F) {
}

//encode the data
_, _, frames, _, err := enc.EncodeBytes(context.Background(), input)
_, _, frames, _, err := enc.EncodeBytes(input)

for _, frame := range frames {
assert.NotEqual(t, len(frame.Coeffs), 0)
Expand Down
5 changes: 2 additions & 3 deletions pkg/encoding/kzgEncoder/frame_test.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
package kzgEncoder_test

import (
"context"
"math"
"testing"

Expand All @@ -26,7 +25,7 @@ func TestEncodeDecodeFrame_AreInverses(t *testing.T) {
require.Nil(t, err)
require.NotNil(t, enc)

_, _, frames, _, err := enc.EncodeBytes(context.Background(), GETTYSBURG_ADDRESS_BYTES)
_, _, frames, _, err := enc.EncodeBytes(GETTYSBURG_ADDRESS_BYTES)
require.Nil(t, err)
require.NotNil(t, frames, err)

Expand All @@ -53,7 +52,7 @@ func TestVerify(t *testing.T) {
require.Nil(t, err)
require.NotNil(t, enc)

commit, _, frames, _, err := enc.EncodeBytes(context.Background(), GETTYSBURG_ADDRESS_BYTES)
commit, _, frames, _, err := enc.EncodeBytes(GETTYSBURG_ADDRESS_BYTES)
require.Nil(t, err)
require.NotNil(t, commit)
require.NotNil(t, frames)
Expand Down
Loading

0 comments on commit 44a5ccc

Please sign in to comment.