diff --git a/codec_test.go b/codec_test.go index 4ada166..0de964d 100644 --- a/codec_test.go +++ b/codec_test.go @@ -34,10 +34,10 @@ func BenchmarkEncoding(b *testing.B) { } } -func generateRandData(count int, chunkSize int) [][]byte { +func generateRandData(count int, shareSize int) [][]byte { out := make([][]byte, count) for i := 0; i < count; i++ { - randData := make([]byte, chunkSize) + randData := make([]byte, shareSize) _, err := cryptorand.Read(randData) if err != nil { panic(err) @@ -70,8 +70,8 @@ func BenchmarkDecoding(b *testing.B) { } } -func generateMissingData(count int, chunkSize int, codec Codec) [][]byte { - randData := generateRandData(count, chunkSize) +func generateMissingData(count int, shareSize int, codec Codec) [][]byte { + randData := generateRandData(count, shareSize) encoded, err := codec.Encode(randData) if err != nil { panic(err) @@ -98,12 +98,12 @@ func newTestCodec() Codec { return &testCodec{} } -func (c *testCodec) Encode(chunk [][]byte) ([][]byte, error) { - return chunk, nil +func (c *testCodec) Encode(share [][]byte) ([][]byte, error) { + return share, nil } -func (c *testCodec) Decode(chunk [][]byte) ([][]byte, error) { - return chunk, nil +func (c *testCodec) Decode(share [][]byte) ([][]byte, error) { + return share, nil } func (c *testCodec) MaxChunks() int { diff --git a/codecs.go b/codecs.go index 210beac..d11d08d 100644 --- a/codecs.go +++ b/codecs.go @@ -6,8 +6,8 @@ const ( // Leopard is a codec that was originally implemented in the C++ library // https://github.com/catid/leopard. rsmt2d uses a Go port of the C++ // library in https://github.com/klauspost/reedsolomon. The Leopard codec - // uses 8-bit leopard for shards less than or equal to 256. The Leopard - // codec uses 16-bit leopard for shards greater than 256. + // uses 8-bit leopard for shares less than or equal to 256. The Leopard + // codec uses 16-bit leopard for shares greater than 256. Leopard = "Leopard" ) @@ -19,12 +19,13 @@ type Codec interface { // Missing shares must be nil. Returns original + parity data. Decode(data [][]byte) ([][]byte, error) // MaxChunks returns the max number of chunks this codec supports in a 2D - // original data square. + // original data square. Chunk is a synonym of share. MaxChunks() int // Name returns the name of the codec. Name() string // ValidateChunkSize returns an error if this codec does not support - // chunkSize. Returns nil if chunkSize is supported. + // chunkSize. Returns nil if chunkSize is supported. Chunk is a synonym of + // share. ValidateChunkSize(chunkSize int) error } diff --git a/datasquare.go b/datasquare.go index 0c16f59..53535d5 100644 --- a/datasquare.go +++ b/datasquare.go @@ -9,8 +9,9 @@ import ( "golang.org/x/sync/errgroup" ) -// ErrUnevenChunks is thrown when non-nil chunks are not all of equal size. -var ErrUnevenChunks = errors.New("non-nil chunks not all of equal size") +// ErrUnevenChunks is thrown when non-nil shares are not all of equal size. +// Note: chunks is synonymous with shares. +var ErrUnevenChunks = errors.New("non-nil shares not all of equal size") // dataSquare stores all data for an original data square (ODS) or extended // data square (EDS). 
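For illustration only (not part of this patch), a minimal sketch of driving the renamed, share-based Codec API described in codecs.go above. The wrapper name codecSketch, the choice of four 64-byte shares, and the bytes import are assumptions made for the example:

	func codecSketch() {
		codec := NewLeoRSCodec()
		// The Leopard codec rejects share sizes that are not a multiple of 64 bytes.
		if err := codec.ValidateChunkSize(64); err != nil {
			panic(err)
		}
		// Four original shares of 64 bytes each.
		shares := make([][]byte, 4)
		for i := range shares {
			shares[i] = bytes.Repeat([]byte{byte(i)}, 64)
		}
		// Encode returns the parity shares computed from the original shares.
		parity, err := codec.Encode(shares)
		if err != nil {
			panic(err)
		}
		_ = parity // len(parity) == len(shares): one parity share per original share
	}
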
Data is duplicated in both row-major and column-major @@ -20,7 +21,7 @@ type dataSquare struct { squareCol [][][]byte // col-major dataMutex sync.Mutex width uint - chunkSize uint + shareSize uint rowRoots [][]byte colRoots [][]byte createTreeFn TreeConstructorFn @@ -29,14 +30,15 @@ type dataSquare struct { // newDataSquare populates the data square from the supplied data and treeCreator. // No root calculation is performed. // data may have nil values. -func newDataSquare(data [][]byte, treeCreator TreeConstructorFn, chunkSize uint) (*dataSquare, error) { +func newDataSquare(data [][]byte, treeCreator TreeConstructorFn, shareSize uint) (*dataSquare, error) { width := int(math.Ceil(math.Sqrt(float64(len(data))))) if width*width != len(data) { + // TODO: export this error and modify chunks to shares return nil, errors.New("number of chunks must be a square number") } for _, d := range data { - if d != nil && len(d) != int(chunkSize) { + if d != nil && len(d) != int(shareSize) { return nil, ErrUnevenChunks } } @@ -46,7 +48,7 @@ func newDataSquare(data [][]byte, treeCreator TreeConstructorFn, chunkSize uint) squareRow[i] = data[i*width : i*width+width] for j := 0; j < width; j++ { - if squareRow[i][j] != nil && len(squareRow[i][j]) != int(chunkSize) { + if squareRow[i][j] != nil && len(squareRow[i][j]) != int(shareSize) { return nil, ErrUnevenChunks } } @@ -64,15 +66,16 @@ func newDataSquare(data [][]byte, treeCreator TreeConstructorFn, chunkSize uint) squareRow: squareRow, squareCol: squareCol, width: uint(width), - chunkSize: chunkSize, + shareSize: shareSize, createTreeFn: treeCreator, }, nil } // extendSquare extends the original data square by extendedWidth and fills -// the extended quadrants with fillerChunk. -func (ds *dataSquare) extendSquare(extendedWidth uint, fillerChunk []byte) error { - if uint(len(fillerChunk)) != ds.chunkSize { +// the extended quadrants with fillerShare. 
+func (ds *dataSquare) extendSquare(extendedWidth uint, fillerShare []byte) error { + if uint(len(fillerShare)) != ds.shareSize { + // TODO: export this error and rename chunk to share return errors.New("filler chunk size does not match data square chunk size") } @@ -81,12 +84,12 @@ func (ds *dataSquare) extendSquare(extendedWidth uint, fillerChunk []byte) error fillerExtendedRow := make([][]byte, extendedWidth) for i := uint(0); i < extendedWidth; i++ { - fillerExtendedRow[i] = fillerChunk + fillerExtendedRow[i] = fillerShare } fillerRow := make([][]byte, newWidth) for i := uint(0); i < newWidth; i++ { - fillerRow[i] = fillerChunk + fillerRow[i] = fillerShare } row := make([][]byte, ds.width) @@ -129,7 +132,8 @@ func (ds *dataSquare) row(x uint) [][]byte { func (ds *dataSquare) setRowSlice(x uint, y uint, newRow [][]byte) error { for i := uint(0); i < uint(len(newRow)); i++ { - if len(newRow[i]) != int(ds.chunkSize) { + if len(newRow[i]) != int(ds.shareSize) { + // TODO: export this error and rename chunk to share return errors.New("invalid chunk size") } } @@ -162,7 +166,8 @@ func (ds *dataSquare) col(y uint) [][]byte { func (ds *dataSquare) setColSlice(x uint, y uint, newCol [][]byte) error { for i := uint(0); i < uint(len(newCol)); i++ { - if len(newCol[i]) != int(ds.chunkSize) { + if len(newCol[i]) != int(ds.shareSize) { + // TODO: export this error and rename chunk to share return errors.New("invalid chunk size") } } @@ -307,22 +312,23 @@ func (ds *dataSquare) GetCell(x uint, y uint) []byte { if ds.squareRow[x][y] == nil { return nil } - cell := make([]byte, ds.chunkSize) + cell := make([]byte, ds.shareSize) copy(cell, ds.squareRow[x][y]) return cell } // SetCell sets a specific cell. The cell to set must be `nil`. Returns an error -// if the cell to set is not `nil` or newChunk is not the correct size. -func (ds *dataSquare) SetCell(x uint, y uint, newChunk []byte) error { +// if the cell to set is not `nil` or newShare is not the correct size. 
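As an aside (illustrative only, not part of this diff), the intended SetCell usage pattern, assuming a 4x4 EDS with 64-byte shares created via the constructors shown later in this diff:

	eds, err := NewExtendedDataSquare(NewLeoRSCodec(), NewDefaultTree, 4, 64)
	if err != nil {
		panic(err)
	}
	share := bytes.Repeat([]byte{1}, 64)
	// Cell (0, 0) is nil and the share size matches, so this succeeds.
	if err := eds.SetCell(0, 0, share); err != nil {
		panic(err)
	}
	// A second SetCell on the same coordinates returns an error because the
	// cell is no longer nil; wrong-sized shares are rejected the same way.
	err = eds.SetCell(0, 0, share)
	_ = err
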
+func (ds *dataSquare) SetCell(x uint, y uint, newShare []byte) error { if ds.squareRow[x][y] != nil { return fmt.Errorf("cannot set cell (%d, %d) as it already has a value %x", x, y, ds.squareRow[x][y]) } - if len(newChunk) != int(ds.chunkSize) { - return fmt.Errorf("cannot set cell with chunk size %d because dataSquare chunk size is %d", len(newChunk), ds.chunkSize) + if len(newShare) != int(ds.shareSize) { + // TODO: export this error and rename chunk to share + return fmt.Errorf("cannot set cell with chunk size %d because dataSquare chunk size is %d", len(newShare), ds.shareSize) } - ds.squareRow[x][y] = newChunk - ds.squareCol[y][x] = newChunk + ds.squareRow[x][y] = newShare + ds.squareCol[y][x] = newShare ds.resetRoots() return nil } diff --git a/datasquare_test.go b/datasquare_test.go index 69fe033..47170ae 100644 --- a/datasquare_test.go +++ b/datasquare_test.go @@ -17,7 +17,7 @@ func TestNewDataSquare(t *testing.T) { name string cells [][]byte expected [][][]byte - chunkSize uint + shareSize uint }{ {"1x1", [][]byte{{1, 2}}, [][][]byte{{{1, 2}}}, 2}, {"2x2", [][]byte{{1, 2}, {3, 4}, {5, 6}, {7, 8}}, [][][]byte{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}, 2}, @@ -25,7 +25,7 @@ func TestNewDataSquare(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - result, err := newDataSquare(test.cells, NewDefaultTree, test.chunkSize) + result, err := newDataSquare(test.cells, NewDefaultTree, test.shareSize) if err != nil { panic(err) } @@ -40,16 +40,16 @@ func TestInvalidDataSquareCreation(t *testing.T) { tests := []struct { name string cells [][]byte - chunkSize uint + shareSize uint }{ - {"InconsistentChunkNumber", [][]byte{{1, 2}, {3, 4}, {5, 6}}, 2}, - {"UnequalChunkSize", [][]byte{{1, 2}, {3, 4}, {5, 6}, {7}}, 2}, + {"InconsistentShareNumber", [][]byte{{1, 2}, {3, 4}, {5, 6}}, 2}, + {"UnequalShareSize", [][]byte{{1, 2}, {3, 4}, {5, 6}, {7}}, 2}, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - _, err := newDataSquare(test.cells, NewDefaultTree, test.chunkSize) + _, err := newDataSquare(test.cells, NewDefaultTree, test.shareSize) if err == nil { - t.Errorf("newDataSquare failed; chunks accepted with %v", test.name) + t.Errorf("newDataSquare failed; shares accepted with %v", test.name) } }) } @@ -76,9 +76,9 @@ func TestSetCell(t *testing.T) { wantErr: true, }, { - name: "expect error if new cell is not the correct chunk size", + name: "expect error if new cell is not the correct share size", originalCell: nil, - newCell: []byte{1, 2}, // incorrect chunk size + newCell: []byte{1, 2}, // incorrect share size wantErr: true, }, } @@ -186,7 +186,7 @@ func TestInvalidSquareExtension(t *testing.T) { } err = ds.extendSquare(1, []byte{0}) if err == nil { - t.Errorf("extendSquare failed; error not returned when filler chunk size does not match data square chunk size") + t.Errorf("extendSquare failed; error not returned when filler share size does not match data square share size") } } @@ -318,7 +318,7 @@ func Test_setRowSlice(t *testing.T) { wantErr: false, }, { - name: "returns an error if the new row has an invalid chunk size", + name: "returns an error if the new row has an invalid share size", newRow: [][]byte{{5, 6}}, x: 0, y: 0, @@ -374,7 +374,7 @@ func Test_setColSlice(t *testing.T) { wantErr: false, }, { - name: "returns an error if the new col has an invalid chunk size", + name: "returns an error if the new col has an invalid share size", newCol: [][]byte{{5, 6}}, x: 0, y: 0, @@ -410,7 +410,7 @@ func BenchmarkEDSRootsWithDefaultTree(b 
*testing.B) { b.Errorf("Failure to create square of size %d: %s", i, err) } b.Run( - fmt.Sprintf("%dx%dx%d ODS", i, i, int(square.chunkSize)), + fmt.Sprintf("%dx%dx%d ODS", i, i, int(square.shareSize)), func(b *testing.B) { for n := 0; n < b.N; n++ { square.resetRoots() @@ -458,7 +458,7 @@ func BenchmarkEDSRootsWithErasuredNMT(b *testing.B) { edsSizeMiBytes := 4 * odsSizeMiBytes b.Run( fmt.Sprintf("%dx%dx%d ODS=%dMB, EDS=%dMB", odsSize, odsSize, - int(square.chunkSize), + int(square.shareSize), odsSizeMiBytes, edsSizeMiBytes), func(b *testing.B) { for n := 0; n < b.N; n++ { @@ -521,8 +521,8 @@ func (d *errorTree) Root() ([]byte, error) { // setCell overwrites the contents of a specific cell. setCell does not perform // any input validation so most use cases should use `SetCell` instead of // `setCell`. This method exists strictly for testing. -func (ds *dataSquare) setCell(x uint, y uint, newChunk []byte) { - ds.squareRow[x][y] = newChunk - ds.squareCol[y][x] = newChunk +func (ds *dataSquare) setCell(x uint, y uint, newShare []byte) { + ds.squareRow[x][y] = newShare + ds.squareCol[y][x] = newShare ds.resetRoots() } diff --git a/extendeddatacrossword.go b/extendeddatacrossword.go index ade26e8..dbd7621 100644 --- a/extendeddatacrossword.go +++ b/extendeddatacrossword.go @@ -33,7 +33,7 @@ func (a Axis) String() string { } } -// ErrUnrepairableDataSquare is thrown when there is insufficient chunks to repair the square. +// ErrUnrepairableDataSquare is thrown when there is insufficient shares to repair the square. var ErrUnrepairableDataSquare = errors.New("failed to solve data square") // ErrByzantineData is returned when a repaired row or column does not match the @@ -380,7 +380,7 @@ func (eds *ExtendedDataSquare) preRepairSanityCheck( if err != nil { return err } - if !bytes.Equal(flattenChunks(parityShares), flattenChunks(eds.rowSlice(i, eds.originalDataWidth, eds.originalDataWidth))) { + if !bytes.Equal(flattenShares(parityShares), flattenShares(eds.rowSlice(i, eds.originalDataWidth, eds.originalDataWidth))) { return &ErrByzantineData{Row, i, eds.row(i)} } return nil @@ -410,7 +410,7 @@ func (eds *ExtendedDataSquare) preRepairSanityCheck( if err != nil { return err } - if !bytes.Equal(flattenChunks(parityShares), flattenChunks(eds.colSlice(eds.originalDataWidth, i, eds.originalDataWidth))) { + if !bytes.Equal(flattenShares(parityShares), flattenShares(eds.colSlice(eds.originalDataWidth, i, eds.originalDataWidth))) { return &ErrByzantineData{Col, i, eds.col(i)} } return nil diff --git a/extendeddatacrossword_test.go b/extendeddatacrossword_test.go index 470ef4b..bd1982e 100644 --- a/extendeddatacrossword_test.go +++ b/extendeddatacrossword_test.go @@ -82,7 +82,7 @@ func TestRepairExtendedDataSquare(t *testing.T) { func TestValidFraudProof(t *testing.T) { codec := NewLeoRSCodec() - corruptChunk := bytes.Repeat([]byte{66}, shareSize) + corruptShare := bytes.Repeat([]byte{66}, shareSize) original := createTestEds(codec, shareSize) @@ -91,7 +91,7 @@ func TestValidFraudProof(t *testing.T) { if err != nil { t.Fatalf("unexpected err while copying original data: %v, codec: :%s", err, codec.Name()) } - corrupted.setCell(0, 0, corruptChunk) + corrupted.setCell(0, 0, corruptShare) assert.NoError(t, err) rowRoots, err := corrupted.getRowRoots() @@ -122,7 +122,7 @@ func TestValidFraudProof(t *testing.T) { t.Errorf("could not encode fraud proof shares; %v", fraudProof) } startIndex := len(rebuiltShares) - int(corrupted.originalDataWidth) - if bytes.Equal(flattenChunks(parityShares), 
flattenChunks(rebuiltShares[startIndex:])) { + if bytes.Equal(flattenShares(parityShares), flattenShares(rebuiltShares[startIndex:])) { t.Errorf("invalid fraud proof %v", fraudProof) } } @@ -131,7 +131,7 @@ func TestValidFraudProof(t *testing.T) { func TestCannotRepairSquareWithBadRoots(t *testing.T) { codec := NewLeoRSCodec() - corruptChunk := bytes.Repeat([]byte{66}, shareSize) + corruptShare := bytes.Repeat([]byte{66}, shareSize) original := createTestEds(codec, shareSize) rowRoots, err := original.RowRoots() @@ -140,7 +140,7 @@ func TestCannotRepairSquareWithBadRoots(t *testing.T) { colRoots, err := original.ColRoots() require.NoError(t, err) - original.setCell(0, 0, corruptChunk) + original.setCell(0, 0, corruptShare) require.NoError(t, err) err = original.Repair(rowRoots, colRoots) if err == nil { @@ -149,7 +149,7 @@ func TestCannotRepairSquareWithBadRoots(t *testing.T) { } func TestCorruptedEdsReturnsErrByzantineData(t *testing.T) { - corruptChunk := bytes.Repeat([]byte{66}, shareSize) + corruptShare := bytes.Repeat([]byte{66}, shareSize) tests := []struct { name string @@ -157,24 +157,24 @@ func TestCorruptedEdsReturnsErrByzantineData(t *testing.T) { values [][]byte }{ { - name: "corrupt a chunk in the original data square", + name: "corrupt a share in the original data square", coords: [][]uint{{0, 0}}, - values: [][]byte{corruptChunk}, + values: [][]byte{corruptShare}, }, { - name: "corrupt a chunk in the extended data square", + name: "corrupt a share in the extended data square", coords: [][]uint{{0, 3}}, - values: [][]byte{corruptChunk}, + values: [][]byte{corruptShare}, }, { - name: "corrupt a chunk at (0, 0) and delete shares from the rest of the row", + name: "corrupt a share at (0, 0) and delete shares from the rest of the row", coords: [][]uint{{0, 0}, {0, 1}, {0, 2}, {0, 3}}, - values: [][]byte{corruptChunk, nil, nil, nil}, + values: [][]byte{corruptShare, nil, nil, nil}, }, { - name: "corrupt a chunk at (3, 0) and delete part of the first row ", + name: "corrupt a share at (3, 0) and delete part of the first row ", coords: [][]uint{{3, 0}, {0, 1}, {0, 2}, {0, 3}}, - values: [][]byte{corruptChunk, nil, nil, nil}, + values: [][]byte{corruptShare, nil, nil, nil}, }, { // This test case sets all shares along the diagonal to nil so that @@ -190,7 +190,7 @@ func TestCorruptedEdsReturnsErrByzantineData(t *testing.T) { // O O _ O // O O O _ coords: [][]uint{{0, 0}, {1, 1}, {2, 2}, {3, 3}, {0, 1}}, - values: [][]byte{nil, nil, nil, nil, corruptChunk}, + values: [][]byte{nil, nil, nil, nil, corruptShare}, }, } @@ -221,7 +221,7 @@ func TestCorruptedEdsReturnsErrByzantineData(t *testing.T) { var byzData *ErrByzantineData assert.ErrorAs(t, err, &byzData, "did not return a ErrByzantineData for a bad col or row") assert.NotEmpty(t, byzData.Shares) - assert.Contains(t, byzData.Shares, corruptChunk) + assert.Contains(t, byzData.Shares, corruptShare) }) } } @@ -231,7 +231,7 @@ func BenchmarkRepair(b *testing.B) { for originalDataWidth := 4; originalDataWidth <= 512; originalDataWidth *= 2 { codec := NewLeoRSCodec() if codec.MaxChunks() < originalDataWidth*originalDataWidth { - // Only test codecs that support this many chunks + // Only test codecs that support this many shares continue } diff --git a/extendeddatasquare.go b/extendeddatasquare.go index d076dd1..551e701 100644 --- a/extendeddatasquare.go +++ b/extendeddatasquare.go @@ -45,7 +45,7 @@ func (eds *ExtendedDataSquare) UnmarshalJSON(b []byte) error { return nil } -// ComputeExtendedDataSquare computes the extended data square 
for some chunks +// ComputeExtendedDataSquare computes the extended data square for some shares // of original data. func ComputeExtendedDataSquare( data [][]byte, @@ -53,15 +53,16 @@ func ComputeExtendedDataSquare( treeCreatorFn TreeConstructorFn, ) (*ExtendedDataSquare, error) { if len(data) > codec.MaxChunks() { + // TODO: export this error and rename chunk to share return nil, errors.New("number of chunks exceeds the maximum") } - chunkSize := getChunkSize(data) - err := codec.ValidateChunkSize(chunkSize) + shareSize := getShareSize(data) + err := codec.ValidateChunkSize(shareSize) if err != nil { return nil, err } - ds, err := newDataSquare(data, treeCreatorFn, uint(chunkSize)) + ds, err := newDataSquare(data, treeCreatorFn, uint(shareSize)) if err != nil { return nil, err } @@ -75,22 +76,23 @@ func ComputeExtendedDataSquare( return &eds, nil } -// ImportExtendedDataSquare imports an extended data square, represented as flattened chunks of data. +// ImportExtendedDataSquare imports an extended data square, represented as flattened shares of data. func ImportExtendedDataSquare( data [][]byte, codec Codec, treeCreatorFn TreeConstructorFn, ) (*ExtendedDataSquare, error) { if len(data) > 4*codec.MaxChunks() { + // TODO: export this error and rename chunk to share return nil, errors.New("number of chunks exceeds the maximum") } - chunkSize := getChunkSize(data) - err := codec.ValidateChunkSize(chunkSize) + shareSize := getShareSize(data) + err := codec.ValidateChunkSize(shareSize) if err != nil { return nil, err } - ds, err := newDataSquare(data, treeCreatorFn, uint(chunkSize)) + ds, err := newDataSquare(data, treeCreatorFn, uint(shareSize)) if err != nil { return nil, err } @@ -109,18 +111,18 @@ func ImportExtendedDataSquare( // NewExtendedDataSquare returns a new extended data square with a width of // edsWidth. All shares are initialized to nil so that the returned extended // data square can be populated via subsequent SetCell invocations. -func NewExtendedDataSquare(codec Codec, treeCreatorFn TreeConstructorFn, edsWidth uint, chunkSize uint) (*ExtendedDataSquare, error) { +func NewExtendedDataSquare(codec Codec, treeCreatorFn TreeConstructorFn, edsWidth uint, shareSize uint) (*ExtendedDataSquare, error) { err := validateEdsWidth(edsWidth) if err != nil { return nil, err } - err = codec.ValidateChunkSize(int(chunkSize)) + err = codec.ValidateChunkSize(int(shareSize)) if err != nil { return nil, err } data := make([][]byte, edsWidth*edsWidth) - dataSquare, err := newDataSquare(data, treeCreatorFn, chunkSize) + dataSquare, err := newDataSquare(data, treeCreatorFn, shareSize) if err != nil { return nil, err } @@ -137,8 +139,8 @@ func NewExtendedDataSquare(codec Codec, treeCreatorFn TreeConstructorFn, edsWidt func (eds *ExtendedDataSquare) erasureExtendSquare(codec Codec) error { eds.originalDataWidth = eds.width - // Extend original square with filler chunks. O represents original data. F - // represents filler chunks. + // Extend original square with filler shares. O represents original data. F + // represents filler shares. 
// // ------- ------- // | | | @@ -149,13 +151,13 @@ func (eds *ExtendedDataSquare) erasureExtendSquare(codec Codec) error { // | F | F | // | | | // ------- ------- - if err := eds.extendSquare(eds.width, bytes.Repeat([]byte{0}, int(eds.chunkSize))); err != nil { + if err := eds.extendSquare(eds.width, bytes.Repeat([]byte{0}, int(eds.shareSize))); err != nil { return err } errs, _ := errgroup.WithContext(context.Background()) - // Populate filler chunks in Q1 and Q2. E represents erasure data. + // Populate filler shares in Q1 and Q2. E represents erasure data. // // ------- ------- // | | | @@ -184,7 +186,7 @@ func (eds *ExtendedDataSquare) erasureExtendSquare(codec Codec) error { return err } - // Populate filler chunks in Q3. Note that the parity data in `Q3` will be + // Populate filler shares in Q3. Note that the parity data in `Q3` will be // identical if it is vertically extended from `Q1` or horizontally extended // from `Q2`. // @@ -301,7 +303,7 @@ func (eds *ExtendedDataSquare) Equals(other *ExtendedDataSquare) bool { if eds.codec.Name() != other.codec.Name() { return false } - if eds.chunkSize != other.chunkSize { + if eds.shareSize != other.shareSize { return false } if eds.width != other.width { @@ -350,8 +352,8 @@ func validateEdsWidth(edsWidth uint) error { return nil } -// getChunkSize returns the size of the first non-nil chunk in data. -func getChunkSize(data [][]byte) (chunkSize int) { +// getShareSize returns the size of the first non-nil share in data. +func getShareSize(data [][]byte) (shareSize int) { for _, d := range data { if d != nil { return len(d) diff --git a/extendeddatasquare_test.go b/extendeddatasquare_test.go index a532399..6afd17d 100644 --- a/extendeddatasquare_test.go +++ b/extendeddatasquare_test.go @@ -66,9 +66,9 @@ func TestComputeExtendedDataSquare(t *testing.T) { }) } - t.Run("returns an error if chunkSize is not a multiple of 64", func(t *testing.T) { - chunk := bytes.Repeat([]byte{1}, 65) - _, err := ComputeExtendedDataSquare([][]byte{chunk}, NewLeoRSCodec(), NewDefaultTree) + t.Run("returns an error if shareSize is not a multiple of 64", func(t *testing.T) { + share := bytes.Repeat([]byte{1}, 65) + _, err := ComputeExtendedDataSquare([][]byte{share}, NewLeoRSCodec(), NewDefaultTree) assert.Error(t, err) }) } @@ -80,9 +80,9 @@ func TestImportExtendedDataSquare(t *testing.T) { assert.NoError(t, err) assert.Equal(t, eds.Flattened(), got.Flattened()) }) - t.Run("returns an error if chunkSize is not a multiple of 64", func(t *testing.T) { - chunk := bytes.Repeat([]byte{1}, 65) - _, err := ImportExtendedDataSquare([][]byte{chunk}, NewLeoRSCodec(), NewDefaultTree) + t.Run("returns an error if shareSize is not a multiple of 64", func(t *testing.T) { + share := bytes.Repeat([]byte{1}, 65) + _, err := ImportExtendedDataSquare([][]byte{share}, NewLeoRSCodec(), NewDefaultTree) assert.Error(t, err) }) } @@ -119,11 +119,11 @@ func TestNewExtendedDataSquare(t *testing.T) { _, err := NewExtendedDataSquare(NewLeoRSCodec(), NewDefaultTree, edsWidth, shareSize) assert.Error(t, err) }) - t.Run("returns an error if chunkSize is not a multiple of 64", func(t *testing.T) { + t.Run("returns an error if shareSize is not a multiple of 64", func(t *testing.T) { edsWidth := uint(1) - chunkSize := uint(65) + shareSize := uint(65) - _, err := NewExtendedDataSquare(NewLeoRSCodec(), NewDefaultTree, edsWidth, chunkSize) + _, err := NewExtendedDataSquare(NewLeoRSCodec(), NewDefaultTree, edsWidth, shareSize) assert.Error(t, err) }) t.Run("returns a 4x4 EDS", func(t *testing.T) 
{ @@ -132,7 +132,7 @@ func TestNewExtendedDataSquare(t *testing.T) { got, err := NewExtendedDataSquare(NewLeoRSCodec(), NewDefaultTree, edsWidth, shareSize) assert.NoError(t, err) assert.Equal(t, edsWidth, got.width) - assert.Equal(t, uint(shareSize), got.chunkSize) + assert.Equal(t, uint(shareSize), got.shareSize) }) t.Run("returns a 4x4 EDS that can be populated via SetCell", func(t *testing.T) { edsWidth := uint(4) @@ -140,20 +140,20 @@ func TestNewExtendedDataSquare(t *testing.T) { got, err := NewExtendedDataSquare(NewLeoRSCodec(), NewDefaultTree, edsWidth, shareSize) assert.NoError(t, err) - chunk := bytes.Repeat([]byte{1}, int(shareSize)) - err = got.SetCell(0, 0, chunk) + share := bytes.Repeat([]byte{1}, int(shareSize)) + err = got.SetCell(0, 0, share) assert.NoError(t, err) - assert.Equal(t, chunk, got.squareRow[0][0]) + assert.Equal(t, share, got.squareRow[0][0]) }) - t.Run("returns an error when SetCell is invoked on an EDS with a chunk that is not the correct size", func(t *testing.T) { + t.Run("returns an error when SetCell is invoked on an EDS with a share that is not the correct size", func(t *testing.T) { edsWidth := uint(4) - incorrectChunkSize := shareSize + 1 + incorrectShareSize := shareSize + 1 got, err := NewExtendedDataSquare(NewLeoRSCodec(), NewDefaultTree, edsWidth, shareSize) assert.NoError(t, err) - chunk := bytes.Repeat([]byte{1}, incorrectChunkSize) - err = got.SetCell(0, 0, chunk) + share := bytes.Repeat([]byte{1}, incorrectShareSize) + err = got.SetCell(0, 0, share) assert.Error(t, err) }) } @@ -282,7 +282,7 @@ func BenchmarkExtensionEncoding(b *testing.B) { for i := 4; i < 513; i *= 2 { for codecName, codec := range codecs { if codec.MaxChunks() < i*i { - // Only test codecs that support this many chunks + // Only test codecs that support this many shares continue } @@ -309,7 +309,7 @@ func BenchmarkExtensionWithRoots(b *testing.B) { for i := 4; i < 513; i *= 2 { for codecName, codec := range codecs { if codec.MaxChunks() < i*i { - // Only test codecs that support this many chunks + // Only test codecs that support this many shares continue } @@ -334,11 +334,11 @@ func BenchmarkExtensionWithRoots(b *testing.B) { // genRandDS make a datasquare of random data, with width describing the number // of shares on a single side of the ds -func genRandDS(width int, chunkSize int) [][]byte { +func genRandDS(width int, shareSize int) [][]byte { var ds [][]byte count := width * width for i := 0; i < count; i++ { - share := make([]byte, chunkSize) + share := make([]byte, shareSize) _, err := rand.Read(share) if err != nil { panic(err) @@ -348,8 +348,8 @@ func genRandDS(width int, chunkSize int) [][]byte { return ds } -func genRandSortedDS(width int, chunkSize int, namespaceSize int) [][]byte { - ds := genRandDS(width, chunkSize) +func genRandSortedDS(width int, shareSize int, namespaceSize int) [][]byte { + ds := genRandDS(width, shareSize) // Sort the shares in the square based on their namespace sort.Slice(ds, func(i, j int) bool { @@ -407,7 +407,7 @@ func TestEquals(t *testing.T) { unequalCodecs := createExampleEds(t, shareSize) unequalCodecs.codec = newTestCodec() - unequalChunkSize := createExampleEds(t, shareSize*2) + unequalShareSize := createExampleEds(t, shareSize*2) unequalEds, err := ComputeExtendedDataSquare([][]byte{ones}, NewLeoRSCodec(), NewDefaultTree) require.NoError(t, err) @@ -422,8 +422,8 @@ func TestEquals(t *testing.T) { other: unequalCodecs, }, { - name: "unequal chunkSize", - other: unequalChunkSize, + name: "unequal shareSize", + other: 
unequalShareSize, }, { name: "unequalEds", @@ -482,11 +482,11 @@ func TestRoots(t *testing.T) { }) } -func createExampleEds(t *testing.T, chunkSize int) (eds *ExtendedDataSquare) { - ones := bytes.Repeat([]byte{1}, chunkSize) - twos := bytes.Repeat([]byte{2}, chunkSize) - threes := bytes.Repeat([]byte{3}, chunkSize) - fours := bytes.Repeat([]byte{4}, chunkSize) +func createExampleEds(t *testing.T, shareSize int) (eds *ExtendedDataSquare) { + ones := bytes.Repeat([]byte{1}, shareSize) + twos := bytes.Repeat([]byte{2}, shareSize) + threes := bytes.Repeat([]byte{3}, shareSize) + fours := bytes.Repeat([]byte{4}, shareSize) ods := [][]byte{ ones, twos, threes, fours, diff --git a/leopard.go b/leopard.go index 3c3d256..766f0bd 100644 --- a/leopard.go +++ b/leopard.go @@ -32,16 +32,16 @@ func (l *LeoRSCodec) Encode(data [][]byte) ([][]byte, error) { return nil, err } - shards := make([][]byte, dataLen*2) - copy(shards, data) - for i := dataLen; i < len(shards); i++ { - shards[i] = make([]byte, len(data[0])) + shares := make([][]byte, dataLen*2) + copy(shares, data) + for i := dataLen; i < len(shares); i++ { + shares[i] = make([]byte, len(data[0])) } - if err := enc.Encode(shards); err != nil { + if err := enc.Encode(shares); err != nil { return nil, err } - return shards[dataLen:], nil + return shares[dataLen:], nil } func (l *LeoRSCodec) Decode(data [][]byte) ([][]byte, error) { @@ -67,7 +67,7 @@ func (l *LeoRSCodec) loadOrInitEncoder(dataLen int) (reedsolomon.Encoder, error) return enc.(reedsolomon.Encoder), nil } -// MaxChunks returns the max number of chunks this codec supports in a 2D +// MaxChunks returns the max number of shares this codec supports in a 2D // original data square. func (l *LeoRSCodec) MaxChunks() int { // klauspost/reedsolomon supports an EDS width of 65536. See: @@ -75,7 +75,7 @@ func (l *LeoRSCodec) MaxChunks() int { maxEDSWidth := 65536 // An EDS width of 65536 is an ODS width of 32768. maxODSWidth := maxEDSWidth / 2 - // The max number of chunks in a 2D original data square is 32768 * 32768. + // The max number of shares in a 2D original data square is 32768 * 32768. return maxODSWidth * maxODSWidth } @@ -84,12 +84,12 @@ func (l *LeoRSCodec) Name() string { } // ValidateChunkSize returns an error if this codec does not support -// chunkSize. Returns nil if chunkSize is supported. -func (l *LeoRSCodec) ValidateChunkSize(chunkSize int) error { +// shareSize. Returns nil if shareSize is supported. +func (l *LeoRSCodec) ValidateChunkSize(shareSize int) error { // See https://github.com/catid/leopard/blob/22ddc7804998d31c8f1a2617ee720e063b1fa6cd/README.md?plain=1#L27 // See https://github.com/klauspost/reedsolomon/blob/fd3e6910a7e457563469172968f456ad9b7696b6/README.md?plain=1#L403 - if chunkSize%64 != 0 { - return fmt.Errorf("chunkSize %v must be a multiple of 64 bytes", chunkSize) + if shareSize%64 != 0 { + return fmt.Errorf("shareSize %v must be a multiple of 64 bytes", shareSize) } return nil } diff --git a/rsmt2d_test.go b/rsmt2d_test.go index 417ee89..f4062db 100644 --- a/rsmt2d_test.go +++ b/rsmt2d_test.go @@ -158,19 +158,19 @@ func TestEdsRepairTwice(t *testing.T) { // TestRepairWithOneQuarterPopulated is motivated by a use case from // celestia-node. It verifies that a new EDS can be populated via SetCell. After -// enough chunks have been populated, it verifies that the EDS can be repaired. +// enough shares have been populated, it verifies that the EDS can be repaired. 
// After the EDS is repaired, the test verifies that data in a repaired cell // matches the expected data. func TestRepairWithOneQuarterPopulated(t *testing.T) { edsWidth := 4 - chunkSize := 512 + shareSize := 512 - exampleEds := createExampleEds(t, chunkSize) + exampleEds := createExampleEds(t, shareSize) - eds, err := rsmt2d.NewExtendedDataSquare(rsmt2d.NewLeoRSCodec(), rsmt2d.NewDefaultTree, uint(edsWidth), uint(chunkSize)) + eds, err := rsmt2d.NewExtendedDataSquare(rsmt2d.NewLeoRSCodec(), rsmt2d.NewDefaultTree, uint(edsWidth), uint(shareSize)) require.NoError(t, err) - // Populate EDS with 1/4 of chunks using SetCell + // Populate EDS with 1/4 of shares using SetCell err = eds.SetCell(0, 0, exampleEds.GetCell(0, 0)) require.NoError(t, err) err = eds.SetCell(0, 1, exampleEds.GetCell(0, 1)) @@ -195,11 +195,11 @@ func TestRepairWithOneQuarterPopulated(t *testing.T) { assert.Equal(t, exampleEds.Flattened(), eds.Flattened()) } -func createExampleEds(t *testing.T, chunkSize int) (eds *rsmt2d.ExtendedDataSquare) { - ones := bytes.Repeat([]byte{1}, chunkSize) - twos := bytes.Repeat([]byte{2}, chunkSize) - threes := bytes.Repeat([]byte{3}, chunkSize) - fours := bytes.Repeat([]byte{4}, chunkSize) +func createExampleEds(t *testing.T, shareSize int) (eds *rsmt2d.ExtendedDataSquare) { + ones := bytes.Repeat([]byte{1}, shareSize) + twos := bytes.Repeat([]byte{2}, shareSize) + threes := bytes.Repeat([]byte{3}, shareSize) + fours := bytes.Repeat([]byte{4}, shareSize) ods := [][]byte{ ones, twos, threes, fours, diff --git a/utils.go b/utils.go index a385dcf..15163b1 100644 --- a/utils.go +++ b/utils.go @@ -1,14 +1,14 @@ package rsmt2d -func flattenChunks(chunks [][]byte) []byte { +func flattenShares(shares [][]byte) []byte { length := 0 - for _, chunk := range chunks { - length += len(chunk) + for _, share := range shares { + length += len(share) } flattened := make([]byte, 0, length) - for _, chunk := range chunks { - flattened = append(flattened, chunk...) + for _, share := range shares { + flattened = append(flattened, share...) } return flattened
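
For reference, an end-to-end sketch (illustrative only, not part of this patch) of the renamed share-based flow, assuming the package is imported as rsmt2d and each share is 64 bytes:

	// Four 64-byte shares form a 2x2 ODS, which extends to a 4x4 EDS.
	ones := bytes.Repeat([]byte{1}, 64)
	twos := bytes.Repeat([]byte{2}, 64)
	threes := bytes.Repeat([]byte{3}, 64)
	fours := bytes.Repeat([]byte{4}, 64)
	eds, err := rsmt2d.ComputeExtendedDataSquare(
		[][]byte{ones, twos, threes, fours},
		rsmt2d.NewLeoRSCodec(),
		rsmt2d.NewDefaultTree,
	)
	if err != nil {
		panic(err)
	}
	rowRoots, err := eds.RowRoots()
	if err != nil {
		panic(err)
	}
	colRoots, err := eds.ColRoots()
	if err != nil {
		panic(err)
	}
	// Drop some shares, re-import the flattened square, and repair it
	// against the previously committed row and column roots.
	flattened := eds.Flattened()
	flattened[1], flattened[2], flattened[3] = nil, nil, nil
	repaired, err := rsmt2d.ImportExtendedDataSquare(flattened, rsmt2d.NewLeoRSCodec(), rsmt2d.NewDefaultTree)
	if err != nil {
		panic(err)
	}
	if err := repaired.Repair(rowRoots, colRoots); err != nil {
		panic(err)
	}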