revert: batchstore rewrite gc fixes 137c43c (#236)
istae authored Mar 1, 2022
1 parent 137c43c commit d7f7f5c
Showing 1 changed file with 13 additions and 15 deletions.
28 changes: 13 additions & 15 deletions pkg/check/gc/reserve.go
@@ -70,7 +70,7 @@ source files in the bee repo before the test is run.
 - Cluster must be fresh (i.e. no other previous transactions
   made on the underlying eth backend before the cluster is
   brought up)
-- Initial Radius(Default Depth) = 0
+- Initial Radius(Default Depth) = 2
 - Bucket Depth = 2
 - Reserve Capacity = 16 chunks
 - Cache Capacity = 10 chunks
@@ -96,7 +96,7 @@ A little bit about how the numbers make sense:
 *********************************************************
 - Buy an initial batch with depth 8 and amount 1 PLUR per
-  chunk. This makes the initial radius go from 0 to 4.
+  chunk. This makes the initial radius go from 2 to 4.
 - Upload 1 pinned chunk at bin 0 to the node.
 - Upload 10 chunks at the initial radius PO. These 10
   chunks will later be evicted from the reserve to the
@@ -158,15 +158,15 @@ func (c *Check) Run(ctx context.Context, cluster orchestration.Cluster, opts int
 	const (
 		cheapBatchAmount = 1
 		expensiveBatchAmount = 3
-		batchDepth = uint64(8) // the depth for the batches that we buy
-
-		radiusAfterSecondBatch = 5
+		batchDepth = uint64(8) // the depth for the batches that we buy
+		initialRadius = 2 // initial reserve radius
+		higherRadius = initialRadius + 1 // radius needed for the chunks that shouldnt be GCd
 	)

 	var (
 		pinnedChunk = bee.GenerateRandomChunkAt(rnd, overlay, 0)
-		lowValueChunks = bee.GenerateNRandomChunksAt(rnd, overlay, 10, radiusAfterSecondBatch-1)
-		lowValueHigherRadiusChunks = bee.GenerateNRandomChunksAt(rnd, overlay, 10, radiusAfterSecondBatch)
+		lowValueChunks = bee.GenerateNRandomChunksAt(rnd, overlay, 10, initialRadius)
+		lowValueHigherRadiusChunks = bee.GenerateNRandomChunksAt(rnd, overlay, 10, higherRadius)
 	)

 	origState, err := client.ReserveState(ctx)
@@ -175,8 +175,8 @@ func (c *Check) Run(ctx context.Context, cluster orchestration.Cluster, opts int
 	}

 	// do some sanity checks to assure test setup is correct
-	if origState.Radius != 0 {
-		return fmt.Errorf("wrong initial radius, got %d want %d", origState.Radius, 0)
+	if origState.Radius != initialRadius {
+		return fmt.Errorf("wrong initial radius, got %d want %d", origState.Radius, initialRadius)
 	}
 	if origState.StorageRadius != 0 {
 		return fmt.Errorf("wrong initial storage radius, got %d want %d", origState.StorageRadius, 0)
@@ -189,7 +189,6 @@ func (c *Check) Run(ctx context.Context, cluster orchestration.Cluster, opts int
 	if err != nil {
 		return fmt.Errorf("create batch: %w", err)
 	}
-	// radius is 4

 	_, err = client.UploadChunk(ctx, pinnedChunk.Data(), api.UploadOptions{Pin: true, BatchID: batchID, Deferred: true})
 	if err != nil {
@@ -204,21 +203,20 @@ func (c *Check) Run(ctx context.Context, cluster orchestration.Cluster, opts int
 		}
 	}

-	fmt.Printf("uploaded %d chunks with batch depth %d, amount %d, at radius 4\n", len(lowValueChunks), batchDepth, cheapBatchAmount)
+	fmt.Printf("uploaded %d chunks with batch depth %d, amount %d, at radius %d\n", len(lowValueChunks), batchDepth, cheapBatchAmount, initialRadius)

 	highValueBatch, err := client.CreatePostageBatch(ctx, expensiveBatchAmount, batchDepth, o.GasPrice, o.PostageLabel, true)
 	if err != nil {
 		return fmt.Errorf("create batch: %w", err)
 	}
-	// radius is 5

 	for _, c := range lowValueHigherRadiusChunks {
 		if _, err := client.UploadChunk(ctx, c.Data(), api.UploadOptions{BatchID: batchID, Deferred: true}); err != nil {
 			return fmt.Errorf("low value chunk: %w", err)
 		}
 	}

-	fmt.Printf("uploaded %d chunks with batch depth %d, amount %d, at radius 5\n", len(lowValueHigherRadiusChunks), batchDepth, cheapBatchAmount)
+	fmt.Printf("uploaded %d chunks with batch depth %d, amount %d, at radius %d\n", len(lowValueHigherRadiusChunks), batchDepth, cheapBatchAmount, higherRadius)

 	// allow time to sleep so that chunks can get synced and then GCd
 	time.Sleep(5 * time.Second)
@@ -229,8 +227,8 @@ func (c *Check) Run(ctx context.Context, cluster orchestration.Cluster, opts int
 	}
 	fmt.Println("Reserve state:", state)

-	if state.StorageRadius != state.Radius {
-		return fmt.Errorf("storage radius mismatch. got %d want %d", state.StorageRadius, state.Radius)
+	if state.StorageRadius != 2 {
+		return fmt.Errorf("storage radius mismatch. got %d want %d", state.StorageRadius, 2)
 	}

 	_, hasCount, err := client.HasChunks(ctx, bee.AddressOfChunk(lowValueChunks...))
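Note on the numbers: the test's doc comment says one batch of depth 8 pushes the radius from 2 to 4, and the old constant radiusAfterSecondBatch = 5 reflects a second identical purchase. A rough way to see where those values come from, assuming the batchstore picks the smallest radius r such that reserveCapacity * 2^r covers the chunks committed by all purchased batches (this formula and the expectedRadius helper below are an illustrative sketch, not bee's implementation):

package main

import (
	"fmt"
	"math"
)

// expectedRadius is an illustrative helper (not bee's batchstore code): it
// returns the smallest radius r such that reserveCapacity*2^r covers the
// total number of chunks committed by the purchased batches, where a batch
// of depth d commits 2^d chunks.
func expectedRadius(batchDepths []uint64, reserveCapacity float64) int {
	var commitment float64
	for _, d := range batchDepths {
		commitment += math.Pow(2, float64(d))
	}
	return int(math.Ceil(math.Log2(commitment / reserveCapacity)))
}

func main() {
	// Numbers from the test's doc comment: reserve capacity 16 chunks, batch depth 8.
	fmt.Println(expectedRadius([]uint64{8}, 16))    // 4: "initial radius go from 2 to 4"
	fmt.Println(expectedRadius([]uint64{8, 8}, 16)) // 5: the old radiusAfterSecondBatch
}

With the test's reserve capacity of 16 chunks, one depth-8 batch commits 2^8 = 256 chunks, giving radius 4; a second identical batch doubles the commitment and gives 5.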

0 comments on commit d7f7f5c

Please sign in to comment.
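On the PO terminology used with bee.GenerateRandomChunkAt and bee.GenerateNRandomChunksAt above: the proximity order (PO) of a chunk relative to the node is the number of leading bits its address shares with the node's overlay address, so uploading chunks "at the initial radius PO" means generating chunk addresses with at least that many leading bits in common. A minimal sketch of that bit count (the proximity helper is illustrative, not beekeeper's implementation):

package main

import (
	"fmt"
	"math/bits"
)

// proximity counts how many leading bits two addresses share, which is the
// proximity order (PO) that the test's radius values refer to.
func proximity(a, b []byte) int {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	for i := 0; i < n; i++ {
		if x := a[i] ^ b[i]; x != 0 {
			return i*8 + bits.LeadingZeros8(x)
		}
	}
	return n * 8
}

func main() {
	overlay := []byte{0b1010_0000, 0x00}
	chunk := []byte{0b1011_0000, 0x00}     // differs at the fourth bit
	fmt.Println(proximity(overlay, chunk)) // 3
}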