From d7f7f5c409980d980c2afb52daa415c232e3dfd2 Mon Sep 17 00:00:00 2001
From: istae <14264581+istae@users.noreply.github.com>
Date: Tue, 1 Mar 2022 19:37:23 +0300
Subject: [PATCH] revert: batchstore rewrite gc fixes
 137c43c6eb7d8b4d58eef0f1137e2c8366508029 (#236)

---
 pkg/check/gc/reserve.go | 28 +++++++++++++---------------
 1 file changed, 13 insertions(+), 15 deletions(-)

diff --git a/pkg/check/gc/reserve.go b/pkg/check/gc/reserve.go
index e91644edb..046e9ccf0 100644
--- a/pkg/check/gc/reserve.go
+++ b/pkg/check/gc/reserve.go
@@ -70,7 +70,7 @@ source files in the bee repo before the test is run.
 
 - Cluster must be fresh (i.e. no other previous transactions made on the
 underlying eth backend before the cluster is brought up)
-- Initial Radius(Default Depth) = 0
+- Initial Radius(Default Depth) = 2
 - Bucket Depth = 2
 - Reserve Capacity = 16 chunks
 - Cache Capacity = 10 chunks
@@ -96,7 +96,7 @@ A little bit about how the numbers make sense:
 
 *********************************************************
 - Buy an initial batch with depth 8 and amount 1 PLUR per
- chunk. This makes the initial radius go from 0 to 4.
+ chunk. This makes the initial radius go from 2 to 4.
 - Upload 1 pinned chunk at bin 0 to the node.
 - Upload 10 chunks at the initial radius PO. These 10
 chunks will later be evicted from the reserve to the
@@ -158,15 +158,15 @@ func (c *Check) Run(ctx context.Context, cluster orchestration.Cluster, opts int
 	const (
 		cheapBatchAmount     = 1
 		expensiveBatchAmount = 3
-		batchDepth           = uint64(8) // the depth for the batches that we buy
-
-		radiusAfterSecondBatch = 5
+		batchDepth           = uint64(8)         // the depth for the batches that we buy
+		initialRadius        = 2                 // initial reserve radius
+		higherRadius         = initialRadius + 1 // radius needed for the chunks that shouldnt be GCd
 	)
 
 	var (
 		pinnedChunk                = bee.GenerateRandomChunkAt(rnd, overlay, 0)
-		lowValueChunks             = bee.GenerateNRandomChunksAt(rnd, overlay, 10, radiusAfterSecondBatch-1)
-		lowValueHigherRadiusChunks = bee.GenerateNRandomChunksAt(rnd, overlay, 10, radiusAfterSecondBatch)
+		lowValueChunks             = bee.GenerateNRandomChunksAt(rnd, overlay, 10, initialRadius)
+		lowValueHigherRadiusChunks = bee.GenerateNRandomChunksAt(rnd, overlay, 10, higherRadius)
 	)
 
 	origState, err := client.ReserveState(ctx)
@@ -175,8 +175,8 @@ func (c *Check) Run(ctx context.Context, cluster orchestration.Cluster, opts int
 	}
 
 	// do some sanity checks to assure test setup is correct
-	if origState.Radius != 0 {
-		return fmt.Errorf("wrong initial radius, got %d want %d", origState.Radius, 0)
+	if origState.Radius != initialRadius {
+		return fmt.Errorf("wrong initial radius, got %d want %d", origState.Radius, initialRadius)
 	}
 	if origState.StorageRadius != 0 {
 		return fmt.Errorf("wrong initial storage radius, got %d want %d", origState.StorageRadius, 0)
@@ -189,7 +189,6 @@ func (c *Check) Run(ctx context.Context, cluster orchestration.Cluster, opts int
 	if err != nil {
 		return fmt.Errorf("create batch: %w", err)
 	}
-	// radius is 4
 
 	_, err = client.UploadChunk(ctx, pinnedChunk.Data(), api.UploadOptions{Pin: true, BatchID: batchID, Deferred: true})
 	if err != nil {
@@ -204,13 +203,12 @@ func (c *Check) Run(ctx context.Context, cluster orchestration.Cluster, opts int
 		}
 	}
 
-	fmt.Printf("uploaded %d chunks with batch depth %d, amount %d, at radius 4\n", len(lowValueChunks), batchDepth, cheapBatchAmount)
+	fmt.Printf("uploaded %d chunks with batch depth %d, amount %d, at radius %d\n", len(lowValueChunks), batchDepth, cheapBatchAmount, initialRadius)
 
 	highValueBatch, err := client.CreatePostageBatch(ctx, expensiveBatchAmount, batchDepth, o.GasPrice, o.PostageLabel, true)
 	if err != nil {
 		return fmt.Errorf("create batch: %w", err)
 	}
-	// radius is 5
 
 	for _, c := range lowValueHigherRadiusChunks {
 		if _, err := client.UploadChunk(ctx, c.Data(), api.UploadOptions{BatchID: batchID, Deferred: true}); err != nil {
@@ -218,7 +216,7 @@ func (c *Check) Run(ctx context.Context, cluster orchestration.Cluster, opts int
 		}
 	}
 
-	fmt.Printf("uploaded %d chunks with batch depth %d, amount %d, at radius 5\n", len(lowValueHigherRadiusChunks), batchDepth, cheapBatchAmount)
+	fmt.Printf("uploaded %d chunks with batch depth %d, amount %d, at radius %d\n", len(lowValueHigherRadiusChunks), batchDepth, cheapBatchAmount, higherRadius)
 
 	// allow time to sleep so that chunks can get synced and then GCd
 	time.Sleep(5 * time.Second)
@@ -229,8 +227,8 @@ func (c *Check) Run(ctx context.Context, cluster orchestration.Cluster, opts int
 	}
 	fmt.Println("Reserve state:", state)
 
-	if state.StorageRadius != state.Radius {
-		return fmt.Errorf("storage radius mismatch. got %d want %d", state.StorageRadius, state.Radius)
+	if state.StorageRadius != 2 {
+		return fmt.Errorf("storage radius mismatch. got %d want %d", state.StorageRadius, 2)
 	}
 
 	_, hasCount, err := client.HasChunks(ctx, bee.AddressOfChunk(lowValueChunks...))