From 444a405c40f35a1912b879ab21401cd3adc97169 Mon Sep 17 00:00:00 2001 From: Kirill Taran Date: Tue, 14 Jun 2022 12:33:34 +0300 Subject: [PATCH 01/87] Checking build at every commit and releasing tags --- .github/workflows/release_build.yml | 21 +++++++++------------ .github/workflows/test_build.yml | 24 ++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 12 deletions(-) create mode 100644 .github/workflows/test_build.yml diff --git a/.github/workflows/release_build.yml b/.github/workflows/release_build.yml index 38bb44e78..e7bda46ef 100644 --- a/.github/workflows/release_build.yml +++ b/.github/workflows/release_build.yml @@ -1,30 +1,27 @@ -name: Release build +name: Release on: push: tags: - - '*' - # TODO how to only run on intersection of these two things? - #branches: - # - 'release/**' + - '*' jobs: - build-and-release: + release: runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v3 + - name: Golang dependency uses: actions/setup-go@v3 with: - go-version: '^1.14' + go-version: '^1.18' + - name: Build - run: | - make opera - cd build && tar -cvf opera.tar.gz opera + run: make opera + - name: Release - # Note: If you get Error 403 you probably need to enable write permissions for workflows uses: ncipollo/release-action@v1 with: + artifacts: "./build/opera" token: ${{ secrets.GITHUB_TOKEN }} - artifacts: "build/opera.tar.gz" diff --git a/.github/workflows/test_build.yml b/.github/workflows/test_build.yml new file mode 100644 index 000000000..508f00faa --- /dev/null +++ b/.github/workflows/test_build.yml @@ -0,0 +1,24 @@ +name: Check build + +on: [push] + +jobs: + check-build: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Golang dependency + uses: actions/setup-go@v3 + with: + go-version: '^1.18' + + - name: Build + run: make opera + + - name: Publish + uses: actions/upload-artifact@v2 + with: + name: opera + path: ./build/opera From fe42f8c275f7c022e728e308fdd5cce24fba2ed2 Mon Sep 17 00:00:00 2001 From: alex Date: Mon, 20 Jun 2022 23:33:51 +1000 Subject: [PATCH 02/87] debug --- cmd/tsearch/main.go | 9 ++++ cmd/tsearch/main_test.go | 88 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 97 insertions(+) create mode 100644 cmd/tsearch/main.go create mode 100644 cmd/tsearch/main_test.go diff --git a/cmd/tsearch/main.go b/cmd/tsearch/main.go new file mode 100644 index 000000000..20f0d0fb6 --- /dev/null +++ b/cmd/tsearch/main.go @@ -0,0 +1,9 @@ +package main + +import ( + "fmt" +) + +func main() { + fmt.Println("Hello World!") +} diff --git a/cmd/tsearch/main_test.go b/cmd/tsearch/main_test.go new file mode 100644 index 000000000..7edf6599d --- /dev/null +++ b/cmd/tsearch/main_test.go @@ -0,0 +1,88 @@ +package main + +import ( + "context" + "fmt" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + "github.com/syndtr/goleveldb/leveldb/opt" + + "github.com/Fantom-foundation/go-opera/gossip" + "github.com/Fantom-foundation/go-opera/integration" + "github.com/Fantom-foundation/lachesis-base/common/bigendian" + "github.com/Fantom-foundation/lachesis-base/inter/idx" + "github.com/Fantom-foundation/lachesis-base/kvdb/flushable" + "github.com/Fantom-foundation/lachesis-base/kvdb/leveldb" +) + +var ( + datadir = "/home/fabel/Work/fantom/blockchains/mainnet/chaindata" + blockFrom = idx.Block(0x256e898) // 39250072 - 39350071 = -99999 + blockTo = idx.Block(0x2586f37) + pattern = [][]common.Hash{ + []common.Hash{}, + 
[]common.Hash{common.HexToHash("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef")}, + []common.Hash(nil), + []common.Hash{common.HexToHash("0x00000000000000000000000089716ad7edc3be3b35695789c475f3e7a3deb12a")}, + } +) + +func TestDirect(t *testing.T) { + require := require.New(t) + + db, err := leveldb.New(datadir+"/gossip", cache64mb(""), 0, nil, nil) + require.NoError(err) + defer db.Close() + + const pos = 1 + + it := db.NewIterator( + append([]byte("Lt"), append(pattern[pos][0].Bytes(), pos)...), + uintToBytes(uint64(blockFrom))) + defer it.Release() + + rows := 0 + defer func(r *int) { + fmt.Printf("rows = %d\n", *r) + }(&rows) + for it.Next() { + rows++ + n := idx.Block(bytesToUint(it.Key()[(2 + 32 + 1) : (2+32+1)+8])) + if n > blockTo { + break + } + if rows%10000 == 0 { + fmt.Printf("rows\t~ %d\t%d\n", rows, n) + } + } +} + +func xTestMain(t *testing.T) { + require := require.New(t) + + rawProducer := leveldb.NewProducer(datadir, cache64mb) + dbs := flushable.NewSyncedPool(rawProducer, integration.FlushIDKey) + dbs.Initialize(rawProducer.Names()) + + cfg := gossip.LiteStoreConfig() + gdb := gossip.NewStore(dbs, cfg) + defer gdb.Close() + + edb := gdb.EvmStore() + logs, err := edb.EvmLogs.FindInBlocks(context.TODO(), blockFrom, blockTo, pattern) + t.Logf("Got: %d", len(logs)) + require.NoError(err) +} + +func cache64mb(string) int { + return 64 * opt.MiB +} +func uintToBytes(n uint64) []byte { + return bigendian.Uint64ToBytes(n) +} + +func bytesToUint(b []byte) uint64 { + return bigendian.BytesToUint64(b) +} From 415b87df2c1804e36449df952bd9fcd9d692ee7b Mon Sep 17 00:00:00 2001 From: alex Date: Thu, 23 Jun 2022 20:49:21 +1000 Subject: [PATCH 03/87] simpler searchLazy() signature --- topicsdb/search_lazy.go | 4 ++-- topicsdb/topicsdb.go | 4 ++-- topicsdb/topicsdb_test.go | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/topicsdb/search_lazy.go b/topicsdb/search_lazy.go index 1d6b1c87a..48c55feb6 100644 --- a/topicsdb/search_lazy.go +++ b/topicsdb/search_lazy.go @@ -8,11 +8,11 @@ import ( type logHandler func(rec *logrec) (gonext bool, err error) -func (tt *Index) searchLazy(ctx context.Context, pattern [][]common.Hash, blockStart []byte, blockEnd uint64, onMatched logHandler) (err error) { +func (tt *Index) searchLazy(ctx context.Context, pattern [][]common.Hash, blockStart, blockEnd uint64, onMatched logHandler) (err error) { if ctx == nil { ctx = context.Background() } - _, err = tt.walkFirst(ctx, blockStart, blockEnd, pattern, 0, onMatched) + _, err = tt.walkFirst(ctx, uintToBytes(blockStart), blockEnd, pattern, 0, onMatched) return } diff --git a/topicsdb/topicsdb.go b/topicsdb/topicsdb.go index 0c3947dcc..9d3db0916 100644 --- a/topicsdb/topicsdb.go +++ b/topicsdb/topicsdb.go @@ -73,7 +73,7 @@ func (tt *Index) ForEach(ctx context.Context, pattern [][]common.Hash, onLog fun return } - return tt.searchLazy(ctx, pattern, nil, 0, onMatched) + return tt.searchLazy(ctx, pattern, 0, 0, onMatched) } // ForEachInBlocks matches log records of block range by pattern. 1st pattern element is an address. 
@@ -97,7 +97,7 @@ func (tt *Index) ForEachInBlocks(ctx context.Context, from, to idx.Block, patter return } - return tt.searchLazy(ctx, pattern, uintToBytes(uint64(from)), uint64(to), onMatched) + return tt.searchLazy(ctx, pattern, uint64(from), uint64(to), onMatched) } func limitPattern(pattern [][]common.Hash) (limited [][]common.Hash, err error) { diff --git a/topicsdb/topicsdb_test.go b/topicsdb/topicsdb_test.go index 360019291..9a02f3d7a 100644 --- a/topicsdb/topicsdb_test.go +++ b/topicsdb/topicsdb_test.go @@ -60,7 +60,7 @@ func (tt *Index) FindInBlocksAsync(ctx context.Context, from, to idx.Block, patt return } - err = tt.searchLazy(ctx, pattern, uintToBytes(uint64(from)), uint64(to), onMatched) + err = tt.searchLazy(ctx, pattern, uint64(from), uint64(to), onMatched) wg.Wait() return From 30c8ab6d1f1cc5024e6006276a8c938258ba4d51 Mon Sep 17 00:00:00 2001 From: alex Date: Fri, 24 Jun 2022 02:29:58 +1000 Subject: [PATCH 04/87] simple topicsdb/Index.searchParallel() --- topicsdb/record.go | 2 + topicsdb/search_parallel.go | 97 +++++++++++++++++++++++++++++++++++++ topicsdb/topicsdb.go | 4 +- topicsdb/topicsdb_test.go | 2 +- 4 files changed, 102 insertions(+), 3 deletions(-) create mode 100644 topicsdb/search_parallel.go diff --git a/topicsdb/record.go b/topicsdb/record.go index 5c00336ac..a807ecbc7 100644 --- a/topicsdb/record.go +++ b/topicsdb/record.go @@ -12,6 +12,8 @@ type ( topicsCount uint8 result *types.Log err error + + matched int } ) diff --git a/topicsdb/search_parallel.go b/topicsdb/search_parallel.go new file mode 100644 index 000000000..4a35fc8f6 --- /dev/null +++ b/topicsdb/search_parallel.go @@ -0,0 +1,97 @@ +package topicsdb + +import ( + "context" + "sync" + + "github.com/ethereum/go-ethereum/common" +) + +func (tt *Index) searchParallel(ctx context.Context, pattern [][]common.Hash, blockStart, blockEnd uint64, onMatched logHandler) (err error) { + if ctx == nil { + ctx = context.Background() + } + + var ( + flag sync.Mutex + wgStart, wgFinish sync.WaitGroup + count int + result = make(map[ID]*logrec) + ) + + wgStart.Add(1) + for pos := range pattern { + if len(pattern[pos]) > 0 { + count++ + } + for _, variant := range pattern[pos] { + wgFinish.Add(1) + go tt.scanPatternVariant( + uint8(pos), variant, blockStart, + func(rec *logrec) (gonext bool, err error) { + wgStart.Wait() + + if rec == nil { + wgFinish.Done() + return + } + + err = ctx.Err() + gonext = (err == nil) + + if rec.topicsCount < uint8(len(pattern)-1) { + return + } + if blockEnd > 0 && rec.ID.BlockNumber() > blockEnd { + gonext = false + return + } + + flag.Lock() + defer flag.Unlock() + + if prev, ok := result[rec.ID]; ok { + prev.matched++ + } else { + rec.matched++ + result[rec.ID] = rec + } + + return + }) + } + } + wgStart.Done() + + wgFinish.Wait() + var gonext bool + for _, rec := range result { + if rec.matched != count { + continue + } + gonext, err = onMatched(rec) + if !gonext { + break + } + } + + return +} + +func (tt *Index) scanPatternVariant(pos uint8, variant common.Hash, start uint64, onMatched logHandler) { + prefix := append(variant.Bytes(), posToBytes(pos)...) 
+ + it := tt.table.Topic.NewIterator(prefix, uintToBytes(start)) + defer it.Release() + for it.Next() { + id := extractLogrecID(it.Key()) + topicCount := bytesToPos(it.Value()) + rec := newLogrec(id, topicCount) + + gonext, _ := onMatched(rec) + if !gonext { + break + } + } + onMatched(nil) +} diff --git a/topicsdb/topicsdb.go b/topicsdb/topicsdb.go index 9d3db0916..45054b19e 100644 --- a/topicsdb/topicsdb.go +++ b/topicsdb/topicsdb.go @@ -73,7 +73,7 @@ func (tt *Index) ForEach(ctx context.Context, pattern [][]common.Hash, onLog fun return } - return tt.searchLazy(ctx, pattern, 0, 0, onMatched) + return tt.searchParallel(ctx, pattern, 0, 0, onMatched) } // ForEachInBlocks matches log records of block range by pattern. 1st pattern element is an address. @@ -97,7 +97,7 @@ func (tt *Index) ForEachInBlocks(ctx context.Context, from, to idx.Block, patter return } - return tt.searchLazy(ctx, pattern, uint64(from), uint64(to), onMatched) + return tt.searchParallel(ctx, pattern, uint64(from), uint64(to), onMatched) } func limitPattern(pattern [][]common.Hash) (limited [][]common.Hash, err error) { diff --git a/topicsdb/topicsdb_test.go b/topicsdb/topicsdb_test.go index 9a02f3d7a..380ca6753 100644 --- a/topicsdb/topicsdb_test.go +++ b/topicsdb/topicsdb_test.go @@ -60,7 +60,7 @@ func (tt *Index) FindInBlocksAsync(ctx context.Context, from, to idx.Block, patt return } - err = tt.searchLazy(ctx, pattern, uint64(from), uint64(to), onMatched) + err = tt.searchParallel(ctx, pattern, uint64(from), uint64(to), onMatched) wg.Wait() return From 413c538be212f4c0658189305dcd91d46c918722 Mon Sep 17 00:00:00 2001 From: alex Date: Fri, 24 Jun 2022 18:33:42 +1000 Subject: [PATCH 05/87] fix of limitPattern() --- topicsdb/topicsdb.go | 4 ++-- topicsdb/topicsdb_test.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/topicsdb/topicsdb.go b/topicsdb/topicsdb.go index 45054b19e..acde8129a 100644 --- a/topicsdb/topicsdb.go +++ b/topicsdb/topicsdb.go @@ -101,8 +101,8 @@ func (tt *Index) ForEachInBlocks(ctx context.Context, from, to idx.Block, patter } func limitPattern(pattern [][]common.Hash) (limited [][]common.Hash, err error) { - if len(pattern) > MaxTopicsCount { - limited = make([][]common.Hash, MaxTopicsCount) + if len(pattern) > (MaxTopicsCount + 1) { + limited = make([][]common.Hash, (MaxTopicsCount + 1)) } else { limited = make([][]common.Hash, len(pattern)) } diff --git a/topicsdb/topicsdb_test.go b/topicsdb/topicsdb_test.go index 380ca6753..7fe573c5b 100644 --- a/topicsdb/topicsdb_test.go +++ b/topicsdb/topicsdb_test.go @@ -382,8 +382,8 @@ func TestPatternLimit(t *testing.T) { err: nil, }, { - pattern: append(append(make([][]common.Hash, MaxTopicsCount-1), []common.Hash{hash.FakeHash(1)}), []common.Hash{hash.FakeHash(1)}), - exp: append(make([][]common.Hash, MaxTopicsCount-1), []common.Hash{hash.FakeHash(1)}), + pattern: append(append(make([][]common.Hash, MaxTopicsCount), []common.Hash{hash.FakeHash(1)}), []common.Hash{hash.FakeHash(1)}), + exp: append(make([][]common.Hash, MaxTopicsCount), []common.Hash{hash.FakeHash(1)}), err: nil, }, } From 9adc3caaa22355ea0154b3fae2a72e7fdf134810 Mon Sep 17 00:00:00 2001 From: alex Date: Fri, 24 Jun 2022 23:05:38 +1000 Subject: [PATCH 06/87] aggregator func --- topicsdb/search_parallel.go | 101 ++++++++++++++++++++---------------- 1 file changed, 56 insertions(+), 45 deletions(-) diff --git a/topicsdb/search_parallel.go b/topicsdb/search_parallel.go index 4a35fc8f6..94e084dcd 100644 --- a/topicsdb/search_parallel.go +++ b/topicsdb/search_parallel.go 
@@ -13,60 +13,71 @@ func (tt *Index) searchParallel(ctx context.Context, pattern [][]common.Hash, bl } var ( - flag sync.Mutex - wgStart, wgFinish sync.WaitGroup - count int - result = make(map[ID]*logrec) + threads sync.WaitGroup + positions = make([]int, 0, len(pattern)) + result = make(map[ID]*logrec) ) - wgStart.Add(1) + var mu sync.Mutex + aggregator := func(pos, num int) logHandler { + return func(rec *logrec) (gonext bool, err error) { + if rec == nil { + threads.Done() + return + } + + err = ctx.Err() + if err != nil { + return + } + + if blockEnd > 0 && rec.ID.BlockNumber() > blockEnd { + return + } + + gonext = true + + if rec.topicsCount < uint8(len(pattern)-1) { + return + } + + mu.Lock() + defer mu.Unlock() + + if prev, ok := result[rec.ID]; ok { + prev.matched++ + } else { + rec.matched++ + result[rec.ID] = rec + } + + return + } + } + + // start the threads + var preparing sync.WaitGroup + preparing.Add(1) for pos := range pattern { - if len(pattern[pos]) > 0 { - count++ + if len(pattern[pos]) == 0 { + continue } - for _, variant := range pattern[pos] { - wgFinish.Add(1) - go tt.scanPatternVariant( - uint8(pos), variant, blockStart, - func(rec *logrec) (gonext bool, err error) { - wgStart.Wait() - - if rec == nil { - wgFinish.Done() - return - } - - err = ctx.Err() - gonext = (err == nil) - - if rec.topicsCount < uint8(len(pattern)-1) { - return - } - if blockEnd > 0 && rec.ID.BlockNumber() > blockEnd { - gonext = false - return - } - - flag.Lock() - defer flag.Unlock() - - if prev, ok := result[rec.ID]; ok { - prev.matched++ - } else { - rec.matched++ - result[rec.ID] = rec - } - - return - }) + positions = append(positions, pos) + for i, variant := range pattern[pos] { + threads.Add(1) + go func(pos, i int, variant common.Hash) { + onMatched := aggregator(pos, i) + preparing.Wait() + tt.scanPatternVariant(uint8(pos), variant, blockStart, onMatched) + }(pos, i, variant) } } - wgStart.Done() + preparing.Done() - wgFinish.Wait() + threads.Wait() var gonext bool for _, rec := range result { - if rec.matched != count { + if rec.matched != len(positions) { continue } gonext, err = onMatched(rec) From 0a011c596f12c650500e9370d473b801d1d625fa Mon Sep 17 00:00:00 2001 From: alex Date: Sat, 25 Jun 2022 17:22:19 +1000 Subject: [PATCH 07/87] synchronizator stub --- topicsdb/search_parallel.go | 19 +++++++++---------- topicsdb/sync.go | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 10 deletions(-) create mode 100644 topicsdb/sync.go diff --git a/topicsdb/search_parallel.go b/topicsdb/search_parallel.go index 94e084dcd..4a1adde43 100644 --- a/topicsdb/search_parallel.go +++ b/topicsdb/search_parallel.go @@ -13,16 +13,15 @@ func (tt *Index) searchParallel(ctx context.Context, pattern [][]common.Hash, bl } var ( - threads sync.WaitGroup - positions = make([]int, 0, len(pattern)) - result = make(map[ID]*logrec) + syncing = newSynchronizator() + mu sync.Mutex + result = make(map[ID]*logrec) ) - var mu sync.Mutex aggregator := func(pos, num int) logHandler { return func(rec *logrec) (gonext bool, err error) { if rec == nil { - threads.Done() + syncing.FinishThread(pos, num) return } @@ -31,7 +30,8 @@ func (tt *Index) searchParallel(ctx context.Context, pattern [][]common.Hash, bl return } - if blockEnd > 0 && rec.ID.BlockNumber() > blockEnd { + block := rec.ID.BlockNumber() + if blockEnd > 0 && block > blockEnd { return } @@ -62,9 +62,8 @@ func (tt *Index) searchParallel(ctx context.Context, pattern [][]common.Hash, bl if len(pattern[pos]) == 0 { continue } - 
positions = append(positions, pos) for i, variant := range pattern[pos] { - threads.Add(1) + syncing.StartThread(pos, i) go func(pos, i int, variant common.Hash) { onMatched := aggregator(pos, i) preparing.Wait() @@ -74,10 +73,10 @@ func (tt *Index) searchParallel(ctx context.Context, pattern [][]common.Hash, bl } preparing.Done() - threads.Wait() + syncing.WaitForThreads() var gonext bool for _, rec := range result { - if rec.matched != len(positions) { + if rec.matched != syncing.Criteries() { continue } gonext, err = onMatched(rec) diff --git a/topicsdb/sync.go b/topicsdb/sync.go new file mode 100644 index 000000000..fc0d55e3b --- /dev/null +++ b/topicsdb/sync.go @@ -0,0 +1,36 @@ +package topicsdb + +import ( + "sync" +) + +type synchronizator struct { + sync.Mutex + threads sync.WaitGroup + positions []int +} + +func newSynchronizator() *synchronizator { + return &synchronizator{ + positions: make([]int, 0), + } +} + +func (s *synchronizator) StartThread(pos int, num int) { + s.threads.Add(1) + if len(s.positions) == 0 || s.positions[len(s.positions)-1] != pos { + s.positions = append(s.positions, pos) + } +} + +func (s *synchronizator) FinishThread(pos int, num int) { + s.threads.Done() +} + +func (s *synchronizator) WaitForThreads() { + s.threads.Wait() +} + +func (s *synchronizator) Criteries() int { + return len(s.positions) +} From eea1e860d19e396fd777bf186f97bd8939e5cee7 Mon Sep 17 00:00:00 2001 From: alex Date: Sat, 25 Jun 2022 18:19:49 +1000 Subject: [PATCH 08/87] return found log straightway --- topicsdb/search_parallel.go | 28 +++++++++++++++------------- topicsdb/sync.go | 3 +++ 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/topicsdb/search_parallel.go b/topicsdb/search_parallel.go index 4a1adde43..db9f76ba6 100644 --- a/topicsdb/search_parallel.go +++ b/topicsdb/search_parallel.go @@ -35,7 +35,10 @@ func (tt *Index) searchParallel(ctx context.Context, pattern [][]common.Hash, bl return } - gonext = true + gonext = syncing.GoNext + if !gonext { + return + } if rec.topicsCount < uint8(len(pattern)-1) { return @@ -45,12 +48,21 @@ func (tt *Index) searchParallel(ctx context.Context, pattern [][]common.Hash, bl defer mu.Unlock() if prev, ok := result[rec.ID]; ok { - prev.matched++ + rec = prev } else { - rec.matched++ result[rec.ID] = rec } + rec.matched++ + if rec.matched == syncing.Criteries() { + delete(result, rec.ID) + gonext, err = onMatched(rec) + if !gonext { + syncing.GoNext = false + return + } + } + return } } @@ -74,16 +86,6 @@ func (tt *Index) searchParallel(ctx context.Context, pattern [][]common.Hash, bl preparing.Done() syncing.WaitForThreads() - var gonext bool - for _, rec := range result { - if rec.matched != syncing.Criteries() { - continue - } - gonext, err = onMatched(rec) - if !gonext { - break - } - } return } diff --git a/topicsdb/sync.go b/topicsdb/sync.go index fc0d55e3b..284972772 100644 --- a/topicsdb/sync.go +++ b/topicsdb/sync.go @@ -8,11 +8,14 @@ type synchronizator struct { sync.Mutex threads sync.WaitGroup positions []int + + GoNext bool } func newSynchronizator() *synchronizator { return &synchronizator{ positions: make([]int, 0), + GoNext: true, } } From de479be555ff8cbef0f88457960774c5d51e624b Mon Sep 17 00:00:00 2001 From: alex Date: Sun, 26 Jun 2022 01:31:45 +1000 Subject: [PATCH 09/87] sync threads by block --- topicsdb/search_parallel.go | 11 ++-- topicsdb/sync.go | 114 ++++++++++++++++++++++++++++++++---- 2 files changed, 107 insertions(+), 18 deletions(-) diff --git a/topicsdb/search_parallel.go 
b/topicsdb/search_parallel.go index db9f76ba6..f0f38085d 100644 --- a/topicsdb/search_parallel.go +++ b/topicsdb/search_parallel.go @@ -7,7 +7,7 @@ import ( "github.com/ethereum/go-ethereum/common" ) -func (tt *Index) searchParallel(ctx context.Context, pattern [][]common.Hash, blockStart, blockEnd uint64, onMatched logHandler) (err error) { +func (tt *Index) searchParallel(ctx context.Context, pattern [][]common.Hash, blockStart, blockEnd uint64, onMatched logHandler) error { if ctx == nil { ctx = context.Background() } @@ -35,7 +35,7 @@ func (tt *Index) searchParallel(ctx context.Context, pattern [][]common.Hash, bl return } - gonext = syncing.GoNext + gonext = syncing.GoNext(block) if !gonext { return } @@ -52,13 +52,12 @@ func (tt *Index) searchParallel(ctx context.Context, pattern [][]common.Hash, bl } else { result[rec.ID] = rec } - rec.matched++ - if rec.matched == syncing.Criteries() { + if rec.matched == syncing.CriteriesCount() { delete(result, rec.ID) gonext, err = onMatched(rec) if !gonext { - syncing.GoNext = false + syncing.Halt() return } } @@ -87,7 +86,7 @@ func (tt *Index) searchParallel(ctx context.Context, pattern [][]common.Hash, bl syncing.WaitForThreads() - return + return ctx.Err() } func (tt *Index) scanPatternVariant(pos uint8, variant common.Hash, start uint64, onMatched logHandler) { diff --git a/topicsdb/sync.go b/topicsdb/sync.go index 284972772..90316f84f 100644 --- a/topicsdb/sync.go +++ b/topicsdb/sync.go @@ -1,39 +1,129 @@ package topicsdb import ( + "math" "sync" ) -type synchronizator struct { - sync.Mutex - threads sync.WaitGroup - positions []int +type ( + posCounter struct { + pos int + count int + } - GoNext bool -} + blockCounter struct { + wait chan struct{} + count int + } + + synchronizator struct { + mu sync.Mutex + threads sync.WaitGroup + positions []*posCounter + goNext bool + minBlock uint64 + blocks map[uint64]*blockCounter + } +) func newSynchronizator() *synchronizator { - return &synchronizator{ - positions: make([]int, 0), - GoNext: true, + s := &synchronizator{ + positions: make([]*posCounter, 0), + goNext: true, + minBlock: 0, + blocks: make(map[uint64]*blockCounter), } + + return s +} + +func (s *synchronizator) Halt() { + s.goNext = false + + s.mu.Lock() + defer s.mu.Unlock() + + for n := range s.blocks { + if n != s.minBlock { + close(s.blocks[n].wait) + } + } +} + +func (s *synchronizator) GoNext(n uint64) bool { + if !s.goNext { + return false + } + + if n > s.minBlock { + s.mu.Lock() + s.enqueueBlock(n) + s.dequeueBlock() + s.mu.Unlock() + // wait for other threads + <-s.blocks[n].wait + } + + return s.goNext } func (s *synchronizator) StartThread(pos int, num int) { s.threads.Add(1) - if len(s.positions) == 0 || s.positions[len(s.positions)-1] != pos { - s.positions = append(s.positions, pos) + + s.mu.Lock() + defer s.mu.Unlock() + + s.enqueueBlock(s.minBlock) + + if len(s.positions) == 0 || s.positions[len(s.positions)-1].pos != pos { + s.positions = append(s.positions, &posCounter{pos, 1}) + } else { + s.positions[len(s.positions)-1].count++ } } func (s *synchronizator) FinishThread(pos int, num int) { s.threads.Done() + + s.mu.Lock() + defer s.mu.Unlock() + + s.dequeueBlock() +} + +func (s *synchronizator) enqueueBlock(n uint64) { + if _, ok := s.blocks[n]; ok { + s.blocks[n].count++ + } else { + s.blocks[n] = &blockCounter{ + wait: make(chan struct{}), + count: 1, + } + } +} + +func (s *synchronizator) dequeueBlock() { + s.blocks[s.minBlock].count-- + if s.blocks[s.minBlock].count < 1 { + delete(s.blocks, s.minBlock) + if 
len(s.blocks) < 1 { + return + } + // find new minBlock + s.minBlock = math.MaxUint64 + for b := range s.blocks { + if s.minBlock > b { + s.minBlock = b + } + } + close(s.blocks[s.minBlock].wait) + } } func (s *synchronizator) WaitForThreads() { s.threads.Wait() } -func (s *synchronizator) Criteries() int { +func (s *synchronizator) CriteriesCount() int { return len(s.positions) } From 0e569d593a8f8ef5ac40830b24919e9b9627883d Mon Sep 17 00:00:00 2001 From: alex Date: Mon, 27 Jun 2022 16:55:35 +1000 Subject: [PATCH 10/87] break on no position --- topicsdb/search_parallel.go | 2 +- topicsdb/sync.go | 21 +++++++++++++++------ 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/topicsdb/search_parallel.go b/topicsdb/search_parallel.go index f0f38085d..9bd1e01c0 100644 --- a/topicsdb/search_parallel.go +++ b/topicsdb/search_parallel.go @@ -53,7 +53,7 @@ func (tt *Index) searchParallel(ctx context.Context, pattern [][]common.Hash, bl result[rec.ID] = rec } rec.matched++ - if rec.matched == syncing.CriteriesCount() { + if rec.matched == syncing.PositionsCount() { delete(result, rec.ID) gonext, err = onMatched(rec) if !gonext { diff --git a/topicsdb/sync.go b/topicsdb/sync.go index 90316f84f..6cbcf7125 100644 --- a/topicsdb/sync.go +++ b/topicsdb/sync.go @@ -19,7 +19,7 @@ type ( synchronizator struct { mu sync.Mutex threads sync.WaitGroup - positions []*posCounter + positions map[int]*posCounter goNext bool minBlock uint64 blocks map[uint64]*blockCounter @@ -28,7 +28,7 @@ type ( func newSynchronizator() *synchronizator { s := &synchronizator{ - positions: make([]*posCounter, 0), + positions: make(map[int]*posCounter), goNext: true, minBlock: 0, blocks: make(map[uint64]*blockCounter), @@ -75,10 +75,10 @@ func (s *synchronizator) StartThread(pos int, num int) { s.enqueueBlock(s.minBlock) - if len(s.positions) == 0 || s.positions[len(s.positions)-1].pos != pos { - s.positions = append(s.positions, &posCounter{pos, 1}) + if _, ok := s.positions[pos]; ok { + s.positions[pos].count++ } else { - s.positions[len(s.positions)-1].count++ + s.positions[pos] = &posCounter{pos, 1} } } @@ -88,6 +88,7 @@ func (s *synchronizator) FinishThread(pos int, num int) { s.mu.Lock() defer s.mu.Unlock() + s.positions[pos].count-- s.dequeueBlock() } @@ -105,6 +106,14 @@ func (s *synchronizator) enqueueBlock(n uint64) { func (s *synchronizator) dequeueBlock() { s.blocks[s.minBlock].count-- if s.blocks[s.minBlock].count < 1 { + + for _, pos := range s.positions { + if pos.count < 1 { + s.goNext = false + break + } + } + delete(s.blocks, s.minBlock) if len(s.blocks) < 1 { return @@ -124,6 +133,6 @@ func (s *synchronizator) WaitForThreads() { s.threads.Wait() } -func (s *synchronizator) CriteriesCount() int { +func (s *synchronizator) PositionsCount() int { return len(s.positions) } From 8666393ecfc977bbf5014c8b77f5fb36c3453db2 Mon Sep 17 00:00:00 2001 From: alex Date: Mon, 27 Jun 2022 23:19:05 +1000 Subject: [PATCH 11/87] debug --- cmd/tsearch/main_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/tsearch/main_test.go b/cmd/tsearch/main_test.go index 7edf6599d..226f39810 100644 --- a/cmd/tsearch/main_test.go +++ b/cmd/tsearch/main_test.go @@ -29,7 +29,7 @@ var ( } ) -func TestDirect(t *testing.T) { +func xTestDirect(t *testing.T) { require := require.New(t) db, err := leveldb.New(datadir+"/gossip", cache64mb(""), 0, nil, nil) @@ -59,7 +59,7 @@ func TestDirect(t *testing.T) { } } -func xTestMain(t *testing.T) { +func TestMain(t *testing.T) { require := require.New(t) rawProducer := 
leveldb.NewProducer(datadir, cache64mb) From 84547c19eb7eb8b438dbee8419a594e6841257df Mon Sep 17 00:00:00 2001 From: alex Date: Tue, 28 Jun 2022 16:26:24 +1000 Subject: [PATCH 12/87] rm old Index.searchLazy() --- topicsdb/search_lazy.go | 138 ------------------------------------ topicsdb/search_parallel.go | 2 + 2 files changed, 2 insertions(+), 138 deletions(-) delete mode 100644 topicsdb/search_lazy.go diff --git a/topicsdb/search_lazy.go b/topicsdb/search_lazy.go deleted file mode 100644 index 48c55feb6..000000000 --- a/topicsdb/search_lazy.go +++ /dev/null @@ -1,138 +0,0 @@ -package topicsdb - -import ( - "context" - - "github.com/ethereum/go-ethereum/common" -) - -type logHandler func(rec *logrec) (gonext bool, err error) - -func (tt *Index) searchLazy(ctx context.Context, pattern [][]common.Hash, blockStart, blockEnd uint64, onMatched logHandler) (err error) { - if ctx == nil { - ctx = context.Background() - } - _, err = tt.walkFirst(ctx, uintToBytes(blockStart), blockEnd, pattern, 0, onMatched) - return -} - -// walkFirst for topics recursive. -func (tt *Index) walkFirst( - ctx context.Context, blockStart []byte, blockEnd uint64, pattern [][]common.Hash, pos uint8, onMatched logHandler, -) ( - gonext bool, err error, -) { - patternLen := uint8(len(pattern)) - gonext = true - for { - if pos >= patternLen { - return - } - if len(pattern[pos]) < 1 { - pos++ - continue - } - break - } - - var ( - prefix [topicKeySize]byte - prefLen int = hashSize - ) - copy(prefix[prefLen:], posToBytes(pos)) - prefLen += uint8Size - - for _, variant := range pattern[pos] { - copy(prefix[0:], variant.Bytes()) - it := tt.table.Topic.NewIterator(prefix[:prefLen], blockStart) - for it.Next() { - err = ctx.Err() - if err != nil { - it.Release() - return - } - - topicCount := bytesToPos(it.Value()) - if topicCount < (patternLen - 1) { - continue - } - id := extractLogrecID(it.Key()) - rec := newLogrec(id, topicCount) - - if blockStart != nil && rec.ID.BlockNumber() > blockEnd { - break - } - - gonext, err = tt.walkNexts(ctx, rec, pattern, pos+1, onMatched) - if err != nil || !gonext { - it.Release() - return - } - } - - err = it.Error() - it.Release() - if err != nil { - return - } - - } - return -} - -// walkNexts for topics recursive. -func (tt *Index) walkNexts( - ctx context.Context, rec *logrec, pattern [][]common.Hash, pos uint8, onMatched logHandler, -) ( - gonext bool, err error, -) { - patternLen := uint8(len(pattern)) - gonext = true - for { - // Max recursion depth is equal to len(topics) and limited by MaxCount. 
- if pos >= patternLen { - gonext, err = onMatched(rec) - return - } - if len(pattern[pos]) < 1 { - pos++ - continue - } - break - } - - var ( - prefix [topicKeySize]byte - prefLen int = hashSize - ) - copy(prefix[prefLen:], posToBytes(pos)) - prefLen += uint8Size - copy(prefix[prefLen:], rec.ID.Bytes()) - prefLen += logrecKeySize - - for _, variant := range pattern[pos] { - copy(prefix[0:], variant.Bytes()) - it := tt.table.Topic.NewIterator(prefix[:prefLen], nil) - for it.Next() { - err = ctx.Err() - if err != nil { - it.Release() - return - } - - gonext, err = tt.walkNexts(ctx, rec, pattern, pos+1, onMatched) - if err != nil || !gonext { - it.Release() - return - } - } - - err = it.Error() - it.Release() - if err != nil { - return - } - - } - return -} diff --git a/topicsdb/search_parallel.go b/topicsdb/search_parallel.go index 9bd1e01c0..2d4a267b2 100644 --- a/topicsdb/search_parallel.go +++ b/topicsdb/search_parallel.go @@ -7,6 +7,8 @@ import ( "github.com/ethereum/go-ethereum/common" ) +type logHandler func(rec *logrec) (gonext bool, err error) + func (tt *Index) searchParallel(ctx context.Context, pattern [][]common.Hash, blockStart, blockEnd uint64, onMatched logHandler) error { if ctx == nil { ctx = context.Background() From 32fd83edfa5b34f97bcd15a5f86ab43930a6e939 Mon Sep 17 00:00:00 2001 From: alex Date: Tue, 28 Jun 2022 16:47:13 +1000 Subject: [PATCH 13/87] rm debug --- cmd/tsearch/main.go | 9 ---- cmd/tsearch/main_test.go | 88 ---------------------------------------- 2 files changed, 97 deletions(-) delete mode 100644 cmd/tsearch/main.go delete mode 100644 cmd/tsearch/main_test.go diff --git a/cmd/tsearch/main.go b/cmd/tsearch/main.go deleted file mode 100644 index 20f0d0fb6..000000000 --- a/cmd/tsearch/main.go +++ /dev/null @@ -1,9 +0,0 @@ -package main - -import ( - "fmt" -) - -func main() { - fmt.Println("Hello World!") -} diff --git a/cmd/tsearch/main_test.go b/cmd/tsearch/main_test.go deleted file mode 100644 index 226f39810..000000000 --- a/cmd/tsearch/main_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package main - -import ( - "context" - "fmt" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" - "github.com/syndtr/goleveldb/leveldb/opt" - - "github.com/Fantom-foundation/go-opera/gossip" - "github.com/Fantom-foundation/go-opera/integration" - "github.com/Fantom-foundation/lachesis-base/common/bigendian" - "github.com/Fantom-foundation/lachesis-base/inter/idx" - "github.com/Fantom-foundation/lachesis-base/kvdb/flushable" - "github.com/Fantom-foundation/lachesis-base/kvdb/leveldb" -) - -var ( - datadir = "/home/fabel/Work/fantom/blockchains/mainnet/chaindata" - blockFrom = idx.Block(0x256e898) // 39250072 - 39350071 = -99999 - blockTo = idx.Block(0x2586f37) - pattern = [][]common.Hash{ - []common.Hash{}, - []common.Hash{common.HexToHash("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef")}, - []common.Hash(nil), - []common.Hash{common.HexToHash("0x00000000000000000000000089716ad7edc3be3b35695789c475f3e7a3deb12a")}, - } -) - -func xTestDirect(t *testing.T) { - require := require.New(t) - - db, err := leveldb.New(datadir+"/gossip", cache64mb(""), 0, nil, nil) - require.NoError(err) - defer db.Close() - - const pos = 1 - - it := db.NewIterator( - append([]byte("Lt"), append(pattern[pos][0].Bytes(), pos)...), - uintToBytes(uint64(blockFrom))) - defer it.Release() - - rows := 0 - defer func(r *int) { - fmt.Printf("rows = %d\n", *r) - }(&rows) - for it.Next() { - rows++ - n := idx.Block(bytesToUint(it.Key()[(2 + 32 + 1) 
: (2+32+1)+8])) - if n > blockTo { - break - } - if rows%10000 == 0 { - fmt.Printf("rows\t~ %d\t%d\n", rows, n) - } - } -} - -func TestMain(t *testing.T) { - require := require.New(t) - - rawProducer := leveldb.NewProducer(datadir, cache64mb) - dbs := flushable.NewSyncedPool(rawProducer, integration.FlushIDKey) - dbs.Initialize(rawProducer.Names()) - - cfg := gossip.LiteStoreConfig() - gdb := gossip.NewStore(dbs, cfg) - defer gdb.Close() - - edb := gdb.EvmStore() - logs, err := edb.EvmLogs.FindInBlocks(context.TODO(), blockFrom, blockTo, pattern) - t.Logf("Got: %d", len(logs)) - require.NoError(err) -} - -func cache64mb(string) int { - return 64 * opt.MiB -} -func uintToBytes(n uint64) []byte { - return bigendian.Uint64ToBytes(n) -} - -func bytesToUint(b []byte) uint64 { - return bigendian.BytesToUint64(b) -} From d476f948306a213f8d2db01f21d539559e2b9b05 Mon Sep 17 00:00:00 2001 From: alex Date: Wed, 29 Jun 2022 17:49:35 +1000 Subject: [PATCH 14/87] datarace fix --- topicsdb/sync.go | 3 ++- topicsdb/topicsdb_test.go | 7 ++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/topicsdb/sync.go b/topicsdb/sync.go index 6cbcf7125..09493c8c9 100644 --- a/topicsdb/sync.go +++ b/topicsdb/sync.go @@ -59,9 +59,10 @@ func (s *synchronizator) GoNext(n uint64) bool { s.mu.Lock() s.enqueueBlock(n) s.dequeueBlock() + wait := s.blocks[n].wait s.mu.Unlock() // wait for other threads - <-s.blocks[n].wait + <-wait } return s.goNext diff --git a/topicsdb/topicsdb_test.go b/topicsdb/topicsdb_test.go index 7fe573c5b..18f501bf2 100644 --- a/topicsdb/topicsdb_test.go +++ b/topicsdb/topicsdb_test.go @@ -36,16 +36,20 @@ func (tt *Index) FindInBlocksAsync(ctx context.Context, from, to idx.Block, patt go func() { failed := false for rec := range ready { - wg.Done() + if failed { + wg.Done() continue } if rec.err != nil { err = rec.err failed = true + wg.Done() continue } + logs = append(logs, rec.result) + wg.Done() } }() @@ -61,6 +65,7 @@ func (tt *Index) FindInBlocksAsync(ctx context.Context, from, to idx.Block, patt } err = tt.searchParallel(ctx, pattern, uint64(from), uint64(to), onMatched) + wg.Wait() return From ea9b8475cd84b201e9f457f97b77582460cc171b Mon Sep 17 00:00:00 2001 From: alex Date: Mon, 4 Jul 2022 18:08:34 +1000 Subject: [PATCH 15/87] new recs buffer for each block to free mem --- topicsdb/search_parallel.go | 31 ++++++++++++++++++++----------- topicsdb/sync.go | 8 +++++--- 2 files changed, 25 insertions(+), 14 deletions(-) diff --git a/topicsdb/search_parallel.go b/topicsdb/search_parallel.go index 2d4a267b2..ef95be2ac 100644 --- a/topicsdb/search_parallel.go +++ b/topicsdb/search_parallel.go @@ -15,9 +15,9 @@ func (tt *Index) searchParallel(ctx context.Context, pattern [][]common.Hash, bl } var ( - syncing = newSynchronizator() - mu sync.Mutex - result = make(map[ID]*logrec) + syncing = newSynchronizator() + mu sync.Mutex + foundByBlock = make(map[uint64]map[ID]*logrec) ) aggregator := func(pos, num int) logHandler { @@ -36,27 +36,36 @@ func (tt *Index) searchParallel(ctx context.Context, pattern [][]common.Hash, bl if blockEnd > 0 && block > blockEnd { return } - - gonext = syncing.GoNext(block) - if !gonext { + if rec.topicsCount < uint8(len(pattern)-1) { return } - if rec.topicsCount < uint8(len(pattern)-1) { + var prevBlock uint64 + prevBlock, gonext = syncing.GoNext(block) + if !gonext { return } mu.Lock() defer mu.Unlock() - if prev, ok := result[rec.ID]; ok { - rec = prev + if prevBlock > 0 { + delete(foundByBlock, prevBlock) + } + + found, ok := foundByBlock[block] + if !ok 
{ + found = make(map[ID]*logrec) + foundByBlock[block] = found + } + + if before, ok := found[rec.ID]; ok { + rec = before } else { - result[rec.ID] = rec + found[rec.ID] = rec } rec.matched++ if rec.matched == syncing.PositionsCount() { - delete(result, rec.ID) gonext, err = onMatched(rec) if !gonext { syncing.Halt() diff --git a/topicsdb/sync.go b/topicsdb/sync.go index 09493c8c9..446d0be19 100644 --- a/topicsdb/sync.go +++ b/topicsdb/sync.go @@ -50,13 +50,14 @@ func (s *synchronizator) Halt() { } } -func (s *synchronizator) GoNext(n uint64) bool { +func (s *synchronizator) GoNext(n uint64) (prev uint64, gonext bool) { if !s.goNext { - return false + return } if n > s.minBlock { s.mu.Lock() + prev = s.minBlock s.enqueueBlock(n) s.dequeueBlock() wait := s.blocks[n].wait @@ -65,7 +66,8 @@ func (s *synchronizator) GoNext(n uint64) bool { <-wait } - return s.goNext + gonext = s.goNext + return } func (s *synchronizator) StartThread(pos int, num int) { From 18fe5b84b6325a18c5cd1543447c56a35c94d3fe Mon Sep 17 00:00:00 2001 From: alex Date: Tue, 5 Jul 2022 19:46:36 +1000 Subject: [PATCH 16/87] valid fake enode --- cmd/opera/launcher/launcher.go | 2 +- cmd/opera/launcher/params.go | 20 +++++++++++++++++--- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/cmd/opera/launcher/launcher.go b/cmd/opera/launcher/launcher.go index 9fb4f8076..7cd2395ae 100644 --- a/cmd/opera/launcher/launcher.go +++ b/cmd/opera/launcher/launcher.go @@ -307,7 +307,7 @@ func makeNode(ctx *cli.Context, cfg *config, genesisStore *genesisstore.Store) ( if len(networkName) == 0 && genesisStore != nil { networkName = genesisStore.Header().NetworkName } - if len(cfg.Node.P2P.BootstrapNodes) == len(asDefault) && cfg.Node.P2P.BootstrapNodes[0] == asDefault[0] { + if needDefaultBootnodes(cfg.Node.P2P.BootstrapNodes) { bootnodes := Bootnodes[networkName] if bootnodes == nil { bootnodes = []string{} diff --git a/cmd/opera/launcher/params.go b/cmd/opera/launcher/params.go index 261276d8b..dc7a2c784 100644 --- a/cmd/opera/launcher/params.go +++ b/cmd/opera/launcher/params.go @@ -2,7 +2,9 @@ package launcher import ( "github.com/Fantom-foundation/lachesis-base/hash" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/params" "github.com/Fantom-foundation/go-opera/opera" @@ -26,9 +28,6 @@ var ( }, } - // asDefault is slice with one empty element which indicates that network default bootnodes should be substituted - asDefault = []*enode.Node{{}} - mainnetHeader = genesis.Header{ GenesisID: hash.HexToHash("0x4a53c5445584b3bfc20dbfb2ec18ae20037c716f3ba2d9e1da768a9deca17cb4"), NetworkID: opera.MainNetworkID, @@ -156,3 +155,18 @@ func overrideParams() { params.RinkebyBootnodes = []string{} params.GoerliBootnodes = []string{} } + +// asDefault is slice with one fake element which indicates that network default bootnodes should be substituted +var asDefault = fakeBootnodes() + +func fakeBootnodes() []*enode.Node { + privkey, _ := crypto.HexToECDSA("0b10b00000000000000000000000000000000000000000000000000000000000") + r := &enr.Record{} + enode.SignV4(r, privkey) + n, _ := enode.New(enode.ValidSchemes, r) + return []*enode.Node{n} +} + +func needDefaultBootnodes(nn []*enode.Node) bool { + return len(nn) == len(asDefault) && nn[0] == asDefault[0] +} From 2a5f28ca1c649c21433a23862706684edf8c86bc Mon Sep 17 00:00:00 2001 From: alex Date: Wed, 6 Jul 2022 22:25:07 +1000 Subject: [PATCH 17/87] custom config marshaling instead of 
fake enode --- cmd/opera/launcher/config_custom.go | 79 +++++++++++++++++++++++++++++ cmd/opera/launcher/params.go | 18 ------- 2 files changed, 79 insertions(+), 18 deletions(-) create mode 100644 cmd/opera/launcher/config_custom.go diff --git a/cmd/opera/launcher/config_custom.go b/cmd/opera/launcher/config_custom.go new file mode 100644 index 000000000..69e50e6c6 --- /dev/null +++ b/cmd/opera/launcher/config_custom.go @@ -0,0 +1,79 @@ +package launcher + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/naoina/toml" + "github.com/naoina/toml/ast" +) + +// asDefault is slice with one empty element +// which indicates that network default bootnodes should be substituted +var asDefault = []*enode.Node{{}} + +func needDefaultBootnodes(nn []*enode.Node) bool { + return len(nn) == len(asDefault) && nn[0] == asDefault[0] +} + +func isBootstrapNodesDefault(root *ast.Table) ( + bootstrapNodes bool, + bootstrapNodesV5 bool, +) { + table := root + for _, path := range []string{"Node", "P2P"} { + val, ok := table.Fields[path] + if !ok { + return + } + table = val.(*ast.Table) + } + + emptyNode := fmt.Sprintf("\"%s\"", asDefault[0]) + + var res = map[string]bool{ + "BootstrapNodes": false, + "BootstrapNodesV5": false, + } + for name := range res { + if val, ok := table.Fields[name]; ok { + kv := val.(*ast.KeyValue) + arr := kv.Value.(*ast.Array) + if len(arr.Value) == len(asDefault) && arr.Value[0].Source() == emptyNode { + res[name] = true + delete(table.Fields, name) + } + } + } + bootstrapNodes = res["BootstrapNodes"] + bootstrapNodesV5 = res["BootstrapNodesV5"] + + return +} + +// UnmarshalTOML implements toml.Unmarshaler. +func (c *config) UnmarshalTOML(input []byte) error { + ast, err := toml.Parse(input) + if err != nil { + return err + } + + defaultBootstrapNodes, defaultBootstrapNodesV5 := isBootstrapNodesDefault(ast) + + type rawCfg config + var raw rawCfg + err = toml.UnmarshalTable(ast, &raw) + if err != nil { + return err + } + *c = config(raw) + + if defaultBootstrapNodes { + c.Node.P2P.BootstrapNodes = asDefault + } + if defaultBootstrapNodesV5 { + c.Node.P2P.BootstrapNodesV5 = asDefault + } + + return nil +} diff --git a/cmd/opera/launcher/params.go b/cmd/opera/launcher/params.go index dc7a2c784..2ca2e43d1 100644 --- a/cmd/opera/launcher/params.go +++ b/cmd/opera/launcher/params.go @@ -2,9 +2,6 @@ package launcher import ( "github.com/Fantom-foundation/lachesis-base/hash" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/params" "github.com/Fantom-foundation/go-opera/opera" @@ -155,18 +152,3 @@ func overrideParams() { params.RinkebyBootnodes = []string{} params.GoerliBootnodes = []string{} } - -// asDefault is slice with one fake element which indicates that network default bootnodes should be substituted -var asDefault = fakeBootnodes() - -func fakeBootnodes() []*enode.Node { - privkey, _ := crypto.HexToECDSA("0b10b00000000000000000000000000000000000000000000000000000000000") - r := &enr.Record{} - enode.SignV4(r, privkey) - n, _ := enode.New(enode.ValidSchemes, r) - return []*enode.Node{n} -} - -func needDefaultBootnodes(nn []*enode.Node) bool { - return len(nn) == len(asDefault) && nn[0] == asDefault[0] -} From 2ae57514998afa2089085a216ef571c1d0eb30c1 Mon Sep 17 00:00:00 2001 From: alex Date: Wed, 6 Jul 2022 23:01:26 +1000 Subject: [PATCH 18/87] test of config marshaling --- cmd/opera/launcher/config_custom_test.go | 58 
++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 cmd/opera/launcher/config_custom_test.go diff --git a/cmd/opera/launcher/config_custom_test.go b/cmd/opera/launcher/config_custom_test.go new file mode 100644 index 000000000..3a7123101 --- /dev/null +++ b/cmd/opera/launcher/config_custom_test.go @@ -0,0 +1,58 @@ +package launcher + +import ( + "bytes" + "testing" + + "github.com/Fantom-foundation/lachesis-base/abft" + "github.com/Fantom-foundation/lachesis-base/utils/cachescale" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/stretchr/testify/require" + + "github.com/Fantom-foundation/go-opera/evmcore" + "github.com/Fantom-foundation/go-opera/gossip" + "github.com/Fantom-foundation/go-opera/gossip/emitter" + "github.com/Fantom-foundation/go-opera/vecmt" +) + +func TestConfigTOML(t *testing.T) { + cacheRatio := cachescale.Ratio{ + Base: uint64(DefaultCacheSize*1 - ConstantCacheSize), + Target: uint64(DefaultCacheSize*2 - ConstantCacheSize), + } + + src := config{ + Node: defaultNodeConfig(), + Opera: gossip.DefaultConfig(cacheRatio), + Emitter: emitter.DefaultConfig(), + TxPool: evmcore.DefaultTxPoolConfig, + OperaStore: gossip.DefaultStoreConfig(cacheRatio), + Lachesis: abft.DefaultConfig(), + LachesisStore: abft.DefaultStoreConfig(cacheRatio), + VectorClock: vecmt.DefaultConfig(cacheRatio), + cachescale: cacheRatio, + } + + for name, val := range map[string][]*enode.Node{ + "Nil": nil, + "Empty": {}, + "Default": asDefault, + } { + t.Run(name, func(t *testing.T) { + require := require.New(t) + + src.Node.P2P.BootstrapNodes = val + src.Node.P2P.BootstrapNodesV5 = val + + stream, err := tomlSettings.Marshal(&src) + require.NoError(err) + + var got config + err = tomlSettings.NewDecoder(bytes.NewReader(stream)).Decode(&got) + require.NoError(err) + + require.Equal(src.Node.P2P.BootstrapNodes, got.Node.P2P.BootstrapNodes) + require.Equal(src.Node.P2P.BootstrapNodesV5, got.Node.P2P.BootstrapNodesV5) + }) + } +} From 9ce23a7c9f9937a564f72e1ab928cc6aa13ec77b Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Fri, 8 Jul 2022 02:01:10 +0700 Subject: [PATCH 19/87] fix initialization of ExtRPCEnabled var --- gossip/config.go | 2 -- gossip/ethapi_backend.go | 5 +++++ gossip/service.go | 3 ++- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/gossip/config.go b/gossip/config.go index 420ecc459..ef8889060 100644 --- a/gossip/config.go +++ b/gossip/config.go @@ -100,8 +100,6 @@ type ( // allows only for EIP155 transactions. AllowUnprotectedTxs bool - ExtRPCEnabled bool - RPCBlockExt bool } diff --git a/gossip/ethapi_backend.go b/gossip/ethapi_backend.go index 93d66d88f..f4873b71b 100644 --- a/gossip/ethapi_backend.go +++ b/gossip/ethapi_backend.go @@ -39,6 +39,11 @@ type EthAPIBackend struct { allowUnprotectedTxs bool } +// SetExtRPCEnabled updates extRPCEnabled +func (b *EthAPIBackend) SetExtRPCEnabled(v bool) { + b.extRPCEnabled = v +} + // ChainConfig returns the active chain configuration. 
func (b *EthAPIBackend) ChainConfig() *params.ChainConfig { return b.svc.store.GetEvmChainConfig() diff --git a/gossip/service.go b/gossip/service.go index 267c2b135..f6b2b42b8 100644 --- a/gossip/service.go +++ b/gossip/service.go @@ -172,6 +172,7 @@ func NewService(stack *node.Node, config Config, store *Store, blockProc BlockPr svc.p2pServer = stack.Server() svc.accountManager = stack.AccountManager() svc.eventMux = stack.EventMux() + svc.EthAPI.SetExtRPCEnabled(stack.Config().ExtRPCEnabled()) // Create the net API service svc.netRPCService = ethapi.NewPublicNetAPI(svc.p2pServer, store.GetRules().NetworkID) svc.haltCheck = haltCheck @@ -269,7 +270,7 @@ func newService(config Config, store *Store, blockProc BlockProc, engine lachesi } // create API backend - svc.EthAPI = &EthAPIBackend{config.ExtRPCEnabled, svc, stateReader, txSigner, config.AllowUnprotectedTxs} + svc.EthAPI = &EthAPIBackend{false, svc, stateReader, txSigner, config.AllowUnprotectedTxs} svc.verWatcher = verwatcher.New(netVerStore) svc.tflusher = svc.makePeriodicFlusher() From 95ba6fc8e16125ab3dee2cadc7173c6c8482fff4 Mon Sep 17 00:00:00 2001 From: alex Date: Thu, 7 Jul 2022 21:35:56 +1000 Subject: [PATCH 20/87] refactor --- cmd/opera/launcher/config_custom_test.go | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/cmd/opera/launcher/config_custom_test.go b/cmd/opera/launcher/config_custom_test.go index 3a7123101..888057c5c 100644 --- a/cmd/opera/launcher/config_custom_test.go +++ b/cmd/opera/launcher/config_custom_test.go @@ -15,7 +15,7 @@ import ( "github.com/Fantom-foundation/go-opera/vecmt" ) -func TestConfigTOML(t *testing.T) { +func TestConfigFile(t *testing.T) { cacheRatio := cachescale.Ratio{ Base: uint64(DefaultCacheSize*1 - ConstantCacheSize), Target: uint64(DefaultCacheSize*2 - ConstantCacheSize), @@ -33,12 +33,22 @@ func TestConfigTOML(t *testing.T) { cachescale: cacheRatio, } + canonical := func(nn []*enode.Node) []*enode.Node { + if len(nn) == 0 { + return []*enode.Node{} + } + return nn + } + for name, val := range map[string][]*enode.Node{ "Nil": nil, "Empty": {}, "Default": asDefault, + "UserDefined": {enode.MustParse( + "enr:-HW4QIEFxJwyZzPQJPE2DbQpEu7FM1Gv99VqJ3CbLb22fm9_V9cfdZdSBpZCyrEb5UfMeC6k9WT0iaaeAjRcuzCfr4yAgmlkgnY0iXNlY3AyNTZrMaECps0D9hhmXEN5BMgVVe0xT5mpYU9zv4YxCdTApmfP-l0", + )}, } { - t.Run(name, func(t *testing.T) { + t.Run(name+"BootstrapNodes", func(t *testing.T) { require := require.New(t) src.Node.P2P.BootstrapNodes = val @@ -51,6 +61,13 @@ func TestConfigTOML(t *testing.T) { err = tomlSettings.NewDecoder(bytes.NewReader(stream)).Decode(&got) require.NoError(err) + { // toml workaround + src.Node.P2P.BootstrapNodes = canonical(src.Node.P2P.BootstrapNodes) + got.Node.P2P.BootstrapNodes = canonical(got.Node.P2P.BootstrapNodes) + src.Node.P2P.BootstrapNodesV5 = canonical(src.Node.P2P.BootstrapNodesV5) + got.Node.P2P.BootstrapNodesV5 = canonical(got.Node.P2P.BootstrapNodesV5) + } + require.Equal(src.Node.P2P.BootstrapNodes, got.Node.P2P.BootstrapNodes) require.Equal(src.Node.P2P.BootstrapNodesV5, got.Node.P2P.BootstrapNodesV5) }) From 0fb8fd2fa2d104067f20c50df3509ac6f0febd11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ha=20=C4=90ANG?= Date: Thu, 14 Jul 2022 18:54:30 +0700 Subject: [PATCH 21/87] add flag for iprestrict and private node --- cmd/opera/launcher/launcher.go | 2 ++ go.mod | 2 +- go.sum | 4 ++-- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/cmd/opera/launcher/launcher.go b/cmd/opera/launcher/launcher.go index 7cd2395ae..47d549840 
100644 --- a/cmd/opera/launcher/launcher.go +++ b/cmd/opera/launcher/launcher.go @@ -87,6 +87,8 @@ func initFlags() { utils.NoDiscoverFlag, utils.DiscoveryV5Flag, utils.NetrestrictFlag, + utils.IPrestrictFlag, + utils.PrivateNodeFlag, utils.NodeKeyFileFlag, utils.NodeKeyHexFlag, } diff --git a/go.mod b/go.mod index de240978d..27b27e284 100644 --- a/go.mod +++ b/go.mod @@ -44,6 +44,6 @@ require ( gopkg.in/urfave/cli.v1 v1.20.0 ) -replace github.com/ethereum/go-ethereum => github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc5 +replace github.com/ethereum/go-ethereum => github.com/hadv/go-ethereum v1.10.9-0.20220726114552-2b6434a5e9c8 replace github.com/dvyukov/go-fuzz => github.com/guzenok/go-fuzz v0.0.0-20210103140116-f9104dfb626f diff --git a/go.sum b/go.sum index 164b5588e..2fb27dbf7 100644 --- a/go.sum +++ b/go.sum @@ -34,8 +34,6 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc5 h1:3SMNRzp13oZZkOBwOtLqbXVKYGmxe+2Ak94snUd2yxU= -github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc5/go.mod h1:IeQDjWCNBj/QiWIPosfF6/kRC6pHPNs7W7LfBzjj+P4= github.com/Fantom-foundation/lachesis-base v0.0.0-20220103160934-6b4931c60582 h1:gDEbOynFwS7sp19XrtWnA1nEhSIXXO5DuAuT5h/nZgY= github.com/Fantom-foundation/lachesis-base v0.0.0-20220103160934-6b4931c60582/go.mod h1:E6+2LOvgADwSOv0U5YXhRJz4PlX8qJxvq8M93AOC1tM= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -220,6 +218,8 @@ github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29 h1:sezaKh github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/guzenok/go-fuzz v0.0.0-20210103140116-f9104dfb626f h1:GQpcb9ywkaawJfZCFeIcrSWlsZFRTsig42e03dkzc+I= github.com/guzenok/go-fuzz v0.0.0-20210103140116-f9104dfb626f/go.mod h1:Q5On640X2Z0YzKOijx9GVhUu/kvHnk9aKoWGMlRDMtc= +github.com/hadv/go-ethereum v1.10.9-0.20220726114552-2b6434a5e9c8 h1:S2eeDb/NAjA2ylCFijPU2lAnKlpcrINVL+DfEDY0XYo= +github.com/hadv/go-ethereum v1.10.9-0.20220726114552-2b6434a5e9c8/go.mod h1:IeQDjWCNBj/QiWIPosfF6/kRC6pHPNs7W7LfBzjj+P4= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= From 1c456b99cd8d14996030a1f3260bc751c9432ae4 Mon Sep 17 00:00:00 2001 From: Jan Kalina Date: Tue, 26 Jul 2022 14:31:12 +0200 Subject: [PATCH 22/87] Fix genesis export --- cmd/opera/launcher/genesiscmd.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/opera/launcher/genesiscmd.go b/cmd/opera/launcher/genesiscmd.go index 3ed1603b1..e7b526fd7 100644 --- a/cmd/opera/launcher/genesiscmd.go +++ b/cmd/opera/launcher/genesiscmd.go @@ -323,7 +323,7 @@ func exportGenesis(ctx *cli.Context) error { if mode != "none" { log.Info("Exporting EVM data", "from", fromBlock, "to", toBlock) writer := newUnitWriter(plain) - err := writer.Start(header, genesisstore.BlocksSection, tmpPath) + err := writer.Start(header, genesisstore.EvmSection, tmpPath) if err != nil { return 
err } From d0edfd0a8a396fe5dd3aeccd503900a3e55ab136 Mon Sep 17 00:00:00 2001 From: quan8 Date: Thu, 28 Jul 2022 08:22:21 +1000 Subject: [PATCH 23/87] update license --- COPYING | 675 +++++++++++++++++++++++++++++++++++++++++++++++++ COPYING.LESSER | 166 ++++++++++++ LICENSE | 21 -- 3 files changed, 841 insertions(+), 21 deletions(-) create mode 100644 COPYING create mode 100644 COPYING.LESSER delete mode 100644 LICENSE diff --git a/COPYING b/COPYING new file mode 100644 index 000000000..53d1f3d01 --- /dev/null +++ b/COPYING @@ -0,0 +1,675 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. + diff --git a/COPYING.LESSER b/COPYING.LESSER new file mode 100644 index 000000000..5357f6918 --- /dev/null +++ b/COPYING.LESSER @@ -0,0 +1,166 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. 
+Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. 
+ + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. 
+ diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 1aa5901ce..000000000 --- a/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2018 Fantom Foundation - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. From 4ab95b11b452bc5416baa5c7ffcf9c7b084af720 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Wed, 29 Jun 2022 22:40:26 +0400 Subject: [PATCH 24/87] add caching for event IDs --- gossip/c_event_callbacks.go | 1 + gossip/config.go | 2 ++ gossip/store.go | 6 +++- gossip/store_event.go | 5 +++ utils/eventid/eventid.go | 69 +++++++++++++++++++++++++++++++++++++ 5 files changed, 82 insertions(+), 1 deletion(-) create mode 100644 utils/eventid/eventid.go diff --git a/gossip/c_event_callbacks.go b/gossip/c_event_callbacks.go index e5306e46e..cb397e90c 100644 --- a/gossip/c_event_callbacks.go +++ b/gossip/c_event_callbacks.go @@ -120,6 +120,7 @@ func processLastEvent(lasts *concurrent.ValidatorEventsSet, e *inter.EventPayloa } func (s *Service) switchEpochTo(newEpoch idx.Epoch) { + s.store.cache.EventIDs.Reset(newEpoch) s.store.SetHighestLamport(0) // reset dag indexer s.store.resetEpochStore(newEpoch) diff --git a/gossip/config.go b/gossip/config.go index ef8889060..b3bef6309 100644 --- a/gossip/config.go +++ b/gossip/config.go @@ -107,6 +107,8 @@ type ( // Cache size for full events. EventsNum int EventsSize uint + // Cache size for event IDs + EventsIDsNum int // Cache size for full blocks. 
BlocksNum int BlocksSize uint diff --git a/gossip/store.go b/gossip/store.go index 1da5fe89f..9706d7a61 100644 --- a/gossip/store.go +++ b/gossip/store.go @@ -17,6 +17,7 @@ import ( "github.com/Fantom-foundation/go-opera/gossip/evmstore" "github.com/Fantom-foundation/go-opera/logger" "github.com/Fantom-foundation/go-opera/utils/adapters/snap2kvdb" + "github.com/Fantom-foundation/go-opera/utils/eventid" "github.com/Fantom-foundation/go-opera/utils/rlpstore" "github.com/Fantom-foundation/go-opera/utils/switchable" ) @@ -66,7 +67,8 @@ type Store struct { epochStore atomic.Value cache struct { - Events *wlru.Cache `cache:"-"` // store by pointer + Events *wlru.Cache `cache:"-"` // store by pointer + EventIDs *eventid.Cache EventsHeaders *wlru.Cache `cache:"-"` // store by pointer Blocks *wlru.Cache `cache:"-"` // store by pointer BlockHashes *wlru.Cache `cache:"-"` // store by pointer @@ -139,6 +141,8 @@ func (s *Store) initCache() { eventsHeadersCacheSize := nominalSize * uint(eventsHeadersNum) s.cache.EventsHeaders = s.makeCache(eventsHeadersCacheSize, eventsHeadersNum) + s.cache.EventIDs = eventid.NewCache(s.cfg.Cache.EventsIDsNum) + blockEpochStatesNum := s.cfg.Cache.BlockEpochStateNum blockEpochStatesSize := nominalSize * uint(blockEpochStatesNum) s.cache.BlockEpochStateHistory = s.makeCache(blockEpochStatesSize, blockEpochStatesNum) diff --git a/gossip/store_event.go b/gossip/store_event.go index b7e1cea3c..eba98135a 100644 --- a/gossip/store_event.go +++ b/gossip/store_event.go @@ -27,6 +27,7 @@ func (s *Store) DelEvent(id hash.Event) { // Remove from LRU cache. s.cache.Events.Remove(id) s.cache.EventsHeaders.Remove(id) + s.cache.EventIDs.Remove(id) } // SetEvent stores event. @@ -39,6 +40,7 @@ func (s *Store) SetEvent(e *inter.EventPayload) { s.cache.Events.Add(e.ID(), e, uint(e.Size())) eh := e.Event s.cache.EventsHeaders.Add(e.ID(), &eh, nominalSize) + s.cache.EventIDs.Add(e.ID()) } // GetEventPayload returns stored event. @@ -152,6 +154,9 @@ func (s *Store) GetEventPayloadRLP(id hash.Event) rlp.RawValue { // HasEvent returns true if event exists. 
func (s *Store) HasEvent(h hash.Event) bool { + if has, ok := s.cache.EventIDs.Has(h); ok { + return has + } has, _ := s.table.Events.Has(h.Bytes()) return has } diff --git a/utils/eventid/eventid.go b/utils/eventid/eventid.go new file mode 100644 index 000000000..82424dcc4 --- /dev/null +++ b/utils/eventid/eventid.go @@ -0,0 +1,69 @@ +package eventid + +import ( + "sync" + + "github.com/Fantom-foundation/lachesis-base/hash" + "github.com/Fantom-foundation/lachesis-base/inter/idx" +) + +type Cache struct { + ids map[hash.Event]bool + mu sync.RWMutex + maxSize int + epoch idx.Epoch +} + +func NewCache(maxSize int) *Cache { + return &Cache{ + maxSize: maxSize, + } +} + +func (c *Cache) Reset(epoch idx.Epoch) { + c.mu.Lock() + defer c.mu.Unlock() + c.ids = make(map[hash.Event]bool) + c.epoch = epoch +} + +func (c *Cache) Has(id hash.Event) (has bool, ok bool) { + c.mu.RLock() + defer c.mu.RUnlock() + + if c.ids == nil { + return false, false + } + if c.epoch != id.Epoch() { + return false, false + } + return c.ids[id], true +} + +func (c *Cache) Add(id hash.Event) bool { + c.mu.Lock() + defer c.mu.Unlock() + + if c.ids == nil { + return false + } + if c.epoch != id.Epoch() { + return false + } + if len(c.ids) >= c.maxSize { + c.ids = nil + return false + } + c.ids[id] = true + return true +} + +func (c *Cache) Remove(id hash.Event) { + c.mu.Lock() + defer c.mu.Unlock() + + if c.ids == nil { + return + } + delete(c.ids, id) +} From 00ddec3a60de62ac397c8b9cca3db8e9f21cb87a Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Wed, 29 Jun 2022 22:41:36 +0400 Subject: [PATCH 25/87] add caching for LLR voting weights --- gossip/config.go | 16 ++++-- gossip/store.go | 7 +++ gossip/store_llr_block.go | 105 +++++++++++++++++++++++++++++++++----- gossip/store_llr_epoch.go | 13 ++++- 4 files changed, 122 insertions(+), 19 deletions(-) diff --git a/gossip/config.go b/gossip/config.go index b3bef6309..5b6d78bd1 100644 --- a/gossip/config.go +++ b/gossip/config.go @@ -114,6 +114,9 @@ type ( BlocksSize uint // Cache size for history block/epoch states. BlockEpochStateNum int + + LlrBlockVotesIndexes int + LlrEpochVotesIndexes int } // StoreConfig is a config for store db. 
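A minimal usage sketch of the epoch-scoped ID cache introduced above, assuming only the new utils/eventid package plus lachesis-base's hash, idx and bigendian packages; the fakeID helper is hypothetical and simply builds an event ID whose first four bytes encode the epoch, which is what hash.Event.Epoch() reads. The cache answers queries only for the epoch it was last Reset() to, and once maxSize is exceeded it disables itself until the next Reset() instead of evicting, so HasEvent() falls back to the database.

package main

import (
	"fmt"

	"github.com/Fantom-foundation/lachesis-base/common/bigendian"
	"github.com/Fantom-foundation/lachesis-base/hash"
	"github.com/Fantom-foundation/lachesis-base/inter/idx"

	"github.com/Fantom-foundation/go-opera/utils/eventid"
)

// fakeID is a hypothetical helper: it builds an event ID whose first four
// bytes encode the epoch, which is how hash.Event derives its Epoch().
func fakeID(epoch idx.Epoch, seq byte) hash.Event {
	var id hash.Event
	copy(id[:4], bigendian.Uint32ToBytes(uint32(epoch)))
	id[31] = seq
	return id
}

func main() {
	c := eventid.NewCache(2)

	// Until the first Reset the cache cannot answer queries at all.
	if _, ok := c.Has(fakeID(1, 1)); !ok {
		fmt.Println("cache not initialized yet")
	}

	// switchEpochTo() calls Reset(newEpoch) on every epoch change.
	c.Reset(1)

	id := fakeID(1, 1)
	c.Add(id)
	has, ok := c.Has(id)
	fmt.Println(has, ok) // true true

	// IDs from a different epoch are neither cached nor answered.
	fmt.Println(c.Add(fakeID(2, 1))) // false

	// Exceeding maxSize drops the whole map instead of evicting, so
	// subsequent lookups fall back to the DB until the next Reset.
	c.Add(fakeID(1, 2))
	c.Add(fakeID(1, 3))
	_, ok = c.Has(id)
	fmt.Println(ok) // false after overflow
}

Resetting per epoch keeps the map small and avoids encoding the epoch into every key, which is why switchEpochTo() above calls Reset(newEpoch) before any events of the new epoch are processed.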
@@ -256,11 +259,14 @@ func (c *Config) Validate() error { func DefaultStoreConfig(scale cachescale.Func) StoreConfig { return StoreConfig{ Cache: StoreCacheConfig{ - EventsNum: scale.I(5000), - EventsSize: scale.U(6 * opt.MiB), - BlocksNum: scale.I(5000), - BlocksSize: scale.U(512 * opt.KiB), - BlockEpochStateNum: scale.I(8), + EventsNum: scale.I(5000), + EventsSize: scale.U(6 * opt.MiB), + EventsIDsNum: scale.I(100000), + BlocksNum: scale.I(5000), + BlocksSize: scale.U(512 * opt.KiB), + BlockEpochStateNum: scale.I(8), + LlrBlockVotesIndexes: scale.I(100), + LlrEpochVotesIndexes: scale.I(5), }, EVM: evmstore.DefaultStoreConfig(scale), MaxNonFlushedSize: 17*opt.MiB + scale.I(5*opt.MiB), diff --git a/gossip/store.go b/gossip/store.go index 9706d7a61..0786012aa 100644 --- a/gossip/store.go +++ b/gossip/store.go @@ -82,6 +82,8 @@ type Store struct { KvdbEvmSnap atomic.Value // store by pointer UpgradeHeights atomic.Value // store by pointer Genesis atomic.Value // store by value + LlrBlockVotesIndex *VotesCache // store by pointer + LlrEpochVoteIndex *VotesCache // store by pointer } mutex struct { @@ -146,6 +148,9 @@ func (s *Store) initCache() { blockEpochStatesNum := s.cfg.Cache.BlockEpochStateNum blockEpochStatesSize := nominalSize * uint(blockEpochStatesNum) s.cache.BlockEpochStateHistory = s.makeCache(blockEpochStatesSize, blockEpochStatesNum) + + s.cache.LlrBlockVotesIndex = NewVotesCache(s.cfg.Cache.LlrBlockVotesIndexes, s.flushLlrBlockVoteWeight) + s.cache.LlrEpochVoteIndex = NewVotesCache(s.cfg.Cache.LlrEpochVotesIndexes, s.flushLlrEpochVoteWeight) } // Close closes underlying database. @@ -211,6 +216,8 @@ func (s *Store) Commit() error { s.FlushLastBVs() s.FlushLastEV() s.FlushLlrState() + s.cache.LlrBlockVotesIndex.FlushMutated(s.flushLlrBlockVoteWeight) + s.cache.LlrEpochVoteIndex.FlushMutated(s.flushLlrEpochVoteWeight) es := s.getAnyEpochStore() if es != nil { es.FlushHeads() diff --git a/gossip/store_llr_block.go b/gossip/store_llr_block.go index db3747039..ffbe10de7 100644 --- a/gossip/store_llr_block.go +++ b/gossip/store_llr_block.go @@ -6,6 +6,7 @@ import ( "github.com/Fantom-foundation/lachesis-base/inter/idx" "github.com/Fantom-foundation/lachesis-base/inter/pos" "github.com/Fantom-foundation/lachesis-base/kvdb" + "github.com/Fantom-foundation/lachesis-base/utils/simplewlru" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rlp" @@ -34,7 +35,10 @@ func (s *Store) IterateOverlappingBlockVotesRLP(start []byte, f func(key []byte, } } -func (s *Store) getLlrVoteWeight(reader kvdb.Reader, key []byte) (pos.Weight, bitmap.Set) { +func (s *Store) getLlrVoteWeight(cache *VotesCache, reader kvdb.Reader, cKey VotesCacheID, key []byte) (pos.Weight, bitmap.Set) { + if cached := cache.Get(cKey); cached != nil { + return cached.weight, cached.set + } weightB, err := reader.Get(key) if err != nil { s.Log.Crit("Failed to get key-value", "err", err) @@ -42,31 +46,55 @@ func (s *Store) getLlrVoteWeight(reader kvdb.Reader, key []byte) (pos.Weight, bi if weightB == nil { return 0, nil } - return pos.Weight(bigendian.BytesToUint32(weightB[:4])), weightB[4:] + weight, set := pos.Weight(bigendian.BytesToUint32(weightB[:4])), weightB[4:] + cache.Add(cKey, VotesCacheValue{ + weight: weight, + set: set, + mutated: false, + }) + return weight, set +} + +func (s *Store) flushLlrVoteWeight(table kvdb.Writer, key []byte, weight pos.Weight, set bitmap.Set) { + err := table.Put(key, append(bigendian.Uint32ToBytes(uint32(weight)), set...)) + if err != nil { + s.Log.Crit("Failed 
to put key-value", "err", err) + } +} + +func (s *Store) flushLlrBlockVoteWeight(cKey VotesCacheID, value VotesCacheValue) { + key := append(cKey.Block.Bytes(), append(cKey.Epoch.Bytes(), cKey.V[:]...)...) + s.flushLlrVoteWeight(s.table.LlrBlockVotesIndex, key, value.weight, value.set) } -func (s *Store) addLlrVoteWeight(table kvdb.Store, key []byte, val idx.Validator, vals idx.Validator, diff pos.Weight) pos.Weight { - weight, set := s.getLlrVoteWeight(table, key) - if set != nil && set.Has(int(val)) { +func (s *Store) addLlrVoteWeight(cache *VotesCache, reader kvdb.Reader, cKey VotesCacheID, key []byte, validator idx.Validator, validatorsNum idx.Validator, diff pos.Weight) pos.Weight { + weight, set := s.getLlrVoteWeight(cache, reader, cKey, key) + if set != nil && set.Has(int(validator)) { // don't count the vote if validator already voted return weight } if set == nil { - set = bitmap.New(int(vals)) + set = bitmap.New(int(validatorsNum)) } - set.Put(int(val)) + set.Put(int(validator)) weight += diff - // save to the DB - err := table.Put(key, append(bigendian.Uint32ToBytes(uint32(weight)), set...)) - if err != nil { - s.Log.Crit("Failed to put key-value", "err", err) - } + // save to cache which will be later flushed to the DB + cache.Add(cKey, VotesCacheValue{ + weight: weight, + set: set, + mutated: true, + }) return weight } func (s *Store) AddLlrBlockVoteWeight(block idx.Block, epoch idx.Epoch, bv hash.Hash, val idx.Validator, vals idx.Validator, diff pos.Weight) pos.Weight { key := append(block.Bytes(), append(epoch.Bytes(), bv[:]...)...) - return s.addLlrVoteWeight(s.table.LlrBlockVotesIndex, key, val, vals, diff) + cKey := VotesCacheID{ + Block: block, + Epoch: epoch, + V: bv, + } + return s.addLlrVoteWeight(s.cache.LlrBlockVotesIndex, s.table.LlrBlockVotesIndex, cKey, key, val, vals, diff) } func (s *Store) SetLlrBlockResult(block idx.Block, bv hash.Hash) { @@ -171,3 +199,54 @@ func (s *Store) IterateFullBlockRecordsRLP(start idx.Block, f func(b idx.Block, } } } + +type VotesCacheID struct { + Block idx.Block + Epoch idx.Epoch + V hash.Hash +} + +type VotesCacheValue struct { + weight pos.Weight + set bitmap.Set + mutated bool +} + +type VotesCache struct { + votes *simplewlru.Cache +} + +func NewVotesCache(maxSize int, evictedFn func(VotesCacheID, VotesCacheValue)) *VotesCache { + votes, _ := simplewlru.NewWithEvict(uint(maxSize), maxSize, func(key interface{}, _value interface{}) { + value := _value.(*VotesCacheValue) + if value.mutated { + evictedFn(key.(VotesCacheID), *value) + } + }) + return &VotesCache{ + votes: votes, + } +} + +func (c *VotesCache) FlushMutated(write func(VotesCacheID, VotesCacheValue)) { + keys := c.votes.Keys() + for _, k := range keys { + val_, _ := c.votes.Peek(k) + val := val_.(*VotesCacheValue) + if val.mutated { + write(k.(VotesCacheID), *val) + val.mutated = false + } + } +} + +func (c *VotesCache) Get(key VotesCacheID) *VotesCacheValue { + if v, ok := c.votes.Get(key); ok { + return v.(*VotesCacheValue) + } + return nil +} + +func (c *VotesCache) Add(key VotesCacheID, val VotesCacheValue) { + c.votes.Add(key, &val, nominalSize) +} diff --git a/gossip/store_llr_epoch.go b/gossip/store_llr_epoch.go index 0129cd768..c0d258d0c 100644 --- a/gossip/store_llr_epoch.go +++ b/gossip/store_llr_epoch.go @@ -4,6 +4,7 @@ import ( "github.com/Fantom-foundation/lachesis-base/hash" "github.com/Fantom-foundation/lachesis-base/inter/idx" "github.com/Fantom-foundation/lachesis-base/inter/pos" + "github.com/ethereum/go-ethereum/common" 
"github.com/ethereum/go-ethereum/rlp" "github.com/Fantom-foundation/go-opera/inter" @@ -32,9 +33,19 @@ func (s *Store) iterateEpochVotesRLP(prefix []byte, f func(key []byte, ev rlp.Ra } } +func (s *Store) flushLlrEpochVoteWeight(cKey VotesCacheID, value VotesCacheValue) { + key := append(cKey.Epoch.Bytes(), cKey.V[:]...) + s.flushLlrVoteWeight(s.table.LlrEpochVoteIndex, key, value.weight, value.set) +} + func (s *Store) AddLlrEpochVoteWeight(epoch idx.Epoch, ev hash.Hash, val idx.Validator, vals idx.Validator, diff pos.Weight) pos.Weight { key := append(epoch.Bytes(), ev[:]...) - return s.addLlrVoteWeight(s.table.LlrEpochVoteIndex, key, val, vals, diff) + cKey := VotesCacheID{ + Block: 0, + Epoch: epoch, + V: ev, + } + return s.addLlrVoteWeight(s.cache.LlrEpochVoteIndex, s.table.LlrEpochVoteIndex, cKey, key, val, vals, diff) } func (s *Store) SetLlrEpochResult(epoch idx.Epoch, ev hash.Hash) { From aae378ad1dad0e6cd9ded52691beea30bceeb5e0 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Wed, 29 Jun 2022 22:45:19 +0400 Subject: [PATCH 26/87] fix keeping mutating memory from it.Value() --- gossip/store_llr_epoch.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/gossip/store_llr_epoch.go b/gossip/store_llr_epoch.go index c0d258d0c..a10f680e8 100644 --- a/gossip/store_llr_epoch.go +++ b/gossip/store_llr_epoch.go @@ -23,11 +23,11 @@ func (s *Store) HasEpochVote(epoch idx.Epoch, id hash.Event) bool { return ok } -func (s *Store) iterateEpochVotesRLP(prefix []byte, f func(key []byte, ev rlp.RawValue) bool) { +func (s *Store) iterateEpochVotesRLP(prefix []byte, f func(ev rlp.RawValue) bool) { it := s.table.LlrEpochVotes.NewIterator(prefix, nil) defer it.Release() for it.Next() { - if !f(it.Key(), it.Value()) { + if !f(common.CopyBytes(it.Value())) { break } } @@ -84,7 +84,7 @@ func (s *Store) IterateEpochPacksRLP(start idx.Epoch, f func(epoch idx.Epoch, ep epoch := idx.BytesToEpoch(it.Key()) evs := make([]rlp.RawValue, 0, 20) - s.iterateEpochVotesRLP(it.Key(), func(key []byte, ev rlp.RawValue) bool { + s.iterateEpochVotesRLP(it.Key(), func(ev rlp.RawValue) bool { evs = append(evs, ev) return len(evs) < maxEpochPackVotes }) From d224554a3a8aa272bafd38264ce2f66fe14ff760 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Wed, 29 Jun 2022 22:46:08 +0400 Subject: [PATCH 27/87] erase support for outdated DB migrations --- gossip/store_migration.go | 286 +------------------------------------- 1 file changed, 4 insertions(+), 282 deletions(-) diff --git a/gossip/store_migration.go b/gossip/store_migration.go index cc109e8c3..12491fd8d 100644 --- a/gossip/store_migration.go +++ b/gossip/store_migration.go @@ -3,19 +3,14 @@ package gossip import ( "errors" "fmt" - "math/big" "github.com/Fantom-foundation/lachesis-base/hash" - "github.com/Fantom-foundation/lachesis-base/inter/idx" "github.com/Fantom-foundation/lachesis-base/kvdb" "github.com/Fantom-foundation/lachesis-base/kvdb/table" - "github.com/Fantom-foundation/lachesis-base/lachesis" "github.com/ethereum/go-ethereum/common" "github.com/Fantom-foundation/go-opera/inter" "github.com/Fantom-foundation/go-opera/inter/iblockproc" - "github.com/Fantom-foundation/go-opera/opera" - "github.com/Fantom-foundation/go-opera/utils/concurrent" "github.com/Fantom-foundation/go-opera/utils/migration" ) @@ -41,10 +36,10 @@ func (s *Store) migrations() *migration.Migration { return migration. Begin("opera-gossip-store"). Next("used gas recovery", unsupportedMigration). - Next("tx hashes recovery", s.recoverTxHashes). 
- Next("DAG heads recovery", s.recoverHeadsStorage). - Next("DAG last events recovery", s.recoverLastEventsStorage). - Next("BlockState recovery", s.recoverBlockState). + Next("tx hashes recovery", unsupportedMigration). + Next("DAG heads recovery", unsupportedMigration). + Next("DAG last events recovery", unsupportedMigration). + Next("BlockState recovery", unsupportedMigration). Next("LlrState recovery", s.recoverLlrState). Next("erase gossip-async db", s.eraseGossipAsyncDB). Next("erase SFC API table", s.eraseSfcApiTable). @@ -79,271 +74,6 @@ func fixEventTxHashes(e *inter.EventPayload) { } } -type kvEntry struct { - key []byte - value []byte -} - -func (s *Store) recoverTxHashes() error { - if s.GetRules().NetworkID != 0xfa { - return nil - } - - diff1 := []kvEntry{ - {key: common.Hex2Bytes("4c720000000000484f05497bf2b3f14a9d4f4e8ce89fddc26e4c912393e9f6a71dc188c19fa7115cfba70000000000000007"), value: nil}, - {key: common.Hex2Bytes("4c720000000000484f05497bf2b3f14a9d4f4e8ce89fddc26e4c912393e9f6a71dc188c19fa7115cfba70000000000000008"), value: nil}, - {key: common.Hex2Bytes("4c720000000000484f05497bf2b3f14a9d4f4e8ce89fddc26e4c912393e9f6a71dc188c19fa7115cfba70000000000000009"), value: nil}, - {key: common.Hex2Bytes("4c720000000000484f05497bf2b3f14a9d4f4e8ce89fddc26e4c912393e9f6a71dc188c19fa7115cfba7000000000000000a"), value: nil}, - {key: common.Hex2Bytes("4c740000000000000000000000000000000000000000000000000000000000000000020000000000484f05497bf2b3f14a9d4f4e8ce89fddc26e4c912393e9f6a71dc188c19fa7115cfba70000000000000007"), value: nil}, - {key: common.Hex2Bytes("4c740000000000000000000000000000000000000000000000000000000000000000020000000000484f05497bf2b3f14a9d4f4e8ce89fddc26e4c912393e9f6a71dc188c19fa7115cfba70000000000000008"), value: nil}, - {key: common.Hex2Bytes("4c740000000000000000000000000000000000000000000000000000000000000001030000000000484f05497bf2b3f14a9d4f4e8ce89fddc26e4c912393e9f6a71dc188c19fa7115cfba7000000000000000a"), value: nil}, - {key: common.Hex2Bytes("4c7400000000000000000000000004d02149058cc8c8d0cf5f6fd1dc5394a80d7371020000000000484f05497bf2b3f14a9d4f4e8ce89fddc26e4c912393e9f6a71dc188c19fa7115cfba7000000000000000a"), value: nil}, - {key: common.Hex2Bytes("4c7400000000000000000000000004d02149058cc8c8d0cf5f6fd1dc5394a80d7371030000000000484f05497bf2b3f14a9d4f4e8ce89fddc26e4c912393e9f6a71dc188c19fa7115cfba70000000000000009"), value: nil}, - {key: common.Hex2Bytes("4c740000000000000000000000004d5362dd18ea4ba880c829b0152b7ba371741e59030000000000484f05497bf2b3f14a9d4f4e8ce89fddc26e4c912393e9f6a71dc188c19fa7115cfba70000000000000007"), value: nil}, - {key: common.Hex2Bytes("4c740000000000000000000000005cc61a78f164885776aa610fb0fe1257df78e59b000000000000484f05497bf2b3f14a9d4f4e8ce89fddc26e4c912393e9f6a71dc188c19fa7115cfba70000000000000007"), value: nil}, - {key: common.Hex2Bytes("4c740000000000000000000000005cc61a78f164885776aa610fb0fe1257df78e59b000000000000484f05497bf2b3f14a9d4f4e8ce89fddc26e4c912393e9f6a71dc188c19fa7115cfba70000000000000008"), value: nil}, - {key: common.Hex2Bytes("4c740000000000000000000000005cc61a78f164885776aa610fb0fe1257df78e59b000000000000484f05497bf2b3f14a9d4f4e8ce89fddc26e4c912393e9f6a71dc188c19fa7115cfba70000000000000009"), value: nil}, - {key: common.Hex2Bytes("4c740000000000000000000000009083ea3756bde6ee6f27a6e996806fbd37f6f093000000000000484f05497bf2b3f14a9d4f4e8ce89fddc26e4c912393e9f6a71dc188c19fa7115cfba7000000000000000a"), value: nil}, - {key: 
common.Hex2Bytes("4c740000000000000000000000009083ea3756bde6ee6f27a6e996806fbd37f6f093020000000000484f05497bf2b3f14a9d4f4e8ce89fddc26e4c912393e9f6a71dc188c19fa7115cfba70000000000000009"), value: nil}, - {key: common.Hex2Bytes("4c740000000000000000000000009083ea3756bde6ee6f27a6e996806fbd37f6f093030000000000484f05497bf2b3f14a9d4f4e8ce89fddc26e4c912393e9f6a71dc188c19fa7115cfba70000000000000008"), value: nil}, - {key: common.Hex2Bytes("4c7490890809c654f11d6e72a28fa60149770a0d11ec6c92319d6ceb2bb0a4ea1a15010000000000484f05497bf2b3f14a9d4f4e8ce89fddc26e4c912393e9f6a71dc188c19fa7115cfba7000000000000000a"), value: nil}, - {key: common.Hex2Bytes("4c74ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef010000000000484f05497bf2b3f14a9d4f4e8ce89fddc26e4c912393e9f6a71dc188c19fa7115cfba70000000000000007"), value: nil}, - {key: common.Hex2Bytes("4c74ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef010000000000484f05497bf2b3f14a9d4f4e8ce89fddc26e4c912393e9f6a71dc188c19fa7115cfba70000000000000008"), value: nil}, - {key: common.Hex2Bytes("4c74ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef010000000000484f05497bf2b3f14a9d4f4e8ce89fddc26e4c912393e9f6a71dc188c19fa7115cfba70000000000000009"), value: nil}, - {key: common.Hex2Bytes("4c720000000000484f05b6840d4c0eb562b0b1731760223d91b36edc6c160958e23e773e6058eea304580000000000000007"), value: common.Hex2Bytes("ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef00000000000000000000000000000000000000000000000000000000000000000000000000000000000000004d5362dd18ea4ba880c829b0152b7ba371741e5900001718000003d9d4038f394e6b06123d0dc96da0e15ef6e16a799dcf0277df5cc61a78f164885776aa610fb0fe1257df78e59b000000000000000000000000000000000000000000000000502eca46e3971c71")}, - {key: common.Hex2Bytes("4c720000000000484f05b6840d4c0eb562b0b1731760223d91b36edc6c160958e23e773e6058eea304580000000000000008"), value: common.Hex2Bytes("ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef00000000000000000000000000000000000000000000000000000000000000000000000000000000000000009083ea3756bde6ee6f27a6e996806fbd37f6f09300001718000003d9d4038f394e6b06123d0dc96da0e15ef6e16a799dcf0277df5cc61a78f164885776aa610fb0fe1257df78e59b00000000000000000000000000000000000000000000000321d3e6c4e3e71c71")}, - {key: common.Hex2Bytes("4c720000000000484f05b6840d4c0eb562b0b1731760223d91b36edc6c160958e23e773e6058eea304580000000000000009"), value: common.Hex2Bytes("ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef0000000000000000000000009083ea3756bde6ee6f27a6e996806fbd37f6f09300000000000000000000000004d02149058cc8c8d0cf5f6fd1dc5394a80d737100001718000003d9d4038f394e6b06123d0dc96da0e15ef6e16a799dcf0277df5cc61a78f164885776aa610fb0fe1257df78e59b0000000000000000000000000000000000000000000000000439248ee41549c0")}, - {key: common.Hex2Bytes("4c720000000000484f05b6840d4c0eb562b0b1731760223d91b36edc6c160958e23e773e6058eea30458000000000000000a"), value: common.Hex2Bytes("90890809c654f11d6e72a28fa60149770a0d11ec6c92319d6ceb2bb0a4ea1a1500000000000000000000000004d02149058cc8c8d0cf5f6fd1dc5394a80d7371000000000000000000000000000000000000000000000000000000000000000100001718000003d9d4038f394e6b06123d0dc96da0e15ef6e16a799dcf0277df9083ea3756bde6ee6f27a6e996806fbd37f6f0930000000000000000000000000000000000000000000000000000000000000000")}, - {key: common.Hex2Bytes("4c740000000000000000000000000000000000000000000000000000000000000000020000000000484f05b6840d4c0eb562b0b1731760223d91b36edc6c160958e23e773e6058eea304580000000000000007"), value: common.Hex2Bytes("03")}, 
- {key: common.Hex2Bytes("4c740000000000000000000000000000000000000000000000000000000000000000020000000000484f05b6840d4c0eb562b0b1731760223d91b36edc6c160958e23e773e6058eea304580000000000000008"), value: common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("4c740000000000000000000000000000000000000000000000000000000000000001030000000000484f05b6840d4c0eb562b0b1731760223d91b36edc6c160958e23e773e6058eea30458000000000000000a"), value: common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("4c7400000000000000000000000004d02149058cc8c8d0cf5f6fd1dc5394a80d7371020000000000484f05b6840d4c0eb562b0b1731760223d91b36edc6c160958e23e773e6058eea30458000000000000000a"), value: common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("4c7400000000000000000000000004d02149058cc8c8d0cf5f6fd1dc5394a80d7371030000000000484f05b6840d4c0eb562b0b1731760223d91b36edc6c160958e23e773e6058eea304580000000000000009"), value: common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("4c740000000000000000000000004d5362dd18ea4ba880c829b0152b7ba371741e59030000000000484f05b6840d4c0eb562b0b1731760223d91b36edc6c160958e23e773e6058eea304580000000000000007"), value: common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("4c740000000000000000000000005cc61a78f164885776aa610fb0fe1257df78e59b000000000000484f05b6840d4c0eb562b0b1731760223d91b36edc6c160958e23e773e6058eea304580000000000000007"), value: common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("4c740000000000000000000000005cc61a78f164885776aa610fb0fe1257df78e59b000000000000484f05b6840d4c0eb562b0b1731760223d91b36edc6c160958e23e773e6058eea304580000000000000008"), value: common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("4c740000000000000000000000005cc61a78f164885776aa610fb0fe1257df78e59b000000000000484f05b6840d4c0eb562b0b1731760223d91b36edc6c160958e23e773e6058eea304580000000000000009"), value: common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("4c740000000000000000000000009083ea3756bde6ee6f27a6e996806fbd37f6f093000000000000484f05b6840d4c0eb562b0b1731760223d91b36edc6c160958e23e773e6058eea30458000000000000000a"), value: common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("4c740000000000000000000000009083ea3756bde6ee6f27a6e996806fbd37f6f093020000000000484f05b6840d4c0eb562b0b1731760223d91b36edc6c160958e23e773e6058eea304580000000000000009"), value: common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("4c740000000000000000000000009083ea3756bde6ee6f27a6e996806fbd37f6f093030000000000484f05b6840d4c0eb562b0b1731760223d91b36edc6c160958e23e773e6058eea304580000000000000008"), value: common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("4c7490890809c654f11d6e72a28fa60149770a0d11ec6c92319d6ceb2bb0a4ea1a15010000000000484f05b6840d4c0eb562b0b1731760223d91b36edc6c160958e23e773e6058eea30458000000000000000a"), value: common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("4c74ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef010000000000484f05b6840d4c0eb562b0b1731760223d91b36edc6c160958e23e773e6058eea304580000000000000007"), value: common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("4c74ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef010000000000484f05b6840d4c0eb562b0b1731760223d91b36edc6c160958e23e773e6058eea304580000000000000008"), value: common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("4c74ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef010000000000484f05b6840d4c0eb562b0b1731760223d91b36edc6c160958e23e773e6058eea304580000000000000009"), value: common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("78497bf2b3f14a9d4f4e8ce89fddc26e4c912393e9f6a71dc188c19fa7115cfba7"), value: 
common.Hex2Bytes("e783453462a000000000000000000000000000000000000000000000000000000000000000008001")}, - {key: common.Hex2Bytes("78b6840d4c0eb562b0b1731760223d91b36edc6c160958e23e773e6058eea30458"), value: common.Hex2Bytes("e783484f05a000001718000003d4d3955bf592e12fb80a60574fa4b18bd5805b4c010d75e86d0202")}, - } - - diff2 := []kvEntry{ - {key: common.Hex2Bytes("4c72000000000049431bc511216e8bff6c347014ed695cb308f2792d07fd30079f12286768716b5bfacb0000000000000004"), value: nil}, - {key: common.Hex2Bytes("4c72000000000049431bc511216e8bff6c347014ed695cb308f2792d07fd30079f12286768716b5bfacb0000000000000005"), value: nil}, - {key: common.Hex2Bytes("4c74000000000000000000000000000000000000000000000000000000000000000103000000000049431bc511216e8bff6c347014ed695cb308f2792d07fd30079f12286768716b5bfacb0000000000000005"), value: nil}, - {key: common.Hex2Bytes("4c740000000000000000000000001325625ae81846e80ac9d0b8113f31e1f8b479a802000000000049431bc511216e8bff6c347014ed695cb308f2792d07fd30079f12286768716b5bfacb0000000000000005"), value: nil}, - {key: common.Hex2Bytes("4c740000000000000000000000001325625ae81846e80ac9d0b8113f31e1f8b479a803000000000049431bc511216e8bff6c347014ed695cb308f2792d07fd30079f12286768716b5bfacb0000000000000004"), value: nil}, - {key: common.Hex2Bytes("4c740000000000000000000000005cc61a78f164885776aa610fb0fe1257df78e59b00000000000049431bc511216e8bff6c347014ed695cb308f2792d07fd30079f12286768716b5bfacb0000000000000004"), value: nil}, - {key: common.Hex2Bytes("4c740000000000000000000000009083ea3756bde6ee6f27a6e996806fbd37f6f09300000000000049431bc511216e8bff6c347014ed695cb308f2792d07fd30079f12286768716b5bfacb0000000000000005"), value: nil}, - {key: common.Hex2Bytes("4c740000000000000000000000009083ea3756bde6ee6f27a6e996806fbd37f6f09302000000000049431bc511216e8bff6c347014ed695cb308f2792d07fd30079f12286768716b5bfacb0000000000000004"), value: nil}, - {key: common.Hex2Bytes("4c7490890809c654f11d6e72a28fa60149770a0d11ec6c92319d6ceb2bb0a4ea1a1501000000000049431bc511216e8bff6c347014ed695cb308f2792d07fd30079f12286768716b5bfacb0000000000000005"), value: nil}, - {key: common.Hex2Bytes("4c74ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef01000000000049431bc511216e8bff6c347014ed695cb308f2792d07fd30079f12286768716b5bfacb0000000000000004"), value: nil}, - {key: common.Hex2Bytes("4c72000000000049431b3aeede91740093cb8feb1296a34cf70d86d2f802cff860edd798978e94a405340000000000000004"), value: common.Hex2Bytes("ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef0000000000000000000000009083ea3756bde6ee6f27a6e996806fbd37f6f0930000000000000000000000001325625ae81846e80ac9d0b8113f31e1f8b479a80000179e00000c49ff37137ee31a4020047bbff39016c3df896319816f2cfe565cc61a78f164885776aa610fb0fe1257df78e59b00000000000000000000000000000000000000000000000f5e7017a20b72fcb3")}, - {key: common.Hex2Bytes("4c72000000000049431b3aeede91740093cb8feb1296a34cf70d86d2f802cff860edd798978e94a405340000000000000005"), value: common.Hex2Bytes("90890809c654f11d6e72a28fa60149770a0d11ec6c92319d6ceb2bb0a4ea1a150000000000000000000000001325625ae81846e80ac9d0b8113f31e1f8b479a800000000000000000000000000000000000000000000000000000000000000010000179e00000c49ff37137ee31a4020047bbff39016c3df896319816f2cfe569083ea3756bde6ee6f27a6e996806fbd37f6f0930000000000000000000000000000000000000000000000000000000000000000")}, - {key: common.Hex2Bytes("4c74000000000000000000000000000000000000000000000000000000000000000103000000000049431b3aeede91740093cb8feb1296a34cf70d86d2f802cff860edd798978e94a405340000000000000005"), value: 
common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("4c740000000000000000000000001325625ae81846e80ac9d0b8113f31e1f8b479a802000000000049431b3aeede91740093cb8feb1296a34cf70d86d2f802cff860edd798978e94a405340000000000000005"), value: common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("4c740000000000000000000000001325625ae81846e80ac9d0b8113f31e1f8b479a803000000000049431b3aeede91740093cb8feb1296a34cf70d86d2f802cff860edd798978e94a405340000000000000004"), value: common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("4c740000000000000000000000005cc61a78f164885776aa610fb0fe1257df78e59b00000000000049431b3aeede91740093cb8feb1296a34cf70d86d2f802cff860edd798978e94a405340000000000000004"), value: common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("4c740000000000000000000000009083ea3756bde6ee6f27a6e996806fbd37f6f09300000000000049431b3aeede91740093cb8feb1296a34cf70d86d2f802cff860edd798978e94a405340000000000000005"), value: common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("4c740000000000000000000000009083ea3756bde6ee6f27a6e996806fbd37f6f09302000000000049431b3aeede91740093cb8feb1296a34cf70d86d2f802cff860edd798978e94a405340000000000000004"), value: common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("4c7490890809c654f11d6e72a28fa60149770a0d11ec6c92319d6ceb2bb0a4ea1a1501000000000049431b3aeede91740093cb8feb1296a34cf70d86d2f802cff860edd798978e94a405340000000000000005"), value: common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("4c74ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef01000000000049431b3aeede91740093cb8feb1296a34cf70d86d2f802cff860edd798978e94a405340000000000000004"), value: common.Hex2Bytes("03")}, - {key: common.Hex2Bytes("783aeede91740093cb8feb1296a34cf70d86d2f802cff860edd798978e94a40534"), value: common.Hex2Bytes("e78349431ba00000179e00000c464d756a7614d0ca067fcb37ee4452004bf308c9df561e85e88001")}, - {key: common.Hex2Bytes("78c511216e8bff6c347014ed695cb308f2792d07fd30079f12286768716b5bfacb"), value: common.Hex2Bytes("e78344dd6ca000000000000000000000000000000000000000000000000000000000000000008080")}, - } - - diff := []kvEntry{} - blockIdx := s.GetLatestBlockIndex() - if blockIdx >= fixTxBlock1 { - diff = append(diff, diff1...) - } - if blockIdx >= fixTxBlock2 { - diff = append(diff, diff2...) 
- } - - for _, kv := range diff { - var err error - if kv.value != nil { - err = s.mainDB.Put(kv.key, kv.value) - } else { - err = s.mainDB.Delete(kv.key) - } - if err != nil { - return err - } - } - - return nil -} - -func (s *Store) recoverHeadsStorage() error { - s.loadEpochStore(s.GetEpoch()) - es := s.getEpochStore(s.GetEpoch()) - it := es.table.Heads.NewIterator(nil, nil) - defer it.Release() - heads := make(hash.EventsSet) - for it.Next() { - // note: key may be empty if DB was committed before being migrated, which is possible between migration steps - if len(it.Key()) > 0 { - heads.Add(hash.BytesToEvent(it.Key())) - } - _ = es.table.Heads.Delete(it.Key()) - } - es.SetHeads(concurrent.WrapEventsSet(heads)) - es.FlushHeads() - return nil -} - -func (s *Store) recoverLastEventsStorage() error { - s.loadEpochStore(s.GetEpoch()) - es := s.getEpochStore(s.GetEpoch()) - it := es.table.LastEvents.NewIterator(nil, nil) - defer it.Release() - lasts := make(map[idx.ValidatorID]hash.Event) - for it.Next() { - // note: key may be empty if DB was committed before being migrated, which is possible between migration steps - if len(it.Key()) > 0 { - lasts[idx.BytesToValidatorID(it.Key())] = hash.BytesToEvent(it.Value()) - } - _ = es.table.LastEvents.Delete(it.Key()) - } - es.SetLastEvents(concurrent.WrapValidatorEventsSet(lasts)) - es.FlushLastEvents() - return nil -} - -type ValidatorBlockStateV0 struct { - Cheater bool - LastEvent hash.Event - Uptime inter.Timestamp - LastOnlineTime inter.Timestamp - LastGasPowerLeft inter.GasPowerLeft - LastBlock idx.Block - DirtyGasRefund uint64 - Originated *big.Int -} - -type BlockStateV0 struct { - LastBlock iblockproc.BlockCtx - FinalizedStateRoot hash.Hash - - EpochGas uint64 - EpochCheaters lachesis.Cheaters - - ValidatorStates []ValidatorBlockStateV0 - NextValidatorProfiles iblockproc.ValidatorProfiles - - DirtyRules opera.Rules - - AdvanceEpochs idx.Epoch -} - -type BlockEpochStateV0 struct { - BlockState *BlockStateV0 - EpochState *iblockproc.EpochStateV0 -} - -func (s *Store) convertBlockEpochStateV0(oldEBS *BlockEpochStateV0) BlockEpochState { - oldES := oldEBS.EpochState - oldBS := oldEBS.BlockState - - newValidatorState := make([]iblockproc.ValidatorBlockState, len(oldBS.ValidatorStates)) - cheatersWritten := 0 - for i, vs := range oldBS.ValidatorStates { - lastEvent := &inter.Event{} - if vs.LastEvent != hash.ZeroEvent { - lastEvent = s.GetEvent(vs.LastEvent) - } - newValidatorState[i] = iblockproc.ValidatorBlockState{ - LastEvent: iblockproc.EventInfo{ - ID: vs.LastEvent, - GasPowerLeft: lastEvent.GasPowerLeft(), - Time: lastEvent.MedianTime(), - }, - Uptime: vs.Uptime, - LastOnlineTime: vs.LastOnlineTime, - LastGasPowerLeft: vs.LastGasPowerLeft, - LastBlock: vs.LastBlock, - DirtyGasRefund: vs.DirtyGasRefund, - Originated: vs.Originated, - } - if vs.Cheater { - cheatersWritten++ - } - } - - newBS := &iblockproc.BlockState{ - LastBlock: oldBS.LastBlock, - FinalizedStateRoot: oldBS.FinalizedStateRoot, - EpochGas: oldBS.EpochGas, - EpochCheaters: oldBS.EpochCheaters, - CheatersWritten: uint32(cheatersWritten), - ValidatorStates: newValidatorState, - NextValidatorProfiles: oldBS.NextValidatorProfiles, - DirtyRules: &oldBS.DirtyRules, - AdvanceEpochs: oldBS.AdvanceEpochs, - } - if oldES.Rules.String() == oldBS.DirtyRules.String() { - newBS.DirtyRules = nil - } - - newEs := &iblockproc.EpochState{ - Epoch: oldES.Epoch, - EpochStart: oldES.EpochStart, - PrevEpochStart: oldES.PrevEpochStart, - EpochStateRoot: oldES.EpochStateRoot, - Validators: 
oldES.Validators, - ValidatorStates: make([]iblockproc.ValidatorEpochState, len(oldES.ValidatorStates)), - ValidatorProfiles: oldES.ValidatorProfiles, - Rules: oldES.Rules, - } - for i, v := range oldES.ValidatorStates { - newEs.ValidatorStates[i].GasRefund = v.GasRefund - newEs.ValidatorStates[i].PrevEpochEvent.ID = v.PrevEpochEvent - lastEvent := &inter.Event{} - if v.PrevEpochEvent != hash.ZeroEvent { - lastEvent = s.GetEvent(v.PrevEpochEvent) - } - newEs.ValidatorStates[i].PrevEpochEvent.Time = lastEvent.MedianTime() - newEs.ValidatorStates[i].PrevEpochEvent.GasPowerLeft = lastEvent.GasPowerLeft() - } - - return BlockEpochState{ - BlockState: newBS, - EpochState: newEs, - } -} - -func (s *Store) recoverBlockState() error { - // current block state - v0, ok := s.rlp.Get(s.table.BlockEpochState, []byte(sKey), &BlockEpochStateV0{}).(*BlockEpochStateV0) - if !ok { - return errors.New("epoch state reading failed: genesis not applied") - } - v1 := s.convertBlockEpochStateV0(v0) - s.SetBlockEpochState(*v1.BlockState, *v1.EpochState) - s.FlushBlockEpochState() - - // history block state - for epoch := idx.Epoch(1); epoch <= v0.EpochState.Epoch; epoch++ { - v, ok := s.rlp.Get(s.table.BlockEpochStateHistory, epoch.Bytes(), &BlockEpochStateV0{}).(*BlockEpochStateV0) - if !ok { - continue - } - v1 = s.convertBlockEpochStateV0(v) - s.SetHistoryBlockEpochState(epoch, *v1.BlockState, *v1.EpochState) - } - - return nil -} - func (s *Store) recoverLlrState() error { v1, ok := s.rlp.Get(s.table.BlockEpochState, []byte(sKey), &BlockEpochState{}).(*BlockEpochState) if !ok { @@ -367,19 +97,11 @@ func (s *Store) eraseSfcApiTable() error { sfcapiTable := table.New(s.mainDB, []byte("S")) it := sfcapiTable.NewIterator(nil, nil) defer it.Release() - i := 0 for it.Next() { err := sfcapiTable.Delete(it.Key()) if err != nil { return err } - i++ - if i%1000 == 0 && s.IsCommitNeeded() { - err := s.Commit() - if err != nil { - return err - } - } } return nil } From 7985f36f1786bc32a771c502679c8657cf02b6df Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Tue, 28 Jun 2022 08:32:24 +0400 Subject: [PATCH 28/87] adjust genesis processing ETA logging --- opera/genesisstore/filelog/filelog.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/opera/genesisstore/filelog/filelog.go b/opera/genesisstore/filelog/filelog.go index 8dde3509a..b6c71b894 100644 --- a/opera/genesisstore/filelog/filelog.go +++ b/opera/genesisstore/filelog/filelog.go @@ -30,9 +30,10 @@ func (f *Filelog) Read(p []byte) (n int, err error) { } else if f.consumed > 0 && f.consumed < f.size && time.Since(f.prevLog) >= f.period { elapsed := time.Since(f.start) eta := float64(f.size-f.consumed) / float64(f.consumed) * float64(elapsed) - eta *= 1.2 // show slightly higher ETA as performance degrades over larger volumes of data - progress := fmt.Sprintf("%.2f%%", 100*float64(f.consumed)/float64(f.size)) - log.Info(fmt.Sprintf("- Reading %s", f.name), "progress", progress, "elapsed", utils.PrettyDuration(elapsed), "eta", utils.PrettyDuration(eta)) + progress := float64(f.consumed) / float64(f.size) + eta *= 1.0 + (1.0-progress)/2.0 // show slightly higher ETA as performance degrades over larger volumes of data + progressStr := fmt.Sprintf("%.2f%%", 100*progress) + log.Info(fmt.Sprintf("- Reading %s", f.name), "progress", progressStr, "elapsed", utils.PrettyDuration(elapsed), "eta", utils.PrettyDuration(eta)) f.prevLog = time.Now() } return From 16bc8694978b198d52361fd4f245cfc555b7ea6b Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Tue, 
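For the adjusted ETA in filelog.go above: with progress p = consumed/size, the raw estimate (size-consumed)/consumed * elapsed is now scaled by 1 + (1-p)/2, so the padding starts near +50% and shrinks to zero as the read completes. A small sketch of the arithmetic; the function name below is illustrative, not part of the package.

package main

import (
	"fmt"
	"time"
)

// etaWithPadding mirrors the adjusted estimate from filelog.go: the raw
// remaining/consumed ratio is padded by up to 50%, less as progress grows.
func etaWithPadding(size, consumed uint64, elapsed time.Duration) time.Duration {
	progress := float64(consumed) / float64(size)
	eta := float64(size-consumed) / float64(consumed) * float64(elapsed)
	eta *= 1.0 + (1.0-progress)/2.0
	return time.Duration(eta)
}

func main() {
	// 25% read after one minute: raw ETA 3m, reported ETA 180s * 1.375 = 247.5s (prints 4m7.5s).
	fmt.Println(etaWithPadding(100, 25, time.Minute))
}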
28 Jun 2022 13:01:54 +0400 Subject: [PATCH 29/87] fullsync syncs LLR only if synced up events --- gossip/handler.go | 9 +++++++++ gossip/sync.go | 4 ++++ 2 files changed, 13 insertions(+) diff --git a/gossip/handler.go b/gossip/handler.go index fb962ebab..4063d49e0 100644 --- a/gossip/handler.go +++ b/gossip/handler.go @@ -299,6 +299,9 @@ func newHandler( return epoch, llrs.LowestBlockToDecide }, MaxEpochToDecide: func() idx.Epoch { + if !h.syncStatus.RequestLLR() { + return 0 + } return h.store.GetLlrState().LowestEpochToFill }, IsProcessed: h.store.HasBlockVotes, @@ -330,6 +333,9 @@ func newHandler( return h.store.GetLlrState().LowestBlockToFill }, MaxBlockToFill: func() idx.Block { + if !h.syncStatus.RequestLLR() { + return 0 + } // rough estimation for the max fill-able block llrs := h.store.GetLlrState() start := llrs.LowestBlockToFill @@ -372,6 +378,9 @@ func newHandler( return llrs.LowestEpochToDecide }, MaxEpochToFetch: func() idx.Epoch { + if !h.syncStatus.RequestLLR() { + return 0 + } return h.store.GetLlrState().LowestEpochToDecide + 10000 }, IsProcessed: h.store.HasHistoryBlockEpochState, diff --git a/gossip/sync.go b/gossip/sync.go index a68c13473..595136d2f 100644 --- a/gossip/sync.go +++ b/gossip/sync.go @@ -63,6 +63,10 @@ func (ss *syncStatus) AcceptTxs() bool { return ss.MaybeSynced() && ss.Is(ssEvents) } +func (ss *syncStatus) RequestLLR() bool { + return !ss.Is(ssEvents) || ss.MaybeSynced() +} + type txsync struct { p *peer txids []common.Hash From 13f3e01e7fdbd4e05ee5f904a8a7746f43044f58 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Tue, 28 Jun 2022 13:17:45 +0400 Subject: [PATCH 30/87] stricter synced condition --- gossip/handler.go | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/gossip/handler.go b/gossip/handler.go index 4063d49e0..15a306c2f 100644 --- a/gossip/handler.go +++ b/gossip/handler.go @@ -931,11 +931,15 @@ func (h *handler) handleEvents(p *peer, events dag.Events, ordered bool) { // filter too high events notTooHigh := make(dag.Events, 0, len(events)) sessionCfg := h.config.Protocol.DagStreamLeecher.Session + now := time.Now() for _, e := range events { maxLamport := h.store.GetHighestLamport() + idx.Lamport(sessionCfg.DefaultChunkItemsNum+1)*idx.Lamport(sessionCfg.ParallelChunksDownload) if e.Lamport() <= maxLamport { notTooHigh = append(notTooHigh, e) } + if now.Sub(e.(inter.EventI).CreationTime().Time()) < 10*time.Minute { + h.syncStatus.MarkMaybeSynced() + } } if len(events) != len(notTooHigh) { h.dagLeecher.ForceSyncing() @@ -945,7 +949,6 @@ func (h *handler) handleEvents(p *peer, events dag.Events, ordered bool) { } // Schedule all the events for connection peer := *p - now := time.Now() requestEvents := func(ids []interface{}) error { return peer.RequestEvents(interfacesToEventIDs(ids)) } @@ -978,8 +981,6 @@ func (h *handler) handleMsg(p *peer) error { } defer h.msgSemaphore.Release(eventsSizeEst) - myEpoch := h.store.GetEpoch() - // Handle the message depending on its contents switch { case msg.Code == HandshakeMsg: @@ -992,9 +993,6 @@ func (h *handler) handleMsg(p *peer) error { return errResp(ErrDecode, "%v: %v", msg, err) } p.SetProgress(progress) - if progress.Epoch == myEpoch { - h.syncStatus.MarkMaybeSynced() - } case msg.Code == EvmTxsMsg: // Transactions arrived, make sure we have a valid and fresh graph to handle them @@ -1441,14 +1439,6 @@ func (h *handler) onNewEpochLoop() { select { case myEpoch := <-h.newEpochsCh: h.dagProcessor.Clear() - if !h.syncStatus.MaybeSynced() { - // Mark initial sync 
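Patches 29 and 30 above gate LLR traffic on sync status: while the node is still catching up on events, the Max*To* callbacks return 0 so the LLR leechers have nothing to request, and "maybe synced" is now inferred from seeing an event created within the last 10 minutes instead of comparing epochs with peers. A minimal sketch of that gating shape under illustrative names (not the actual go-opera types):

package main

import (
	"fmt"
	"time"
)

// syncState is a simplified stand-in for gossip.syncStatus.
type syncState struct {
	eventsOnly  bool // roughly ssEvents: still downloading events via fullsync
	maybeSynced bool // set once a fresh-enough event has been observed
}

// markMaybeSyncedIfFresh mirrors the stricter condition: an event created less
// than 10 minutes ago is taken as evidence that we are near the head.
func (s *syncState) markMaybeSyncedIfFresh(created time.Time) {
	if time.Since(created) < 10*time.Minute {
		s.maybeSynced = true
	}
}

// requestLLR mirrors syncStatus.RequestLLR: LLR is requested either when we
// are not in events-only mode, or once we look synced up.
func (s *syncState) requestLLR() bool {
	return !s.eventsOnly || s.maybeSynced
}

// maxEpochToDecide shows the gating pattern used by the handler callbacks:
// returning 0 means "nothing to fetch yet".
func maxEpochToDecide(s *syncState, lowestEpochToFill uint64) uint64 {
	if !s.requestLLR() {
		return 0
	}
	return lowestEpochToFill
}

func main() {
	s := &syncState{eventsOnly: true}
	fmt.Println(maxEpochToDecide(s, 42)) // 0: still syncing events, LLR requests suppressed
	s.markMaybeSyncedIfFresh(time.Now().Add(-time.Minute))
	fmt.Println(maxEpochToDecide(s, 42)) // 42: LLR requests enabled
}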
done on any peer which has the same epoch - for _, peer := range h.peers.List() { - if peer.progress.Epoch == myEpoch { - h.syncStatus.MarkMaybeSynced() - } - } - } h.dagLeecher.OnNewEpoch(myEpoch) // Err() channel will be closed when unsubscribing. case <-h.newEpochsSub.Err(): From 5d4da7748ce738a8e21676047c351b54db84ee69 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Sun, 27 Mar 2022 01:23:17 +0400 Subject: [PATCH 31/87] add support of multidb config --- cmd/opera/launcher/check.go | 34 ++- cmd/opera/launcher/config.go | 15 +- cmd/opera/launcher/dbcmd.go | 5 +- cmd/opera/launcher/export.go | 35 +-- cmd/opera/launcher/fixdirty.go | 7 +- cmd/opera/launcher/genesiscmd.go | 5 +- cmd/opera/launcher/import.go | 6 +- cmd/opera/launcher/launcher.go | 11 +- cmd/opera/launcher/snapshotcmd.go | 17 +- go.mod | 10 +- go.sum | 198 +++++++++++++- gossip/c_llr_callbacks_test.go | 12 +- gossip/evmstore/store.go | 64 +++-- gossip/evmstore/store_test.go | 4 +- gossip/store.go | 35 ++- gossip/store_epoch.go | 4 +- gossip/store_migration.go | 5 +- integration/assembly.go | 128 +++++---- integration/bench_db_flush_test.go | 19 +- integration/db.go | 363 ++++++++++++------------- integration/makefakegenesis/genesis.go | 2 +- integration/makegenesis/genesis.go | 10 +- integration/metric.go | 141 ++++++++++ integration/routing.go | 107 ++++++++ utils/adapters/ethdb2kvdb/adapter.go | 4 + utils/adapters/snap2kvdb/adapter.go | 2 + 26 files changed, 870 insertions(+), 373 deletions(-) create mode 100644 integration/metric.go create mode 100644 integration/routing.go diff --git a/cmd/opera/launcher/check.go b/cmd/opera/launcher/check.go index 23cd4d12d..43278ba48 100644 --- a/cmd/opera/launcher/check.go +++ b/cmd/opera/launcher/check.go @@ -5,6 +5,8 @@ import ( "time" "github.com/Fantom-foundation/lachesis-base/inter/idx" + "github.com/Fantom-foundation/lachesis-base/kvdb" + "github.com/Fantom-foundation/lachesis-base/kvdb/multidb" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -14,6 +16,34 @@ import ( "github.com/Fantom-foundation/go-opera/inter" ) +func makeRawDbsProducers(cfg *config) map[multidb.TypeName]kvdb.IterableDBProducer { + dbsList, err := integration.SupportedDBs(path.Join(cfg.Node.DataDir, "chaindata"), cfg.DBs.RuntimeCache) + if err != nil { + utils.Fatalf("Failed to initialize DB producers: %v", err) + } + if err := checkStateInitialized(dbsList); err != nil { + utils.Fatalf(err.Error()) + } + return dbsList +} + +func makeMultiRawDbsProducer(dbsList map[multidb.TypeName]kvdb.IterableDBProducer, cfg *config) kvdb.FullDBProducer { + multiRawDbs, err := integration.MakeRawMultiProducer(dbsList, cfg.DBs.Routing) + if err != nil { + utils.Fatalf("Failed to initialize multi DB producer: %v", err) + } + return multiRawDbs +} + +func makeRawDbsProducer(cfg *config) kvdb.FullDBProducer { + dbsList := makeRawDbsProducers(cfg) + multiRawDbs, err := integration.MakeRawMultiProducer(dbsList, cfg.DBs.Routing) + if err != nil { + utils.Fatalf("Failed to initialize multi DB producer: %v", err) + } + return multiRawDbs +} + func checkEvm(ctx *cli.Context) error { if len(ctx.Args()) != 0 { utils.Fatalf("This command doesn't require an argument.") @@ -21,8 +51,8 @@ func checkEvm(ctx *cli.Context) error { cfg := makeAllConfigs(ctx) - rawProducer := integration.DBProducer(path.Join(cfg.Node.DataDir, "chaindata"), cfg.cachescale) - gdb, err := makeRawGossipStore(rawProducer, cfg) + rawDbs := makeRawDbsProducer(cfg) + gdb, err := 
makeRawGossipStore(rawDbs, cfg) if err != nil { log.Crit("DB opening error", "datadir", cfg.Node.DataDir, "err", err) } diff --git a/cmd/opera/launcher/config.go b/cmd/opera/launcher/config.go index fffb308c3..f315d4eac 100644 --- a/cmd/opera/launcher/config.go +++ b/cmd/opera/launcher/config.go @@ -148,7 +148,7 @@ type config struct { Lachesis abft.Config LachesisStore abft.StoreConfig VectorClock vecmt.IndexConfig - cachescale cachescale.Func + DBs integration.DBsConfig } func (c *config) AppConfigs() integration.Configs { @@ -158,6 +158,7 @@ func (c *config) AppConfigs() integration.Configs { Lachesis: c.Lachesis, LachesisStore: c.LachesisStore, VectorClock: c.VectorClock, + DBs: c.DBs, } } @@ -369,7 +370,6 @@ func mayMakeAllConfigs(ctx *cli.Context) (*config, error) { Lachesis: abft.DefaultConfig(), LachesisStore: abft.DefaultStoreConfig(cacheRatio), VectorClock: vecmt.DefaultConfig(cacheRatio), - cachescale: cacheRatio, } if ctx.GlobalIsSet(FakeNetFlag.Name) { @@ -388,6 +388,17 @@ func mayMakeAllConfigs(ctx *cli.Context) (*config, error) { return &cfg, err } } + // apply default for DB config if it wasn't touched by config file + dbDefault := integration.DefaultDBsConfig(cacheRatio.U64, uint64(utils.MakeDatabaseHandles())) + if len(cfg.DBs.Routing.Table) == 0 { + cfg.DBs.Routing = dbDefault.Routing + } + if len(cfg.DBs.GenesisCache.Table) == 0 { + cfg.DBs.GenesisCache = dbDefault.GenesisCache + } + if len(cfg.DBs.RuntimeCache.Table) == 0 { + cfg.DBs.RuntimeCache = dbDefault.RuntimeCache + } // Apply flags (high priority) var err error diff --git a/cmd/opera/launcher/dbcmd.go b/cmd/opera/launcher/dbcmd.go index 0ed8a5c40..66c65cb5d 100644 --- a/cmd/opera/launcher/dbcmd.go +++ b/cmd/opera/launcher/dbcmd.go @@ -2,14 +2,11 @@ package launcher import ( "fmt" - "path" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "gopkg.in/urfave/cli.v1" - - "github.com/Fantom-foundation/go-opera/integration" ) var ( @@ -41,7 +38,7 @@ func compact(ctx *cli.Context) error { cfg := makeAllConfigs(ctx) - rawProducer := integration.DBProducer(path.Join(cfg.Node.DataDir, "chaindata"), cfg.cachescale) + rawProducer := makeRawDbsProducer(cfg) for _, name := range rawProducer.Names() { db, err := rawProducer.OpenDB(name) defer db.Close() diff --git a/cmd/opera/launcher/export.go b/cmd/opera/launcher/export.go index 8ef77dd35..696431b39 100644 --- a/cmd/opera/launcher/export.go +++ b/cmd/opera/launcher/export.go @@ -5,7 +5,6 @@ import ( "errors" "io" "os" - "path" "strconv" "strings" "time" @@ -13,6 +12,7 @@ import ( "github.com/Fantom-foundation/lachesis-base/hash" "github.com/Fantom-foundation/lachesis-base/inter/idx" "github.com/Fantom-foundation/lachesis-base/kvdb" + "github.com/Fantom-foundation/lachesis-base/kvdb/multidb" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -40,8 +40,8 @@ func exportEvents(ctx *cli.Context) error { cfg := makeAllConfigs(ctx) - rawProducer := integration.DBProducer(path.Join(cfg.Node.DataDir, "chaindata"), cfg.cachescale) - gdb, err := makeRawGossipStore(rawProducer, cfg) + rawDbs := makeRawDbsProducer(cfg) + gdb, err := makeRawGossipStore(rawDbs, cfg) if err != nil { log.Crit("DB opening error", "datadir", cfg.Node.DataDir, "err", err) } @@ -93,30 +93,25 @@ func exportEvents(ctx *cli.Context) error { return nil } -func checkStateInitialized(rawProducer kvdb.IterableDBProducer) error { - names := rawProducer.Names() - if len(names) 
== 0 { - return errors.New("datadir is not initialized") - } +func checkStateInitialized(rawProducers map[multidb.TypeName]kvdb.IterableDBProducer) error { // if flushID is not written, then previous genesis processing attempt was interrupted - for _, name := range names { - db, err := rawProducer.OpenDB(name) - if err != nil { - return err - } - flushID, _ := db.Get(integration.FlushIDKey) - _ = db.Close() - if flushID != nil { - return nil + for _, rawProducer := range rawProducers { + for _, name := range rawProducer.Names() { + db, err := rawProducer.OpenDB(name) + if err != nil { + return err + } + flushID, _ := db.Get(integration.FlushIDKey) + _ = db.Close() + if flushID != nil { + return nil + } } } return errors.New("datadir is not initialized") } func makeRawGossipStore(rawProducer kvdb.IterableDBProducer, cfg *config) (*gossip.Store, error) { - if err := checkStateInitialized(rawProducer); err != nil { - return nil, err - } dbs := &integration.DummyFlushableProducer{rawProducer} gdb := gossip.NewStore(dbs, cfg.OperaStore) return gdb, nil diff --git a/cmd/opera/launcher/fixdirty.go b/cmd/opera/launcher/fixdirty.go index 011afa5e7..7b414f0d1 100644 --- a/cmd/opera/launcher/fixdirty.go +++ b/cmd/opera/launcher/fixdirty.go @@ -2,7 +2,6 @@ package launcher import ( "fmt" - "path" "strings" "time" @@ -42,7 +41,7 @@ func fixDirty(ctx *cli.Context) error { cfg := makeAllConfigs(ctx) log.Info("Opening databases") - producer := integration.DBProducer(path.Join(cfg.Node.DataDir, "chaindata"), cfg.cachescale) + producer := makeRawDbsProducer(cfg) // reverts the gossip database state epochState, err := fixDirtyGossipDb(producer, cfg) @@ -66,7 +65,7 @@ func fixDirty(ctx *cli.Context) error { // prepare consensus database from epochState log.Info("Recreating lachesis db") cMainDb = mustOpenDB(producer, "lachesis") - cGetEpochDB := func(epoch idx.Epoch) kvdb.DropableStore { + cGetEpochDB := func(epoch idx.Epoch) kvdb.Store { return mustOpenDB(producer, fmt.Sprintf("lachesis-%d", epoch)) } cdb := abft.NewStore(cMainDb, cGetEpochDB, panics("Lachesis store"), cfg.LachesisStore) @@ -184,7 +183,7 @@ func clearDirtyFlags(rawProducer kvdb.IterableDBProducer) error { return nil } -func mustOpenDB(producer kvdb.DBProducer, name string) kvdb.DropableStore { +func mustOpenDB(producer kvdb.DBProducer, name string) kvdb.Store { db, err := producer.OpenDB(name) if err != nil { utils.Fatalf("Failed to open '%s' database: %v", name, err) diff --git a/cmd/opera/launcher/genesiscmd.go b/cmd/opera/launcher/genesiscmd.go index e7b526fd7..778e768e1 100644 --- a/cmd/opera/launcher/genesiscmd.go +++ b/cmd/opera/launcher/genesiscmd.go @@ -20,7 +20,6 @@ import ( "gopkg.in/urfave/cli.v1" "github.com/Fantom-foundation/go-opera/gossip/evmstore" - "github.com/Fantom-foundation/go-opera/integration" "github.com/Fantom-foundation/go-opera/inter/ibr" "github.com/Fantom-foundation/go-opera/inter/ier" "github.com/Fantom-foundation/go-opera/opera/genesis" @@ -208,8 +207,8 @@ func exportGenesis(ctx *cli.Context) error { _ = os.RemoveAll(tmpPath) defer os.RemoveAll(tmpPath) - rawProducer := integration.DBProducer(path.Join(cfg.Node.DataDir, "chaindata"), cacheScaler(ctx)) - gdb, err := makeRawGossipStore(rawProducer, cfg) + rawDbs := makeRawDbsProducer(cfg) + gdb, err := makeRawGossipStore(rawDbs, cfg) if err != nil { log.Crit("DB opening error", "datadir", cfg.Node.DataDir, "err", err) } diff --git a/cmd/opera/launcher/import.go b/cmd/opera/launcher/import.go index f9dd737ff..6c90dadd1 100644 --- a/cmd/opera/launcher/import.go +++ 
b/cmd/opera/launcher/import.go @@ -9,7 +9,6 @@ import ( "math" "os" "os/signal" - "path" "strings" "syscall" "time" @@ -25,7 +24,6 @@ import ( "github.com/Fantom-foundation/go-opera/gossip" "github.com/Fantom-foundation/go-opera/gossip/emitter" - "github.com/Fantom-foundation/go-opera/integration" "github.com/Fantom-foundation/go-opera/inter" "github.com/Fantom-foundation/go-opera/opera/genesisstore" "github.com/Fantom-foundation/go-opera/utils/ioread" @@ -38,8 +36,8 @@ func importEvm(ctx *cli.Context) error { cfg := makeAllConfigs(ctx) - rawProducer := integration.DBProducer(path.Join(cfg.Node.DataDir, "chaindata"), cfg.cachescale) - gdb, err := makeRawGossipStore(rawProducer, cfg) + rawDbs := makeRawDbsProducer(cfg) + gdb, err := makeRawGossipStore(rawDbs, cfg) if err != nil { log.Crit("DB opening error", "datadir", cfg.Node.DataDir, "err", err) } diff --git a/cmd/opera/launcher/launcher.go b/cmd/opera/launcher/launcher.go index 7cd2395ae..59e446a86 100644 --- a/cmd/opera/launcher/launcher.go +++ b/cmd/opera/launcher/launcher.go @@ -2,7 +2,6 @@ package launcher import ( "fmt" - "os" "path" "sort" "strings" @@ -284,16 +283,13 @@ func makeNode(ctx *cli.Context, cfg *config, genesisStore *genesisstore.Store) ( errlock.SetDefaultDatadir(cfg.Node.DataDir) errlock.Check() - chaindataDir := path.Join(cfg.Node.DataDir, "chaindata") - if err := os.MkdirAll(chaindataDir, 0700); err != nil { - utils.Fatalf("Failed to create chaindata directory: %v", err) - } var g *genesis.Genesis if genesisStore != nil { gv := genesisStore.Genesis() g = &gv } - engine, dagIndex, gdb, cdb, blockProc := integration.MakeEngine(integration.DBProducer(chaindataDir, cfg.cachescale), g, cfg.AppConfigs()) + + engine, dagIndex, gdb, cdb, blockProc, closeDBs := integration.MakeEngine(path.Join(cfg.Node.DataDir, "chaindata"), g, cfg.AppConfigs()) if genesisStore != nil { _ = genesisStore.Close() } @@ -373,6 +369,9 @@ func makeNode(ctx *cli.Context, cfg *config, genesisStore *genesisstore.Store) ( _ = stack.Close() gdb.Close() _ = cdb.Close() + if closeDBs != nil { + closeDBs() + } } } diff --git a/cmd/opera/launcher/snapshotcmd.go b/cmd/opera/launcher/snapshotcmd.go index a7901ae12..91135415b 100644 --- a/cmd/opera/launcher/snapshotcmd.go +++ b/cmd/opera/launcher/snapshotcmd.go @@ -35,7 +35,6 @@ import ( "github.com/Fantom-foundation/go-opera/gossip/evmstore" "github.com/Fantom-foundation/go-opera/gossip/evmstore/evmpruner" - "github.com/Fantom-foundation/go-opera/integration" ) var ( @@ -156,8 +155,8 @@ It's also usable without snapshot enabled. func pruneState(ctx *cli.Context) error { cfg := makeAllConfigs(ctx) - rawProducer := integration.DBProducer(path.Join(cfg.Node.DataDir, "chaindata"), cfg.cachescale) - gdb, err := makeRawGossipStore(rawProducer, cfg) + rawDbs := makeRawDbsProducer(cfg) + gdb, err := makeRawGossipStore(rawDbs, cfg) if err != nil { log.Crit("DB opening error", "datadir", cfg.Node.DataDir, "err", err) } @@ -224,8 +223,8 @@ func pruneState(ctx *cli.Context) error { func verifyState(ctx *cli.Context) error { cfg := makeAllConfigs(ctx) - rawProducer := integration.DBProducer(path.Join(cfg.Node.DataDir, "chaindata"), cfg.cachescale) - gdb, err := makeRawGossipStore(rawProducer, cfg) + rawDbs := makeRawDbsProducer(cfg) + gdb, err := makeRawGossipStore(rawDbs, cfg) if err != nil { log.Crit("DB opening error", "datadir", cfg.Node.DataDir, "err", err) } @@ -268,8 +267,8 @@ func verifyState(ctx *cli.Context) error { // contract codes are present. 
func traverseState(ctx *cli.Context) error { cfg := makeAllConfigs(ctx) - rawProducer := integration.DBProducer(path.Join(cfg.Node.DataDir, "chaindata"), cfg.cachescale) - gdb, err := makeRawGossipStore(rawProducer, cfg) + rawDbs := makeRawDbsProducer(cfg) + gdb, err := makeRawGossipStore(rawDbs, cfg) if err != nil { log.Crit("DB opening error", "datadir", cfg.Node.DataDir, "err", err) } @@ -360,8 +359,8 @@ func traverseState(ctx *cli.Context) error { // but it will check each trie node. func traverseRawState(ctx *cli.Context) error { cfg := makeAllConfigs(ctx) - rawProducer := integration.DBProducer(path.Join(cfg.Node.DataDir, "chaindata"), cfg.cachescale) - gdb, err := makeRawGossipStore(rawProducer, cfg) + rawDbs := makeRawDbsProducer(cfg) + gdb, err := makeRawGossipStore(rawDbs, cfg) if err != nil { log.Crit("DB opening error", "datadir", cfg.Node.DataDir, "err", err) } diff --git a/go.mod b/go.mod index de240978d..0f3fa7048 100644 --- a/go.mod +++ b/go.mod @@ -3,11 +3,10 @@ module github.com/Fantom-foundation/go-opera go 1.14 require ( - github.com/Fantom-foundation/lachesis-base v0.0.0-20220103160934-6b4931c60582 + github.com/Fantom-foundation/lachesis-base v0.0.0-20220725175240-f0338b7a6194 github.com/allegro/bigcache v1.2.1 // indirect github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40 // indirect github.com/cespare/cp v1.1.1 - github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set v1.7.1 github.com/docker/docker v1.13.1 @@ -18,7 +17,7 @@ require ( github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect github.com/getsentry/raven-go v0.2.0 // indirect github.com/go-kit/kit v0.9.0 // indirect - github.com/golang/mock v1.3.1 + github.com/golang/mock v1.6.0 github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d github.com/holiman/bloomfilter/v2 v2.0.3 github.com/julienschmidt/httprouter v1.3.0 // indirect @@ -33,13 +32,14 @@ require ( github.com/rjeczalik/notify v0.9.2 // indirect github.com/sirupsen/logrus v1.4.2 github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 - github.com/stretchr/testify v1.7.0 - github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 + github.com/stretchr/testify v1.7.2 + github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a github.com/tyler-smith/go-bip39 v1.0.2 github.com/uber/jaeger-client-go v2.20.1+incompatible github.com/uber/jaeger-lib v2.2.0+incompatible go.uber.org/atomic v1.5.1 // indirect golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 + golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a golang.org/x/tools v0.1.5 // indirect gopkg.in/urfave/cli.v1 v1.20.0 ) diff --git a/go.sum b/go.sum index 164b5588e..fc696afd3 100644 --- a/go.sum +++ b/go.sum @@ -18,6 +18,7 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= 
github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= @@ -33,17 +34,25 @@ github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6L github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw= +github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= +github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc5 h1:3SMNRzp13oZZkOBwOtLqbXVKYGmxe+2Ak94snUd2yxU= github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc5/go.mod h1:IeQDjWCNBj/QiWIPosfF6/kRC6pHPNs7W7LfBzjj+P4= -github.com/Fantom-foundation/lachesis-base v0.0.0-20220103160934-6b4931c60582 h1:gDEbOynFwS7sp19XrtWnA1nEhSIXXO5DuAuT5h/nZgY= -github.com/Fantom-foundation/lachesis-base v0.0.0-20220103160934-6b4931c60582/go.mod h1:E6+2LOvgADwSOv0U5YXhRJz4PlX8qJxvq8M93AOC1tM= +github.com/Fantom-foundation/lachesis-base v0.0.0-20220725175240-f0338b7a6194 h1:5GX97Ta7SeIyrpct33VT1UxOBGO7gBxq2FkPvAdsJgc= +github.com/Fantom-foundation/lachesis-base v0.0.0-20220725175240-f0338b7a6194/go.mod h1:CFMonaK1v/QHwpjLszuycFDgLXNazZqZxciwhKwXDVQ= +github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= +github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o= github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -52,6 +61,7 @@ github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKS github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= 
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/aws/aws-sdk-go-v2 v1.2.0/go.mod h1:zEQs02YRBw1DjK0PoJv3ygDYOFTre1ejlJWl8FwAuQo= github.com/aws/aws-sdk-go-v2/config v1.1.1/go.mod h1:0XsVy9lBI/BCXm+2Tuvt39YmdHwS5unDQmxZOYe8F5Y= github.com/aws/aws-sdk-go-v2/credentials v1.1.1/go.mod h1:mM2iIjwl7LULWtS6JCACyInboHirisUUdkBPoTHMOUo= @@ -61,6 +71,7 @@ github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1/go.mod h1:rLiOUrPLW/Er5kRcQ7 github.com/aws/aws-sdk-go-v2/service/sso v1.1.1/go.mod h1:SuZJxklHxLAXgLTc1iFXbEWkXs7QRTQpCLGaKIprQW0= github.com/aws/aws-sdk-go-v2/service/sts v1.1.1/go.mod h1:Wi0EBZwiz/K44YliU0EKxqTCJGUfYTWXrrBwkq736bM= github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= +github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= @@ -92,10 +103,28 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v1.0.0/go.mod h1:5Ib8Meh+jk1RlHIXej6Pzevx/NLlNvQB9pmSBZErGA4= +github.com/cockroachdb/errors v1.6.1/go.mod h1:tm6FTP5G81vwJ5lC0SizQo374JNCOPrHyXGitRJoDqM= +github.com/cockroachdb/errors v1.8.1 h1:A5+txlVZfOqFBDa4mGz2bUWSp0aHElvHX2bKkdbQu+Y= +github.com/cockroachdb/errors v1.8.1/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/cockroachdb/pebble v0.0.0-20220524133354-f30672e7240b h1:adWRp3wA48w5X02do3Py3muX2KIG9XCfG8YJSyTTsRs= +github.com/cockroachdb/pebble v0.0.0-20220524133354-f30672e7240b/go.mod h1:buxOO9GBtOcq1DiXDpIPYrmxY020K2A8lOrwno5FetU= +github.com/cockroachdb/redact v1.0.8 h1:8QG/764wK+vmEYoOlfobpe12EQcS81ukx/a4hdVMxNw= +github.com/cockroachdb/redact v1.0.8/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 h1:IKgmqgMQlVJIZj19CdocBeSfSaiCbEBZGKODaixqtHM= +github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= github.com/consensys/gnark-crypto 
v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= @@ -109,8 +138,10 @@ github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14y github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU= github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dlclark/regexp2 v1.2.0 h1:8sAhBGEM0dRWogWqWyQeIJnxjWO6oIjl8FKqREDsGfk= github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= @@ -119,23 +150,34 @@ github.com/docker/docker v1.13.1 h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo= github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498 h1:Y9vTBSsV4hSwPSj4bacAU/eSnV3dAxVpepaghAdhGoQ= github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/evalphobia/logrus_sentry v0.8.2 
h1:dotxHq+YLZsT1Bb45bB5UQbfCh3gM/nFFetyN46VoDQ= github.com/evalphobia/logrus_sentry v0.8.2/go.mod h1:pKcp+vriitUqu9KiWj/VRFbRfFNUwz95/UkgG8a6MNc= +github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= @@ -143,18 +185,26 @@ github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= +github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= 
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -164,9 +214,17 @@ github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5Nq github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -174,11 +232,13 @@ github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -187,11 +247,17 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= +github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= @@ -202,11 +268,15 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof 
v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.5 h1:kxhtnfFVi+rYdOALN0B3k9UT86zVJKfBimRaciULW4I= github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -214,17 +284,20 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29 h1:sezaKhEfPFg8W0Enm61B9Gs911H8iesGY5R8NDPtd1M= github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/guzenok/go-fuzz v0.0.0-20210103140116-f9104dfb626f h1:GQpcb9ywkaawJfZCFeIcrSWlsZFRTsig42e03dkzc+I= github.com/guzenok/go-fuzz v0.0.0-20210103140116-f9104dfb626f/go.mod h1:Q5On640X2Z0YzKOijx9GVhUu/kvHnk9aKoWGMlRDMtc= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM= @@ -233,7 +306,10 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/huin/goupnp v1.0.2 h1:RfGLP+h3mvisuWEyybxNq5Eft3NWhHLPeUN72kpKZoI= github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= github.com/influxdata/influxdb v1.8.3 
h1:WEypI1BQFTT4teLM+1qkEcvUi0dAvopAI/ir0vAiBg8= @@ -249,6 +325,10 @@ github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19y github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= +github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= +github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= +github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI= +github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA= github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU= @@ -261,19 +341,32 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= +github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= +github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= github.com/karalabe/usb v0.0.0-20191104083709-911d15fe12a9 h1:ZHuwnjpP8LsVsUYqTqeVAI+GfDfJ6UNPrExZF+vX/DQ= github.com/karalabe/usb v0.0.0-20191104083709-911d15fe12a9/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= +github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk= +github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U= +github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw= +github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiDuX9AhMbDPkGYSPugBOV6yTZB1l2K9Z0= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod 
h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -288,10 +381,12 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g= github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= @@ -303,6 +398,7 @@ github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= @@ -312,39 +408,63 @@ github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/Qd github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod 
h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg= +github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ= +github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 h1:shk/vn9oCoOTmwcouEdwIeOtOGA/ELRUw/GwvxwfT+0= github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= +github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM= +github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v1.4.3/go.mod 
h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM= github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -372,10 +492,14 @@ github.com/rjeczalik/notify v0.9.2/go.mod h1:aErll2f0sUX9PXZnVNyeiObbmTlk5jnMoCa github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod 
h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -385,9 +509,13 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48 h1:ju5UTwk5Odtm4trrY+4Ca4RMj5OyXbmVeDAVad2T0Jw= github.com/status-im/keycard-go v0.0.0-20190424133014-d95853db0f48/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= @@ -398,11 +526,16 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 h1:xQdMZ1WLrgkkvOZ/LDQxjVxMLdby7osSh4ZEVa5sIjs= github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= +github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a h1:1ur3QoCqvE5fl+nylMaIr9PVV1w343YRDtsy+Rwu7XI= +github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= @@ -415,12 +548,25 @@ github.com/uber/jaeger-client-go v2.20.1+incompatible h1:HgqpYBng0n7tLJIlyT4kPCI github.com/uber/jaeger-client-go v2.20.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= 
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -433,9 +579,11 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -453,6 +601,8 @@ golang.org/x/exp 
v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm0 golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200513190911-00229845015e h1:rMqLP+9XLy+LdbCXHjJHAmTfXCr93W7oruWA6Hq1Alc= +golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -470,6 +620,7 @@ golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCc golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -477,9 +628,11 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -487,6 +640,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -496,8 +650,12 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d h1:20cMwl2fHAzkJMEA+8J4JgqBQcQGzbisXo31MIeenXI= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d h1:4SFsTMi4UahlKoloni7L4eYzhFRifURQLw+yv0QDCx8= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -519,6 +677,7 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -528,9 +687,11 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -545,6 +706,7 @@ golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -553,10 +715,17 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912 h1:uCLL3g5wH2xjxVREVuAbP9JM5PPKjRbXKRa6IBjkzmU= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210909193231-528a39cd75f3 h1:3Ad41xy2WCESpufXwgs7NpDSu+vjxqLt2UFqUV+20bI= +golang.org/x/sys v0.0.0-20210909193231-528a39cd75f3/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -565,6 +734,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod 
h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -573,12 +744,14 @@ golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -597,7 +770,10 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -605,6 +781,8 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors 
v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= @@ -623,6 +801,7 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -637,11 +816,14 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -649,6 +831,9 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod 
h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -656,6 +841,9 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogR gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUkSBlpnkWV1bJ+vv3mOgQEltEJ2rPxroVu0= @@ -674,6 +862,8 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/gossip/c_llr_callbacks_test.go b/gossip/c_llr_callbacks_test.go index db2451904..2e0d05fc5 100644 --- a/gossip/c_llr_callbacks_test.go +++ b/gossip/c_llr_callbacks_test.go @@ -838,8 +838,10 @@ func (s *IntegrationTestSuite) TestFullRepeater() { fullRepeater.compareParams() // 2. 
Comparing mainDb of generator and fullRepeater
-    genKVMap := fetchTable(s.generator.store.mainDB)
-    fullRepKVMap := fetchTable(s.processor.store.mainDB)
+    genKVDB, _ := s.generator.store.dbs.OpenDB("gossip")
+    fullRepKVDB, _ := s.processor.store.dbs.OpenDB("gossip")
+    genKVMap := fetchTable(genKVDB)
+    fullRepKVMap := fetchTable(fullRepKVDB)
 
     subsetOf := func(aa, bb map[string]string) {
         for _k, _v := range aa {
@@ -859,8 +861,10 @@ func (s *IntegrationTestSuite) TestFullRepeater() {
     s.T().Log("Checking genKVs == fullKVs")
     checkEqual(genKVMap, fullRepKVMap)
 
-    genKVMapAfterIndexLogs := fetchTable(s.generator.store.mainDB)
-    fullRepKVMapAfterIndexLogs := fetchTable(s.processor.store.mainDB)
+    genKVMapAfterIndexLogsDB, _ := s.generator.store.dbs.OpenDB("gossip")
+    fullRepKVMapAfterIndexLogsDB, _ := s.processor.store.dbs.OpenDB("gossip")
+    genKVMapAfterIndexLogs := fetchTable(genKVMapAfterIndexLogsDB)
+    fullRepKVMapAfterIndexLogs := fetchTable(fullRepKVMapAfterIndexLogsDB)
 
     // comparing the states
     checkEqual(genKVMap, genKVMapAfterIndexLogs)
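The hunks above switch the test from reading a private mainDB handle to opening the "gossip" key-value store by name from the store's DB producer. A minimal sketch of that access pattern, assuming only the lachesis-base kvdb interfaces used elsewhere in this series (the dumpDB helper below is illustrative and not part of the patch; the test's own helper is fetchTable):

package main

import (
	"fmt"

	"github.com/Fantom-foundation/lachesis-base/kvdb"
	"github.com/Fantom-foundation/lachesis-base/kvdb/memorydb"
)

// dumpDB copies every key/value pair of a kvdb.Store into a map,
// similar in spirit to the test's fetchTable helper.
func dumpDB(db kvdb.Store) map[string]string {
	out := make(map[string]string)
	it := db.NewIterator(nil, nil)
	defer it.Release()
	for it.Next() {
		out[string(it.Key())] = string(it.Value())
	}
	return out
}

func main() {
	producer := memorydb.NewProducer("") // in-memory stand-in for the node's DB producer
	db, err := producer.OpenDB("gossip") // open the DB by name instead of keeping a mainDB field
	if err != nil {
		panic(err)
	}
	defer db.Close()
	_ = db.Put([]byte("key"), []byte("value"))
	fmt.Println(dumpDB(db))
}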
diff --git a/gossip/evmstore/store.go b/gossip/evmstore/store.go
index 69b37c6b0..c74497bcc 100644
--- a/gossip/evmstore/store.go
+++ b/gossip/evmstore/store.go
@@ -2,7 +2,6 @@ package evmstore
 
 import (
     "errors"
-    "sync"
 
     "github.com/Fantom-foundation/lachesis-base/hash"
     "github.com/Fantom-foundation/lachesis-base/kvdb"
@@ -33,8 +32,7 @@ const nominalSize uint = 1
 type Store struct {
     cfg StoreConfig
 
-    mainDB kvdb.Store
-    table  struct {
+    table struct {
         Evm  kvdb.Store `table:"M"`
         Logs kvdb.Store `table:"L"`
         // API-only tables
@@ -54,10 +52,6 @@ type Store struct {
         EvmBlocks *wlru.Cache `cache:"-"` // store by pointer
     }
 
-    mutex struct {
-        Inc sync.Mutex
-    }
-
     rlp rlpstore.Helper
 
     triegc *prque.Prque // Priority queue mapping block numbers to tries to gc
@@ -70,40 +64,68 @@ const (
 )
 
 // NewStore creates store over key-value db.
-func NewStore(mainDB kvdb.Store, cfg StoreConfig) *Store {
+func NewStore(dbs kvdb.DBProducer, cfg StoreConfig) *Store {
     s := &Store{
         cfg:      cfg,
-        mainDB:   mainDB,
         Instance: logger.New("evm-store"),
         rlp:      rlpstore.Helper{logger.New("rlp")},
         triegc:   prque.New(nil),
     }
 
-    table.MigrateTables(&s.table, s.mainDB)
+    err := table.OpenTables(&s.table, dbs, "evm")
+    if err != nil {
+        s.Log.Crit("Failed to open tables", "err", err)
+    }
 
-    s.EvmDb = rawdb.NewDatabase(
-        kvdb2ethdb.Wrap(
-            nokeyiserr.Wrap(
-                s.table.Evm)))
-    s.EvmState = state.NewDatabaseWithConfig(s.EvmDb, &trie.Config{
-        Cache:     cfg.Cache.EvmDatabase / opt.MiB,
-        Journal:   cfg.Cache.TrieCleanJournal,
-        Preimages: cfg.EnablePreimageRecording,
-        GreedyGC:  cfg.Cache.GreedyGC,
-    })
+    s.initEVMDB()
     s.EvmLogs = topicsdb.New(s.table.Logs)
-
     s.initCache()
 
     return s
 }
 
+// Close closes underlying database.
+func (s *Store) Close() {
+    setnil := func() interface{} {
+        return nil
+    }
+
+    _ = table.CloseTables(&s.table)
+    table.MigrateTables(&s.table, nil)
+    table.MigrateCaches(&s.cache, setnil)
+}
+
 func (s *Store) initCache() {
     s.cache.Receipts = s.makeCache(s.cfg.Cache.ReceiptsSize, s.cfg.Cache.ReceiptsBlocks)
     s.cache.TxPositions = s.makeCache(nominalSize*uint(s.cfg.Cache.TxPositions), s.cfg.Cache.TxPositions)
     s.cache.EvmBlocks = s.makeCache(s.cfg.Cache.EvmBlocksSize, s.cfg.Cache.EvmBlocksNum)
 }
 
+func (s *Store) initEVMDB() {
+    s.EvmDb = rawdb.NewDatabase(
+        kvdb2ethdb.Wrap(
+            nokeyiserr.Wrap(
+                s.table.Evm)))
+    s.EvmState = state.NewDatabaseWithConfig(s.EvmDb, &trie.Config{
+        Cache:     s.cfg.Cache.EvmDatabase / opt.MiB,
+        Journal:   s.cfg.Cache.TrieCleanJournal,
+        Preimages: s.cfg.EnablePreimageRecording,
+        GreedyGC:  s.cfg.Cache.GreedyGC,
+    })
+}
+
+func (s *Store) ResetWithEVMDB(evmStore kvdb.Store) *Store {
+    cp := *s
+    cp.table.Evm = evmStore
+    cp.initEVMDB()
+    cp.Snaps = nil
+    return &cp
+}
+
+func (s *Store) EVMDB() kvdb.Store {
+    return s.table.Evm
+}
+
 func (s *Store) GenerateEvmSnapshot(root common.Hash, rebuild, async bool) (err error) {
     if s.Snaps != nil {
         return errors.New("EVM snapshot is already opened")
diff --git a/gossip/evmstore/store_test.go b/gossip/evmstore/store_test.go
index 4dfce7c5b..736a43302 100644
--- a/gossip/evmstore/store_test.go
+++ b/gossip/evmstore/store_test.go
@@ -7,11 +7,11 @@ import (
 func cachedStore() *Store {
     cfg := LiteStoreConfig()
 
-    return NewStore(memorydb.New(), cfg)
+    return NewStore(memorydb.NewProducer(""), cfg)
 }
 
 func nonCachedStore() *Store {
     cfg := StoreConfig{}
 
-    return NewStore(memorydb.New(), cfg)
+    return NewStore(memorydb.NewProducer(""), cfg)
 }
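As the updated store_test.go shows, the evmstore constructor now takes a DB producer and the store opens and closes its own tables. A rough usage sketch under that assumption (it mirrors the test setup against the patched tree, not production wiring):

package main

import (
	"github.com/Fantom-foundation/go-opera/gossip/evmstore"
	"github.com/Fantom-foundation/lachesis-base/kvdb/memorydb"
)

func main() {
	// NewStore now receives a kvdb.DBProducer and opens its "evm" tables itself,
	// instead of being handed a single pre-opened mainDB.
	store := evmstore.NewStore(memorydb.NewProducer(""), evmstore.LiteStoreConfig())
	// Close was added in this patch to release the tables the store opened.
	defer store.Close()
}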
diff --git a/gossip/store.go b/gossip/store.go
index 0786012aa..eb2f8fc48 100644
--- a/gossip/store.go
+++ b/gossip/store.go
@@ -27,10 +27,9 @@ type Store struct {
     dbs kvdb.FlushableDBProducer
     cfg StoreConfig
 
-    mainDB       kvdb.Store
-    snapshotedDB *switchable.Snapshot
-    evm          *evmstore.Store
-    table        struct {
+    snapshotedEVMDB *switchable.Snapshot
+    evm             *evmstore.Store
+    table           struct {
         Version kvdb.Store `table:"_"`
 
         // Main DAG tables
@@ -106,23 +105,21 @@ func NewMemStore() *Store {
 
 // NewStore creates store over key-value db.
 func NewStore(dbs kvdb.FlushableDBProducer, cfg StoreConfig) *Store {
-    mainDB, err := dbs.OpenDB("gossip")
-    if err != nil {
-        log.Crit("Failed to open DB", "name", "gossip", "err", err)
-    }
     s := &Store{
         dbs:           dbs,
         cfg:           cfg,
-        mainDB:        mainDB,
         Instance:      logger.New("gossip-store"),
         prevFlushTime: time.Now(),
         rlp:           rlpstore.Helper{logger.New("rlp")},
     }
 
-    table.MigrateTables(&s.table, s.mainDB)
+    err := table.OpenTables(&s.table, dbs, "gossip")
+    if err != nil {
+        log.Crit("Failed to open DB", "name", "gossip", "err", err)
+    }
 
     s.initCache()
-    s.evm = evmstore.NewStore(s.mainDB, cfg.EVM)
+    s.evm = evmstore.NewStore(dbs, cfg.EVM)
 
     if err := s.migrateData(); err != nil {
         s.Log.Crit("Failed to migrate Gossip DB", "err", err)
@@ -159,11 +156,12 @@ func (s *Store) Close() {
         return nil
     }
 
+    _ = table.CloseTables(&s.table)
     table.MigrateTables(&s.table, nil)
     table.MigrateCaches(&s.cache, setnil)
 
-    _ = s.mainDB.Close()
     _ = s.closeEpochStore()
+    s.evm.Close()
 }
 
 func (s *Store) IsCommitNeeded() bool {
@@ -242,27 +240,28 @@ func (s *Store) CaptureEvmKvdbSnapshot() {
     }
     gen, err := s.evm.Snaps.Generating()
     if err != nil {
-        s.Log.Error("Failed to initialize EVM snapshot for frozen KVDB", "err", err)
+        s.Log.Error("Failed to check EVM snapshot generation", "err", err)
         return
     }
     if gen {
         return
     }
-    newKvdbSnap, err := s.mainDB.GetSnapshot()
+    newEvmKvdbSnap, err := s.evm.EVMDB().GetSnapshot()
     if err != nil {
         s.Log.Error("Failed to initialize frozen KVDB", "err", err)
         return
     }
-    if s.snapshotedDB == nil {
-        s.snapshotedDB = switchable.Wrap(newKvdbSnap)
+    if s.snapshotedEVMDB == nil {
+        s.snapshotedEVMDB = switchable.Wrap(newEvmKvdbSnap)
     } else {
-        old := s.snapshotedDB.SwitchTo(newKvdbSnap)
+        old := s.snapshotedEVMDB.SwitchTo(newEvmKvdbSnap)
         // release only after DB is atomically switched
         if old != nil {
             old.Release()
         }
     }
-    newStore := evmstore.NewStore(snap2kvdb.Wrap(s.snapshotedDB), evmstore.LiteStoreConfig())
+    newStore := s.evm.ResetWithEVMDB(snap2kvdb.Wrap(s.snapshotedEVMDB))
+    newStore.Snaps = nil
     root := s.GetBlockState().FinalizedStateRoot
     err = s.generateSnapshotAt(newStore, common.Hash(root), false, false)
     if err != nil {
diff --git a/gossip/store_epoch.go b/gossip/store_epoch.go
index a63bb98ad..d11809b15 100644
--- a/gossip/store_epoch.go
+++ b/gossip/store_epoch.go
@@ -24,7 +24,7 @@ var (
 type (
     epochStore struct {
         epoch idx.Epoch
-        db    kvdb.DropableStore
+        db    kvdb.Store
         table struct {
             LastEvents kvdb.Store `table:"t"`
             Heads      kvdb.Store `table:"H"`
@@ -39,7 +39,7 @@ type (
     }
 )
 
-func newEpochStore(epoch idx.Epoch, db kvdb.DropableStore) *epochStore {
+func newEpochStore(epoch idx.Epoch, db kvdb.Store) *epochStore {
     es := &epochStore{
         epoch: epoch,
         db:    db,
diff --git a/gossip/store_migration.go b/gossip/store_migration.go
index 12491fd8d..5c46b0e88 100644
--- a/gossip/store_migration.go
+++ b/gossip/store_migration.go
@@ -6,7 +6,6 @@ import (
 
     "github.com/Fantom-foundation/lachesis-base/hash"
     "github.com/Fantom-foundation/lachesis-base/kvdb"
-    "github.com/Fantom-foundation/lachesis-base/kvdb/table"
     "github.com/ethereum/go-ethereum/common"
 
     "github.com/Fantom-foundation/go-opera/inter"
@@ -22,7 +21,7 @@ func isEmptyDB(db kvdb.Iteratee) bool {
 
 func (s *Store) migrateData() error {
     versions := migration.NewKvdbIDStore(s.table.Version)
-    if isEmptyDB(s.mainDB) {
+    if isEmptyDB(s.table.Version) {
         // short circuit if empty DB
         versions.SetID(s.migrations().ID())
         return nil
@@ -94,7 +93,7 @@ func (s *Store) recoverLlrState() error {
 }
 
 func (s *Store) eraseSfcApiTable() error {
-    sfcapiTable := table.New(s.mainDB, []byte("S"))
+    sfcapiTable, _ := s.dbs.OpenDB("gossip/S")
     it := sfcapiTable.NewIterator(nil, nil)
     defer it.Release()
     for it.Next() {
Configs) e return nil } -func makeEngine(rawProducer kvdb.IterableDBProducer, g *genesis.Genesis, emptyStart bool, cfg Configs) (*abft.Lachesis, *vecmt.Index, *gossip.Store, *abft.Store, gossip.BlockProc, error) { - dbs, err := makeFlushableProducer(rawProducer) +func migrate(dbs kvdb.FlushableDBProducer, cfg Configs) error { + gdb, cdb := getStores(dbs, cfg) + defer gdb.Close() + defer cdb.Close() + err := gdb.Commit() if err != nil { - return nil, nil, nil, nil, gossip.BlockProc{}, err + return err } + return nil +} - if emptyStart { - if g == nil { - return nil, nil, nil, nil, gossip.BlockProc{}, fmt.Errorf("missing --genesis flag for an empty datadir") +func makeEngine(genesisProducers, runtimeProducers map[multidb.TypeName]kvdb.IterableDBProducer, g *genesis.Genesis, emptyStart bool, cfg Configs) (*abft.Lachesis, *vecmt.Index, *gossip.Store, *abft.Store, gossip.BlockProc, func(), error) { + { + if emptyStart && g == nil { + return nil, nil, nil, nil, gossip.BlockProc{}, nil, fmt.Errorf("missing --genesis flag for an empty datadir") } - // close flushable DBs and open raw DBs for performance reasons - err := dbs.Close() + // open raw DBs for performance reasons + dbs, err := MakeRawMultiProducer(genesisProducers, cfg.DBs.Routing) if err != nil { - return nil, nil, nil, nil, gossip.BlockProc{}, fmt.Errorf("failed to close existing databases: %v", err) + return nil, nil, nil, nil, gossip.BlockProc{}, nil, fmt.Errorf("failed to make DB multi-producer: %v", err) } - - err = applyGenesis(rawProducer, *g, cfg) - if err != nil { - return nil, nil, nil, nil, gossip.BlockProc{}, fmt.Errorf("failed to apply genesis state: %v", err) - } - - // re-open dbs - dbs, err = makeFlushableProducer(rawProducer) - if err != nil { - return nil, nil, nil, nil, gossip.BlockProc{}, err + if emptyStart { + err = applyGenesis(dbs, *g, cfg) + if err != nil { + return nil, nil, nil, nil, gossip.BlockProc{}, nil, fmt.Errorf("failed to apply genesis state: %v", err) + } + } else { + err = migrate(dbs, cfg) + if err != nil { + return nil, nil, nil, nil, gossip.BlockProc{}, nil, fmt.Errorf("failed to migrate state: %v", err) + } } } + // open flushable DBs + dbs, closeDBs, err := MakeFlushableMultiProducer(runtimeProducers, cfg.DBs.Routing) + if err != nil { + return nil, nil, nil, nil, gossip.BlockProc{}, nil, err + } var wdbs kvdb.FlushableDBProducer + // final DB wrappers if metrics.Enabled { wdbs = WrapDatabaseWithMetrics(dbs) } else { @@ -171,6 +172,7 @@ func makeEngine(rawProducer kvdb.IterableDBProducer, g *genesis.Genesis, emptySt if err != nil { gdb.Close() cdb.Close() + closeDBs() } }() @@ -178,52 +180,66 @@ func makeEngine(rawProducer kvdb.IterableDBProducer, g *genesis.Genesis, emptySt genesisID := gdb.GetGenesisID() if genesisID == nil { err = errors.New("malformed chainstore: genesis ID is not written") - return nil, nil, nil, nil, gossip.BlockProc{}, err + return nil, nil, nil, nil, gossip.BlockProc{}, nil, err } if g != nil { if *genesisID != g.GenesisID { err = &GenesisMismatchError{*genesisID, g.GenesisID} - return nil, nil, nil, nil, gossip.BlockProc{}, err + return nil, nil, nil, nil, gossip.BlockProc{}, nil, err } } engine, vecClock, blockProc, err := rawMakeEngine(gdb, cdb, nil, cfg) if err != nil { err = fmt.Errorf("failed to make engine: %v", err) - return nil, nil, nil, nil, gossip.BlockProc{}, err + return nil, nil, nil, nil, gossip.BlockProc{}, nil, err } - err = gdb.Commit() - if err != nil { - err = fmt.Errorf("failed to commit DBs: %v", err) - return nil, nil, nil, nil, 
gossip.BlockProc{}, err + if emptyStart { + err = gdb.Commit() + if err != nil { + err = fmt.Errorf("failed to commit DBs: %v", err) + return nil, nil, nil, nil, gossip.BlockProc{}, nil, err + } } - return engine, vecClock, gdb, cdb, blockProc, nil + return engine, vecClock, gdb, cdb, blockProc, closeDBs, nil } // MakeEngine makes consensus engine from config. -func MakeEngine(rawProducer kvdb.IterableDBProducer, g *genesis.Genesis, cfg Configs) (*abft.Lachesis, *vecmt.Index, *gossip.Store, *abft.Store, gossip.BlockProc) { - dropAllDBsIfInterrupted(rawProducer) - existingDBs := rawProducer.Names() +func MakeEngine(chaindataDir string, g *genesis.Genesis, cfg Configs) (*abft.Lachesis, *vecmt.Index, *gossip.Store, *abft.Store, gossip.BlockProc, func()) { + MakeDBDirs(chaindataDir) + // use increased DB cache for genesis processing + genesisProducers, err := SupportedDBs(chaindataDir, cfg.DBs.GenesisCache) + if err != nil { + utils.Fatalf("Failed to initialize DB producers: %v", err) + } + runtimeProducers, err := SupportedDBs(chaindataDir, cfg.DBs.RuntimeCache) + if err != nil { + utils.Fatalf("Failed to initialize DB producers: %v", err) + } + + firstLaunch := dropAllDBsIfInterrupted(runtimeProducers) - engine, vecClock, gdb, cdb, blockProc, err := makeEngine(rawProducer, g, len(existingDBs) == 0, cfg) + engine, vecClock, gdb, cdb, blockProc, closeDBs, err := makeEngine(genesisProducers, runtimeProducers, g, firstLaunch, cfg) if err != nil { - if len(existingDBs) == 0 { - dropAllDBs(rawProducer) + if firstLaunch { + for _, producer := range runtimeProducers { + dropAllDBs(producer) + } } utils.Fatalf("Failed to make engine: %v", err) } rules := gdb.GetRules() genesisID := gdb.GetGenesisID() - if len(existingDBs) == 0 { + if firstLaunch { log.Info("Applied genesis state", "name", rules.Name, "id", rules.NetworkID, "genesis", genesisID.String()) } else { log.Info("Genesis is already written", "name", rules.Name, "id", rules.NetworkID, "genesis", genesisID.String()) } - return engine, vecClock, gdb, cdb, blockProc + return engine, vecClock, gdb, cdb, blockProc, closeDBs } // SetAccountKey sets key into accounts manager and unlocks it with pswd. 
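For reference, a minimal, self-contained sketch of how a caller is expected to wire the reworked MakeEngine; it mirrors the benchmark updated in the next file, and the chaindata directory path and the fd-limit value of 512 are illustrative assumptions only:

package main

import (
	"github.com/Fantom-foundation/lachesis-base/abft"
	"github.com/Fantom-foundation/lachesis-base/utils/cachescale"

	"github.com/Fantom-foundation/go-opera/gossip"
	"github.com/Fantom-foundation/go-opera/integration"
	"github.com/Fantom-foundation/go-opera/integration/makefakegenesis"
	"github.com/Fantom-foundation/go-opera/utils"
	"github.com/Fantom-foundation/go-opera/vecmt"
)

func main() {
	// Fake one-validator genesis, as in the benchmark below.
	genStore := makefakegenesis.FakeGenesisStore(1, utils.ToFtm(1), utils.ToFtm(1))
	g := genStore.Genesis()

	// MakeEngine now takes a chaindata directory instead of a raw DB producer,
	// builds genesis-time and runtime producers internally, and returns an extra
	// closeDBs callback which must be deferred alongside the stores.
	_, _, gdb, cdb, _, closeDBs := integration.MakeEngine("/tmp/opera-example", &g, integration.Configs{
		Opera:         gossip.DefaultConfig(cachescale.Identity),
		OperaStore:    gossip.DefaultStoreConfig(cachescale.Identity),
		Lachesis:      abft.DefaultConfig(),
		LachesisStore: abft.DefaultStoreConfig(cachescale.Identity),
		VectorClock:   vecmt.DefaultConfig(cachescale.Identity),
		DBs:           integration.DefaultDBsConfig(cachescale.Identity.U64, 512),
	})
	defer closeDBs()
	defer gdb.Close()
	defer cdb.Close()
}
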
diff --git a/integration/bench_db_flush_test.go b/integration/bench_db_flush_test.go index fd3699346..acca6eabb 100644 --- a/integration/bench_db_flush_test.go +++ b/integration/bench_db_flush_test.go @@ -8,11 +8,8 @@ import ( "github.com/Fantom-foundation/lachesis-base/abft" "github.com/Fantom-foundation/lachesis-base/hash" "github.com/Fantom-foundation/lachesis-base/inter/idx" - "github.com/Fantom-foundation/lachesis-base/kvdb" - "github.com/Fantom-foundation/lachesis-base/kvdb/leveldb" "github.com/Fantom-foundation/lachesis-base/utils/cachescale" "github.com/ethereum/go-ethereum/common" - "github.com/syndtr/goleveldb/leveldb/opt" "github.com/Fantom-foundation/go-opera/gossip" "github.com/Fantom-foundation/go-opera/integration/makefakegenesis" @@ -22,21 +19,24 @@ import ( ) func BenchmarkFlushDBs(b *testing.B) { - rawProducer, dir := dbProducer("flush_bench") + dir := tmpDir("flush_bench") defer os.RemoveAll(dir) genStore := makefakegenesis.FakeGenesisStore(1, utils.ToFtm(1), utils.ToFtm(1)) g := genStore.Genesis() - _, _, store, s2, _ := MakeEngine(rawProducer, &g, Configs{ + _, _, store, s2, _, closeDBs := MakeEngine(dir, &g, Configs{ Opera: gossip.DefaultConfig(cachescale.Identity), OperaStore: gossip.DefaultStoreConfig(cachescale.Identity), Lachesis: abft.DefaultConfig(), LachesisStore: abft.DefaultStoreConfig(cachescale.Identity), VectorClock: vecmt.DefaultConfig(cachescale.Identity), + DBs: DefaultDBsConfig(cachescale.Identity.U64, 512), }) + defer closeDBs() defer store.Close() defer s2.Close() b.ResetTimer() for i := 0; i < b.N; i++ { + b.StopTimer() n := idx.Block(0) randUint32s := func() []uint32 { arr := make([]uint32, 128) @@ -58,6 +58,7 @@ func BenchmarkFlushDBs(b *testing.B) { }) n++ } + b.StartTimer() err := store.Commit() if err != nil { b.Fatal(err) @@ -65,14 +66,10 @@ func BenchmarkFlushDBs(b *testing.B) { } } -func cache64mb(string) int { - return 64 * opt.MiB -} - -func dbProducer(name string) (kvdb.IterableDBProducer, string) { +func tmpDir(name string) string { dir, err := ioutil.TempDir("", name) if err != nil { panic(err) } - return leveldb.NewProducer(dir, cache64mb), dir + return dir } diff --git a/integration/db.go b/integration/db.go index 01f515f65..cd3ce6187 100644 --- a/integration/db.go +++ b/integration/db.go @@ -1,194 +1,154 @@ package integration import ( - "errors" - "fmt" "io/ioutil" + "os" + "path" "strings" - "sync" - "time" - "github.com/Fantom-foundation/go-opera/gossip" "github.com/Fantom-foundation/lachesis-base/hash" "github.com/Fantom-foundation/lachesis-base/inter/dag" "github.com/Fantom-foundation/lachesis-base/kvdb" "github.com/Fantom-foundation/lachesis-base/kvdb/leveldb" - "github.com/Fantom-foundation/lachesis-base/utils/cachescale" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/metrics" + "github.com/Fantom-foundation/lachesis-base/kvdb/multidb" + "github.com/Fantom-foundation/lachesis-base/kvdb/pebble" + "github.com/Fantom-foundation/lachesis-base/utils/fmtfilter" + "github.com/ethereum/go-ethereum/cmd/utils" "github.com/syndtr/goleveldb/leveldb/opt" -) -const ( - // metricsGatheringInterval specifies the interval to retrieve leveldb database - // compaction, io and pause stats to report to the user. 
- metricsGatheringInterval = 3 * time.Second + "github.com/Fantom-foundation/go-opera/gossip" ) -type DBProducerWithMetrics struct { - kvdb.FlushableDBProducer -} - -type DropableStoreWithMetrics struct { - kvdb.DropableStore - - diskReadMeter metrics.Meter // Meter for measuring the effective amount of data read - diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written - - quitLock sync.Mutex // Mutex protecting the quit channel access - quitChan chan chan error // Quit channel to stop the metrics collection before closing the database - - log log.Logger // Contextual logger tracking the database path -} - -func WrapDatabaseWithMetrics(db kvdb.FlushableDBProducer) kvdb.FlushableDBProducer { - wrapper := &DBProducerWithMetrics{db} - return wrapper -} - -func WrapStoreWithMetrics(ds kvdb.DropableStore) *DropableStoreWithMetrics { - wrapper := &DropableStoreWithMetrics{ - DropableStore: ds, - quitChan: make(chan chan error), - } - return wrapper -} - -func (ds *DropableStoreWithMetrics) Close() error { - ds.quitLock.Lock() - defer ds.quitLock.Unlock() - - if ds.quitChan != nil { - errc := make(chan error) - ds.quitChan <- errc - if err := <-errc; err != nil { - ds.log.Error("Metrics collection failed", "err", err) - } - ds.quitChan = nil - } - return ds.DropableStore.Close() -} - -func (ds *DropableStoreWithMetrics) meter(refresh time.Duration) { - // Create storage for iostats. - var iostats [2]float64 - - var ( - errc chan error - merr error - ) - - timer := time.NewTimer(refresh) - defer timer.Stop() - // Iterate ad infinitum and collect the stats - for i := 1; errc == nil && merr == nil; i++ { - // Retrieve the database iostats. - ioStats, err := ds.Stat("leveldb.iostats") - if err != nil { - ds.log.Error("Failed to read database iostats", "err", err) - merr = err - continue - } - var nRead, nWrite float64 - parts := strings.Split(ioStats, " ") - if len(parts) < 2 { - ds.log.Error("Bad syntax of ioStats", "ioStats", ioStats) - merr = fmt.Errorf("bad syntax of ioStats %s", ioStats) - continue - } - if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil { - ds.log.Error("Bad syntax of read entry", "entry", parts[0]) - merr = err - continue - } - if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil { - log.Error("Bad syntax of write entry", "entry", parts[1]) - merr = err - continue - } - if ds.diskReadMeter != nil { - ds.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024)) - } - if ds.diskWriteMeter != nil { - ds.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024)) - } - iostats[0], iostats[1] = nRead, nWrite - - // Sleep a bit, then repeat the stats collection - select { - case errc = <-ds.quitChan: - // Quit requesting, stop hammering the database - case <-timer.C: - timer.Reset(refresh) - // Timeout, gather a new set of stats - } - } - if errc == nil { - errc = <-ds.quitChan +type DBsConfig struct { + Routing RoutingConfig + RuntimeCache DBsCacheConfig + GenesisCache DBsCacheConfig +} + +type DBCacheConfig struct { + Cache uint64 + Fdlimit uint64 +} + +type DBsCacheConfig struct { + Table map[string]DBCacheConfig +} + +func DefaultDBsConfig(scale func(uint64) uint64, fdlimit uint64) DBsConfig { + return DBsConfig{ + Routing: DefaultRoutingConfig(), + RuntimeCache: DefaultRuntimeDBsCacheConfig(scale, fdlimit), + GenesisCache: DefaultGenesisDBsCacheConfig(scale, fdlimit), + } +} + +func DefaultRuntimeDBsCacheConfig(scale func(uint64) uint64, fdlimit uint64) DBsCacheConfig { + return 
DBsCacheConfig{ + Table: map[string]DBCacheConfig{ + "evm-data": { + Cache: scale(242 * opt.MiB), + Fdlimit: fdlimit*242/700 + 1, + }, + "evm-logs": { + Cache: scale(110 * opt.MiB), + Fdlimit: fdlimit*110/700 + 1, + }, + "main": { + Cache: scale(186 * opt.MiB), + Fdlimit: fdlimit*186/700 + 1, + }, + "events": { + Cache: scale(87 * opt.MiB), + Fdlimit: fdlimit*87/700 + 1, + }, + "epoch-%d": { + Cache: scale(75 * opt.MiB), + Fdlimit: fdlimit*75/700 + 1, + }, + "": { + Cache: 64 * opt.MiB, + Fdlimit: fdlimit/100 + 1, + }, + }, + } +} + +func DefaultGenesisDBsCacheConfig(scale func(uint64) uint64, fdlimit uint64) DBsCacheConfig { + return DBsCacheConfig{ + Table: map[string]DBCacheConfig{ + "main": { + Cache: scale(1024 * opt.MiB), + Fdlimit: fdlimit*1024/3072 + 1, + }, + "evm-data": { + Cache: scale(1024 * opt.MiB), + Fdlimit: fdlimit*1024/3072 + 1, + }, + "evm-logs": { + Cache: scale(1024 * opt.MiB), + Fdlimit: fdlimit*1024/3072 + 1, + }, + "events": { + Cache: scale(1 * opt.MiB), + Fdlimit: fdlimit*1/3072 + 1, + }, + "epoch-%d": { + Cache: scale(1 * opt.MiB), + Fdlimit: fdlimit*1/3072 + 1, + }, + "": { + Cache: 16 * opt.MiB, + Fdlimit: fdlimit/100 + 1, + }, + }, + } +} + +func SupportedDBs(chaindataDir string, cfg DBsCacheConfig) (map[multidb.TypeName]kvdb.IterableDBProducer, error) { + if chaindataDir == "inmemory" || chaindataDir == "" { + chaindataDir, _ = ioutil.TempDir("", "opera-tmp") } - errc <- merr -} - -func (db *DBProducerWithMetrics) OpenDB(name string) (kvdb.DropableStore, error) { - ds, err := db.FlushableDBProducer.OpenDB(name) + cacher, err := dbCacheFdlimit(cfg) if err != nil { return nil, err } - dm := WrapStoreWithMetrics(ds) - if strings.HasPrefix(name, "gossip-") || strings.HasPrefix(name, "lachesis-") { - name = "epochs" - } - logger := log.New("database", name) - dm.log = logger - dm.diskReadMeter = metrics.GetOrRegisterMeter("opera/chaindata/"+name+"/disk/read", nil) - dm.diskWriteMeter = metrics.GetOrRegisterMeter("opera/chaindata/"+name+"/disk/write", nil) - - // Start up the metrics gathering and return - go dm.meter(metricsGatheringInterval) - return dm, nil -} - -func DBProducer(chaindataDir string, scale cachescale.Func) kvdb.IterableDBProducer { - if chaindataDir == "inmemory" || chaindataDir == "" { - chaindataDir, _ = ioutil.TempDir("", "opera-integration") - } - - return leveldb.NewProducer(chaindataDir, func(name string) int { - return dbCacheSize(name, scale.I) - }) -} - -func CheckDBList(names []string) error { - if len(names) == 0 { - return nil - } - namesMap := make(map[string]bool) - for _, name := range names { - namesMap[name] = true - } - if !namesMap["gossip"] { - return errors.New("gossip DB is not found") - } - if !namesMap["lachesis"] { - return errors.New("lachesis DB is not found") - } - return nil -} - -func dbCacheSize(name string, scale func(int) int) int { - if name == "gossip" { - return scale(128 * opt.MiB) - } - if name == "lachesis" { - return scale(4 * opt.MiB) - } - if strings.HasPrefix(name, "lachesis-") { - return scale(8 * opt.MiB) - } - if strings.HasPrefix(name, "gossip-") { - return scale(8 * opt.MiB) + return map[multidb.TypeName]kvdb.IterableDBProducer{ + "leveldb": leveldb.NewProducer(path.Join(chaindataDir, "leveldb"), cacher), + "pebble": pebble.NewProducer(path.Join(chaindataDir, "pebble"), cacher), + }, nil +} + +func dbCacheFdlimit(cfg DBsCacheConfig) (func(string) (int, int), error) { + fmts := make([]func(req string) (string, error), 0, len(cfg.Table)) + fmtsCaches := make([]DBCacheConfig, 0, len(cfg.Table)) + 
exactTable := make(map[string]DBCacheConfig, len(cfg.Table)) + // build scanf filters + for name, cache := range cfg.Table { + if !strings.ContainsRune(name, '%') { + exactTable[name] = cache + } else { + fn, err := fmtfilter.CompileFilter(name, name) + if err != nil { + return nil, err + } + fmts = append(fmts, fn) + fmtsCaches = append(fmtsCaches, cache) + } } - return scale(2 * opt.MiB) + return func(name string) (int, int) { + // try exact match + if cache, ok := cfg.Table[name]; ok { + return int(cache.Cache), int(cache.Fdlimit) + } + // try regexp + for i, fn := range fmts { + if _, err := fn(name); err == nil { + return int(fmtsCaches[i].Cache), int(fmtsCaches[i].Fdlimit) + } + } + // default + return int(cfg.Table[""].Cache), int(cfg.Table[""].Fdlimit) + }, nil } func dropAllDBs(producer kvdb.IterableDBProducer) { @@ -203,24 +163,44 @@ func dropAllDBs(producer kvdb.IterableDBProducer) { } } -func dropAllDBsIfInterrupted(rawProducer kvdb.IterableDBProducer) { - names := rawProducer.Names() - if len(names) == 0 { - return +func isInterrupted(rawProducers map[multidb.TypeName]kvdb.IterableDBProducer) bool { + for _, producer := range rawProducers { + names := producer.Names() + for _, name := range names { + db, err := producer.OpenDB(name) + if err != nil { + return false + } + flushID, err := db.Get(FlushIDKey) + _ = db.Close() + if err != nil { + return false + } + if flushID == nil { + return true + } + } } - // if flushID is not written, then previous genesis processing attempt was interrupted - for _, name := range names { - db, err := rawProducer.OpenDB(name) - if err != nil { - return + return false +} + +func isEmpty(rawProducers map[multidb.TypeName]kvdb.IterableDBProducer) bool { + for _, producer := range rawProducers { + if len(producer.Names()) > 0 { + return false } - flushID, err := db.Get(FlushIDKey) - _ = db.Close() - if flushID != nil || err != nil { - return + } + return true +} + +func dropAllDBsIfInterrupted(rawProducers map[multidb.TypeName]kvdb.IterableDBProducer) bool { + if isInterrupted(rawProducers) { + for _, producer := range rawProducers { + dropAllDBs(producer) } + return true } - dropAllDBs(rawProducer) + return isEmpty(rawProducers) } type GossipStoreAdapter struct { @@ -236,7 +216,7 @@ func (g *GossipStoreAdapter) GetEvent(id hash.Event) dag.Event { } type DummyFlushableProducer struct { - kvdb.DBProducer + kvdb.IterableDBProducer } func (p *DummyFlushableProducer) NotFlushedSizeEst() int { @@ -246,3 +226,12 @@ func (p *DummyFlushableProducer) NotFlushedSizeEst() int { func (p *DummyFlushableProducer) Flush(_ []byte) error { return nil } + +func MakeDBDirs(chaindataDir string) { + if err := os.MkdirAll(path.Join(chaindataDir, "leveldb"), 0700); err != nil { + utils.Fatalf("Failed to create chaindata/leveldb directory: %v", err) + } + if err := os.MkdirAll(path.Join(chaindataDir, "pebble"), 0700); err != nil { + utils.Fatalf("Failed to create chaindata/pebble directory: %v", err) + } +} diff --git a/integration/makefakegenesis/genesis.go b/integration/makefakegenesis/genesis.go index a0374a4fb..259671937 100644 --- a/integration/makefakegenesis/genesis.go +++ b/integration/makefakegenesis/genesis.go @@ -52,7 +52,7 @@ func FakeGenesisStoreWithRules(num idx.Validator, balance, stake *big.Int, rules } func FakeGenesisStoreWithRulesAndStart(num idx.Validator, balance, stake *big.Int, rules opera.Rules, epoch idx.Epoch, block idx.Block) *genesisstore.Store { - builder := makegenesis.NewGenesisBuilder(memorydb.New()) + builder := 
makegenesis.NewGenesisBuilder(memorydb.NewProducer("")) validators := GetFakeValidators(num) diff --git a/integration/makegenesis/genesis.go b/integration/makegenesis/genesis.go index ac305d05b..4daa3cf7b 100644 --- a/integration/makegenesis/genesis.go +++ b/integration/makegenesis/genesis.go @@ -31,7 +31,7 @@ import ( ) type GenesisBuilder struct { - tmpDB kvdb.Store + dbs kvdb.DBProducer tmpEvmStore *evmstore.Store tmpStateDB *state.StateDB @@ -65,7 +65,7 @@ func DefaultBlockProc() BlockProc { func (b *GenesisBuilder) GetStateDB() *state.StateDB { if b.tmpStateDB == nil { - tmpEvmStore := evmstore.NewStore(b.tmpDB, evmstore.LiteStoreConfig()) + tmpEvmStore := evmstore.NewStore(b.dbs, evmstore.LiteStoreConfig()) b.tmpStateDB, _ = tmpEvmStore.StateDB(hash.Zero) } return b.tmpStateDB @@ -109,11 +109,11 @@ func (b *GenesisBuilder) CurrentHash() hash.Hash { return er.Hash() } -func NewGenesisBuilder(tmpDb kvdb.Store) *GenesisBuilder { - tmpEvmStore := evmstore.NewStore(tmpDb, evmstore.LiteStoreConfig()) +func NewGenesisBuilder(dbs kvdb.DBProducer) *GenesisBuilder { + tmpEvmStore := evmstore.NewStore(dbs, evmstore.LiteStoreConfig()) statedb, _ := tmpEvmStore.StateDB(hash.Zero) return &GenesisBuilder{ - tmpDB: tmpDb, + dbs: dbs, tmpEvmStore: tmpEvmStore, tmpStateDB: statedb, totalSupply: new(big.Int), diff --git a/integration/metric.go b/integration/metric.go new file mode 100644 index 000000000..c6386b4de --- /dev/null +++ b/integration/metric.go @@ -0,0 +1,141 @@ +package integration + +import ( + "fmt" + "strings" + "sync" + "time" + + "github.com/Fantom-foundation/lachesis-base/kvdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" +) + +const ( + // metricsGatheringInterval specifies the interval to retrieve leveldb database + // compaction, io and pause stats to report to the user. + metricsGatheringInterval = 3 * time.Second +) + +type DBProducerWithMetrics struct { + kvdb.FlushableDBProducer +} + +type StoreWithMetrics struct { + kvdb.Store + + diskReadMeter metrics.Meter // Meter for measuring the effective amount of data read + diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written + + quitLock sync.Mutex // Mutex protecting the quit channel access + quitChan chan chan error // Quit channel to stop the metrics collection before closing the database + + log log.Logger // Contextual logger tracking the database path +} + +func WrapDatabaseWithMetrics(db kvdb.FlushableDBProducer) kvdb.FlushableDBProducer { + wrapper := &DBProducerWithMetrics{db} + return wrapper +} + +func WrapStoreWithMetrics(ds kvdb.Store) *StoreWithMetrics { + wrapper := &StoreWithMetrics{ + Store: ds, + quitChan: make(chan chan error), + } + return wrapper +} + +func (ds *StoreWithMetrics) Close() error { + ds.quitLock.Lock() + defer ds.quitLock.Unlock() + + if ds.quitChan != nil { + errc := make(chan error) + ds.quitChan <- errc + if err := <-errc; err != nil { + ds.log.Error("Metrics collection failed", "err", err) + } + ds.quitChan = nil + } + return ds.Store.Close() +} + +func (ds *StoreWithMetrics) meter(refresh time.Duration) { + // Create storage for iostats. + var iostats [2]float64 + + var ( + errc chan error + merr error + ) + + timer := time.NewTimer(refresh) + defer timer.Stop() + // Iterate ad infinitum and collect the stats + for i := 1; errc == nil && merr == nil; i++ { + // Retrieve the database iostats. 
+ ioStats, err := ds.Stat("leveldb.iostats") + if err != nil { + ds.log.Error("Failed to read database iostats", "err", err) + merr = err + continue + } + var nRead, nWrite float64 + parts := strings.Split(ioStats, " ") + if len(parts) < 2 { + ds.log.Error("Bad syntax of ioStats", "ioStats", ioStats) + merr = fmt.Errorf("bad syntax of ioStats %s", ioStats) + continue + } + if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil { + ds.log.Error("Bad syntax of read entry", "entry", parts[0]) + merr = err + continue + } + if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil { + log.Error("Bad syntax of write entry", "entry", parts[1]) + merr = err + continue + } + if ds.diskReadMeter != nil { + ds.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024)) + } + if ds.diskWriteMeter != nil { + ds.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024)) + } + iostats[0], iostats[1] = nRead, nWrite + + // Sleep a bit, then repeat the stats collection + select { + case errc = <-ds.quitChan: + // Quit requesting, stop hammering the database + case <-timer.C: + timer.Reset(refresh) + // Timeout, gather a new set of stats + } + } + if errc == nil { + errc = <-ds.quitChan + } + errc <- merr +} + +func (db *DBProducerWithMetrics) OpenDB(name string) (kvdb.Store, error) { + ds, err := db.FlushableDBProducer.OpenDB(name) + if err != nil { + return nil, err + } + dm := WrapStoreWithMetrics(ds) + if strings.HasPrefix(name, "gossip-") || strings.HasPrefix(name, "lachesis-") { + name = "epochs" + } + logger := log.New("database", name) + dm.log = logger + dm.diskReadMeter = metrics.GetOrRegisterMeter("opera/chaindata/"+name+"/disk/read", nil) + dm.diskWriteMeter = metrics.GetOrRegisterMeter("opera/chaindata/"+name+"/disk/write", nil) + + // Start up the metrics gathering and return + go dm.meter(metricsGatheringInterval) + return dm, nil +} diff --git a/integration/routing.go b/integration/routing.go new file mode 100644 index 000000000..29fd607ce --- /dev/null +++ b/integration/routing.go @@ -0,0 +1,107 @@ +package integration + +import ( + "fmt" + + "github.com/Fantom-foundation/lachesis-base/kvdb" + "github.com/Fantom-foundation/lachesis-base/kvdb/cachedproducer" + "github.com/Fantom-foundation/lachesis-base/kvdb/flushable" + "github.com/Fantom-foundation/lachesis-base/kvdb/multidb" + "github.com/Fantom-foundation/lachesis-base/kvdb/skipkeys" +) + +type RoutingConfig struct { + Table map[string]multidb.Route +} + +func DefaultRoutingConfig() RoutingConfig { + return RoutingConfig{ + Table: map[string]multidb.Route{ + "": { + Type: "pebble", + }, + "lachesis": { + Type: "pebble", + Name: "main", + Table: ">", + }, + "gossip": { + Type: "pebble", + Name: "main", + }, + "evm": { + Type: "pebble", + Name: "main", + }, + "gossip/e": { + Type: "pebble", + Name: "events", + }, + "evm/M": { + Type: "pebble", + Name: "evm-data", + }, + "evm-logs": { + Type: "pebble", + Name: "evm-logs", + }, + "gossip-%d": { + Type: "leveldb", + Name: "epoch-%d", + Table: "G", + }, + "lachesis-%d": { + Type: "leveldb", + Name: "epoch-%d", + Table: "L", + }, + }, + } +} + +func MakeFlushableMultiProducer(rawProducers map[multidb.TypeName]kvdb.IterableDBProducer, cfg RoutingConfig) (kvdb.FullDBProducer, func(), error) { + flushables := make(map[multidb.TypeName]kvdb.FullDBProducer) + var flushID []byte + var err error + var closeDBs = func() {} + for typ, producer := range rawProducers { + existingDBs := producer.Names() + flushablePool := 
flushable.NewSyncedPool(producer, FlushIDKey) + prevCloseDBs := closeDBs + closeDBs = func() { + prevCloseDBs() + _ = flushablePool.Close() + } + flushID, err = flushablePool.Initialize(existingDBs, flushID) + if err != nil { + return nil, nil, fmt.Errorf("failed to open existing databases: %v", err) + } + flushables[typ] = cachedproducer.WrapAll(flushablePool) + } + + p, err := makeMultiProducer(flushables, cfg) + return p, closeDBs, err +} + +func MakeRawMultiProducer(rawProducers map[multidb.TypeName]kvdb.IterableDBProducer, cfg RoutingConfig) (kvdb.FullDBProducer, error) { + flushables := make(map[multidb.TypeName]kvdb.FullDBProducer) + for typ, producer := range rawProducers { + flushables[typ] = cachedproducer.WrapAll(&DummyFlushableProducer{producer}) + } + + p, err := makeMultiProducer(flushables, cfg) + return p, err +} + +func makeMultiProducer(producers map[multidb.TypeName]kvdb.FullDBProducer, cfg RoutingConfig) (kvdb.FullDBProducer, error) { + multi, err := multidb.NewProducer(producers, cfg.Table, TablesKey) + if err != nil { + return nil, fmt.Errorf("failed to construct multidb: %v", err) + } + + err = multi.Verify() + if err != nil { + return nil, fmt.Errorf("incompatible chainstore DB layout: %v. Try to use 'db migrate' to recover", err) + } + return skipkeys.WrapAllProducer(multi, MetadataPrefix), nil +} diff --git a/utils/adapters/ethdb2kvdb/adapter.go b/utils/adapters/ethdb2kvdb/adapter.go index 5ec3d0f57..d585cf316 100644 --- a/utils/adapters/ethdb2kvdb/adapter.go +++ b/utils/adapters/ethdb2kvdb/adapter.go @@ -15,6 +15,10 @@ func Wrap(v ethdb.KeyValueStore) *Adapter { return &Adapter{v} } +func (db *Adapter) Drop() { + panic("called Drop on ethdb") +} + // batch is a write-only memory batch that commits changes to its host // database when Write is called. A batch cannot be used concurrently. 
type batch struct { diff --git a/utils/adapters/snap2kvdb/adapter.go b/utils/adapters/snap2kvdb/adapter.go index 809c1a19e..00d59446f 100644 --- a/utils/adapters/snap2kvdb/adapter.go +++ b/utils/adapters/snap2kvdb/adapter.go @@ -43,6 +43,8 @@ func (db *Adapter) Close() error { return nil } +func (db *Adapter) Drop() {} + func (db *Adapter) Stat(property string) (string, error) { return "", nil } From 8fea1c26caf4bbfaaff6f1eb89a8262d7b14c33c Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Sun, 27 Mar 2022 01:23:42 +0400 Subject: [PATCH 32/87] add `db migrate` command --- cmd/opera/launcher/db-migrate.go | 358 +++++++++++++++++++++++++++++++ cmd/opera/launcher/launcher.go | 1 + 2 files changed, 359 insertions(+) create mode 100644 cmd/opera/launcher/db-migrate.go diff --git a/cmd/opera/launcher/db-migrate.go b/cmd/opera/launcher/db-migrate.go new file mode 100644 index 000000000..62b68e894 --- /dev/null +++ b/cmd/opera/launcher/db-migrate.go @@ -0,0 +1,358 @@ +package launcher + +import ( + "os" + "path" + "strings" + "time" + + "github.com/Fantom-foundation/lachesis-base/common/bigendian" + "github.com/Fantom-foundation/lachesis-base/kvdb" + "github.com/Fantom-foundation/lachesis-base/kvdb/batched" + "github.com/Fantom-foundation/lachesis-base/kvdb/cachedproducer" + "github.com/Fantom-foundation/lachesis-base/kvdb/multidb" + "github.com/Fantom-foundation/lachesis-base/kvdb/table" + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/syndtr/goleveldb/leveldb/opt" + "gopkg.in/urfave/cli.v1" + + "github.com/Fantom-foundation/go-opera/integration" +) + +func dbMigrate(ctx *cli.Context) error { + cfg := makeAllConfigs(ctx) + + tmpPath := path.Join(cfg.Node.DataDir, "tmp") + integration.MakeDBDirs(tmpPath) + _ = os.RemoveAll(tmpPath) + defer os.RemoveAll(tmpPath) + + // get supported DB producers + dbTypes := getDBProducersFor(path.Join(cfg.Node.DataDir, "chaindata")) + + byReq, err := readRoutes(cfg, dbTypes) + if err != nil { + log.Crit("Failed to read routes", "err", err) + } + byDB := separateIntoDBs(byReq) + + // weed out DBs which don't need migration + { + for _, byReqOfDB := range byDB { + match := true + for _, e := range byReqOfDB { + if e.Old != e.New { + match = false + break + } + } + if match { + for _, e := range byReqOfDB { + delete(byReq, e.Req) + } + } + } + } + if len(byReq) == 0 { + log.Info("No DB migration is needed") + return nil + } + + // check if new layout is contradictory + for _, e0 := range byReq { + for _, e1 := range byReq { + if e0 == e1 { + continue + } + if dbLocatorOf(e0.New) == dbLocatorOf(e1.New) && strings.HasPrefix(e0.New.Table, e1.New.Table) { + log.Crit("New DB layout is contradictory", "db_type", e0.New.Type, "db_name", e0.New.Name, + "req0", e0.Req, "req1", e1.Req, "table0", e0.New.Table, "table1", e1.New.Table) + } + } + } + + // separate entries into inter-linked components + byComponents := make([]map[string]dbMigrationEntry, 0) + for componentI := 0; len(byReq) > 0; componentI++ { + var someEntry dbMigrationEntry + for _, e := range byReq { + someEntry = e + break + } + + // DFS + component := make(map[string]dbMigrationEntry) + stack := make(dbMigrationEntries, 0) + for pwalk := &someEntry; pwalk != nil; pwalk = stack.Pop() { + if _, ok := component[pwalk.Req]; ok { + continue + } + component[pwalk.Req] = *pwalk + delete(byReq, pwalk.Req) + for _, e := range byDB[dbLocatorOf(pwalk.Old)] { + stack = append(stack, e) + } + for _, e := range 
byDB[dbLocatorOf(pwalk.New)] { + stack = append(stack, e) + } + } + byComponents = append(byComponents, component) + } + + tmpDbTypes := getDBProducersFor(path.Join(cfg.Node.DataDir, "tmp")) + for _, component := range byComponents { + err := migrateComponent(cfg.Node.DataDir, dbTypes, tmpDbTypes, component) + if err != nil { + log.Crit("Failed to migrate component", "err", err) + } + } + id := bigendian.Uint64ToBytes(uint64(time.Now().UnixNano())) + for typ, producer := range dbTypes { + err := clearDirtyFlags(id, producer) + if err != nil { + log.Crit("Failed to write clean FlushID", "type", typ, "err", err) + } + } + + log.Info("DB migration is complete") + + return nil +} + +func getDBProducersFor(chaindataDir string) map[multidb.TypeName]kvdb.FullDBProducer { + dbTypes, err := integration.SupportedDBs(chaindataDir, integration.DBsCacheConfig{ + Table: map[string]integration.DBCacheConfig{ + "": { + Cache: 1024 * opt.MiB, + Fdlimit: uint64(utils.MakeDatabaseHandles() / 2), + }, + }, + }) + if err != nil { + log.Crit("Failed to construct DB producers", "err", err) + } + wrappedDbTypes := make(map[multidb.TypeName]kvdb.FullDBProducer) + for typ, producer := range dbTypes { + wrappedDbTypes[typ] = cachedproducer.WrapAll(&integration.DummyFlushableProducer{IterableDBProducer: producer}) + } + return wrappedDbTypes +} + +type dbMigrationEntry struct { + Req string + Old multidb.Route + New multidb.Route +} + +type dbMigrationEntries []dbMigrationEntry + +func (ee *dbMigrationEntries) Pop() *dbMigrationEntry { + l := len(*ee) + if l == 0 { + return nil + } + res := &(*ee)[l-1] + *ee = (*ee)[:l-1] + return res +} + +var dbLocatorOf = multidb.DBLocatorOf + +var tableLocatorOf = multidb.TableLocatorOf + +func readRoutes(cfg *config, dbTypes map[multidb.TypeName]kvdb.FullDBProducer) (map[string]dbMigrationEntry, error) { + router, err := multidb.NewProducer(dbTypes, cfg.DBs.Routing.Table, integration.TablesKey) + if err != nil { + return nil, err + } + byReq := make(map[string]dbMigrationEntry) + + for typ, producer := range dbTypes { + for _, dbName := range producer.Names() { + db, err := producer.OpenDB(dbName) + if err != nil { + log.Crit("DB opening error", "name", dbName, "err", err) + } + defer db.Close() + tables, err := multidb.ReadTablesList(db, integration.TablesKey) + if err != nil { + log.Crit("Failed to read tables list", "name", dbName, "err", err) + } + for _, t := range tables { + oldRoute := multidb.Route{ + Type: typ, + Name: dbName, + Table: t.Table, + } + newRoute := router.RouteOf(t.Req) + newRoute.NoDrop = false + byReq[t.Req] = dbMigrationEntry{ + Req: t.Req, + New: newRoute, + Old: oldRoute, + } + } + } + } + return byReq, nil +} + +func writeCleanTableRecords(dbTypes map[multidb.TypeName]kvdb.FullDBProducer, byReq map[string]dbMigrationEntry) error { + records := make(map[multidb.DBLocator][]multidb.TableRecord, 0) + for _, e := range byReq { + records[dbLocatorOf(e.New)] = append(records[dbLocatorOf(e.New)], multidb.TableRecord{ + Req: e.Req, + Table: e.New.Table, + }) + } + written := make(map[multidb.DBLocator]bool) + for _, e := range byReq { + if written[dbLocatorOf(e.New)] { + continue + } + written[dbLocatorOf(e.New)] = true + + db, err := dbTypes[e.New.Type].OpenDB(e.New.Name) + if err != nil { + return err + } + defer db.Close() + err = multidb.WriteTablesList(db, integration.TablesKey, records[dbLocatorOf(e.New)]) + if err != nil { + return err + } + } + return nil +} + +func migrateComponent(datadir string, dbTypes, tmpDbTypes 
map[multidb.TypeName]kvdb.FullDBProducer, byReq map[string]dbMigrationEntry) error { + byDB := separateIntoDBs(byReq) + // if it can be migrated just by DB renaming + if len(byDB) == 2 { + oldDBName := "" + newDBName := "" + typ := multidb.TypeName("") + ok := true + for _, e := range byReq { + if len(typ) == 0 { + oldDBName = e.Old.Name + newDBName = e.New.Name + typ = e.New.Type + } + if e.Old.Table != e.New.Table || e.New.Name != newDBName || e.Old.Name != oldDBName || + e.Old.Type != typ || e.New.Type != typ { + ok = false + break + } + } + if ok { + oldPath := path.Join(datadir, "chaindata", string(typ), oldDBName) + newPath := path.Join(datadir, "chaindata", string(typ), newDBName) + log.Info("Renaming DB", "old", oldPath, "new", newPath) + return os.Rename(oldPath, newPath) + } + } + + toMove := make(map[multidb.DBLocator]bool) + { + const batchKeys = 100000 + keys := make([][]byte, 0, batchKeys) + values := make([][]byte, 0, batchKeys) + for _, e := range byReq { + err := func() error { + oldDB, err := dbTypes[e.Old.Type].OpenDB(e.Old.Name) + if err != nil { + return err + } + oldDB = batched.Wrap(oldDB) + defer oldDB.Close() + newDB, err := tmpDbTypes[e.New.Type].OpenDB(e.New.Name) + if err != nil { + return err + } + toMove[dbLocatorOf(e.New)] = true + newDbName := "tmp/" + e.New.Name + newDB = batched.Wrap(newDB) + defer newDB.Close() + log.Info("Copying DB table", "req", e.Req, "old_db_type", e.Old.Type, "old_db_name", e.Old.Name, "old_table", e.Old.Table, + "new_db_type", e.New.Type, "new_db_name", newDbName, "new_table", e.New.Table) + oldTable := table.New(oldDB, []byte(e.Old.Table)) + newTable := table.New(newDB, []byte(e.New.Table)) + it := oldTable.NewIterator(nil, nil) + defer it.Release() + + for next := true; next; { + for len(keys) < batchKeys { + next = it.Next() + if !next { + break + } + keys = append(keys, common.CopyBytes(it.Key())) + values = append(values, common.CopyBytes(it.Value())) + } + for i := 0; i < len(keys); i++ { + err = newTable.Put(keys[i], values[i]) + if err != nil { + return err + } + } + keys = keys[:0] + values = values[:0] + } + return nil + }() + if err != nil { + return err + } + } + } + + // finalize tmp DBs + err := writeCleanTableRecords(tmpDbTypes, byReq) + if err != nil { + return err + } + + // drop unused DBs + dropped := make(map[multidb.DBLocator]bool) + for _, e := range byReq { + if dropped[dbLocatorOf(e.Old)] { + continue + } + dropped[dbLocatorOf(e.Old)] = true + log.Info("Dropping old DB", "db_type", e.Old.Type, "db_name", e.Old.Name) + deletePath := path.Join(datadir, "chaindata", string(e.Old.Type), e.Old.Name) + err := os.RemoveAll(deletePath) + if err != nil { + return err + } + } + // move tmp DBs + for e := range toMove { + oldPath := path.Join(datadir, "tmp", string(e.Type), e.Name) + newPath := path.Join(datadir, "chaindata", string(e.Type), e.Name) + log.Info("Moving tmp DB to clean dir", "old", oldPath, "new", newPath) + err := os.Rename(oldPath, newPath) + if err != nil { + return err + } + } + return nil +} + +func separateIntoDBs(byReq map[string]dbMigrationEntry) map[multidb.DBLocator]map[string]dbMigrationEntry { + byDB := make(map[multidb.DBLocator]map[string]dbMigrationEntry) + for _, e := range byReq { + if byDB[dbLocatorOf(e.Old)] == nil { + byDB[dbLocatorOf(e.Old)] = make(map[string]dbMigrationEntry) + } + byDB[dbLocatorOf(e.Old)][e.Req] = e + if byDB[dbLocatorOf(e.New)] == nil { + byDB[dbLocatorOf(e.New)] = make(map[string]dbMigrationEntry) + } + byDB[dbLocatorOf(e.New)][e.Req] = e + } + return byDB +} 
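The command migrates whatever difference exists between the routes recorded inside the databases and the routes declared in the node configuration. As an illustration only (how a custom routing table is surfaced through the node's configuration is not shown in this patch), a programmatic override of a single route could look like the sketch below; "gossip/e" and the "events" DB name are taken from the default routing table added in integration/routing.go above:

package main

import (
	"github.com/Fantom-foundation/lachesis-base/kvdb/multidb"

	"github.com/Fantom-foundation/go-opera/integration"
)

// customRouting starts from the default routing table and moves the gossip
// events table ("gossip/e") from the pebble "events" DB to a leveldb DB of
// the same name. Purely an illustration of the kind of layout change that
// `opera db migrate` reconciles against the routes recorded in the databases.
func customRouting() integration.RoutingConfig {
	routing := integration.DefaultRoutingConfig()
	routing.Table["gossip/e"] = multidb.Route{
		Type: "leveldb",
		Name: "events",
	}
	return routing
}

func main() {
	_ = customRouting()
}

With such a routing table in place (e.g. assigned to cfg.DBs.Routing), running the migrate subcommand registered below copies or renames the affected tables as implemented above and records the new layout via WriteTablesList.
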
diff --git a/cmd/opera/launcher/launcher.go b/cmd/opera/launcher/launcher.go index 59e446a86..3208ac9eb 100644 --- a/cmd/opera/launcher/launcher.go +++ b/cmd/opera/launcher/launcher.go @@ -214,6 +214,7 @@ func init() { importCommand, exportCommand, checkCommand, + dbCommand, // See snapshot.go snapshotCommand, // See dbcmd.go From 0b6e1cd6231c75548867c32131c14e3a94f6dacb Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Sun, 12 Jun 2022 10:56:11 +0400 Subject: [PATCH 33/87] adopt fixdity command for multidb feature --- cmd/opera/launcher/dbcmd.go | 33 +++++++++++++ cmd/opera/launcher/fixdirty.go | 86 +++++++++++++++++----------------- cmd/opera/launcher/launcher.go | 2 - 3 files changed, 75 insertions(+), 46 deletions(-) diff --git a/cmd/opera/launcher/dbcmd.go b/cmd/opera/launcher/dbcmd.go index 66c65cb5d..a6ccaa172 100644 --- a/cmd/opera/launcher/dbcmd.go +++ b/cmd/opera/launcher/dbcmd.go @@ -10,6 +10,10 @@ import ( ) var ( + experimentalFlag = cli.BoolFlag{ + Name: "experimental", + Usage: "Allow experimental DB fixing", + } dbCommand = cli.Command{ Name: "db", Usage: "A set of commands related to leveldb database", @@ -28,6 +32,35 @@ var ( Description: ` opera db compact will compact all databases under datadir's chaindata. +`, + }, + { + Name: "migrate", + Usage: "Migrate tables layout", + ArgsUsage: "", + Action: utils.MigrateFlags(dbMigrate), + Category: "DB COMMANDS", + Flags: []cli.Flag{ + utils.DataDirFlag, + }, + Description: ` +opera db migrate +will migrate tables layout according to the configuration. +`, + }, + { + Name: "fix", + Usage: "Experimental - try to fix dirty DB", + ArgsUsage: "", + Action: utils.MigrateFlags(fixDirty), + Category: "DB COMMANDS", + Flags: []cli.Flag{ + utils.DataDirFlag, + experimentalFlag, + }, + Description: ` +opera db fix --experimental +Experimental - try to fix dirty DB. `, }, }, diff --git a/cmd/opera/launcher/fixdirty.go b/cmd/opera/launcher/fixdirty.go index 7b414f0d1..36ad17c28 100644 --- a/cmd/opera/launcher/fixdirty.go +++ b/cmd/opera/launcher/fixdirty.go @@ -2,7 +2,6 @@ package launcher import ( "fmt" - "strings" "time" "github.com/Fantom-foundation/lachesis-base/abft" @@ -10,6 +9,7 @@ import ( "github.com/Fantom-foundation/lachesis-base/hash" "github.com/Fantom-foundation/lachesis-base/inter/idx" "github.com/Fantom-foundation/lachesis-base/kvdb" + "github.com/Fantom-foundation/lachesis-base/kvdb/batched" "github.com/Fantom-foundation/lachesis-base/kvdb/flushable" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/log" @@ -21,52 +21,44 @@ import ( "github.com/Fantom-foundation/go-opera/inter/iblockproc" ) -var ( - fixDirtyCommand = cli.Command{ - Action: utils.MigrateFlags(fixDirty), - Name: "fixdirty", - Usage: "Experimental - try to fix dirty DB", - ArgsUsage: "", - Flags: append(nodeFlags, testFlags...), - Category: "MISCELLANEOUS COMMANDS", - Description: `Experimental - try to fix dirty DB.`, - } -) - // maxEpochsToTry represents amount of last closed epochs to try (in case that the last one has the state unavailable) const maxEpochsToTry = 10000 // fixDirty is the fixdirty command. 
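// In the multi-DB layout it reverts the gossip store to the last closed epoch
// whose state is still available, erases the affected epoch and consensus
// tables, re-applies the consensus genesis from that epoch state, and finally
// clears the dirty flags of every DB producer.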
func fixDirty(ctx *cli.Context) error { + if !ctx.Bool(experimentalFlag.Name) { + utils.Fatalf("Add --experimental flag") + } cfg := makeAllConfigs(ctx) log.Info("Opening databases") - producer := makeRawDbsProducer(cfg) + dbTypes := makeRawDbsProducers(cfg) + multiProducer := makeMultiRawDbsProducer(dbTypes, cfg) // reverts the gossip database state - epochState, err := fixDirtyGossipDb(producer, cfg) + epochState, err := fixDirtyGossipDb(multiProducer, cfg) if err != nil { return err } - // drop epoch databases + // drop epoch-related databases and consensus database log.Info("Removing epoch DBs - will be recreated on next start") - err = dropAllEpochDbs(producer) - if err != nil { - return err + for _, name := range []string{ + fmt.Sprintf("gossip-%d", epochState.Epoch), + fmt.Sprintf("lachesis-%d", epochState.Epoch), + "lachesis", + } { + err = eraseTable(name, multiProducer) + if err != nil { + return err + } } - // drop consensus database - log.Info("Removing lachesis db") - cMainDb := mustOpenDB(producer, "lachesis") - _ = cMainDb.Close() - cMainDb.Drop() - // prepare consensus database from epochState - log.Info("Recreating lachesis db") - cMainDb = mustOpenDB(producer, "lachesis") + log.Info("Recreating lachesis DB") + cMainDb := mustOpenDB(multiProducer, "lachesis") cGetEpochDB := func(epoch idx.Epoch) kvdb.Store { - return mustOpenDB(producer, fmt.Sprintf("lachesis-%d", epoch)) + return mustOpenDB(multiProducer, fmt.Sprintf("lachesis-%d", epoch)) } cdb := abft.NewStore(cMainDb, cGetEpochDB, panics("Lachesis store"), cfg.LachesisStore) err = cdb.ApplyGenesis(&abft.Genesis{ @@ -78,10 +70,13 @@ func fixDirty(ctx *cli.Context) error { } _ = cdb.Close() - log.Info("Clearing dbs dirty flags") - err = clearDirtyFlags(producer) - if err != nil { - return err + log.Info("Clearing DBs dirty flags") + id := bigendian.Uint64ToBytes(uint64(time.Now().UnixNano())) + for typ, producer := range dbTypes { + err := clearDirtyFlags(id, producer) + if err != nil { + log.Crit("Failed to write clean FlushID", "type", typ, "err", err) + } } log.Info("Fixing done") @@ -147,24 +142,27 @@ func getLastEpochWithState(gdb *gossip.Store, epochsToTry idx.Epoch) (epochIdx i return 0, nil, nil } -func dropAllEpochDbs(producer kvdb.IterableDBProducer) error { - for _, name := range producer.Names() { - if strings.HasPrefix(name, "gossip-") || strings.HasPrefix(name, "lachesis-") { - log.Info("Removing db", "name", name) - db, err := producer.OpenDB(name) - if err != nil { - return fmt.Errorf("unable to open db %s; %s", name, err) - } - _ = db.Close() - db.Drop() +func eraseTable(name string, producer kvdb.IterableDBProducer) error { + log.Info("Cleaning table", "name", name) + db, err := producer.OpenDB(name) + if err != nil { + return fmt.Errorf("unable to open DB %s; %s", name, err) + } + db = batched.Wrap(db) + defer db.Close() + it := db.NewIterator(nil, nil) + defer it.Release() + for it.Next() { + err := db.Delete(it.Key()) + if err != nil { + return err } } return nil } // clearDirtyFlags - writes the CleanPrefix into all databases -func clearDirtyFlags(rawProducer kvdb.IterableDBProducer) error { - id := bigendian.Uint64ToBytes(uint64(time.Now().UnixNano())) +func clearDirtyFlags(id []byte, rawProducer kvdb.IterableDBProducer) error { names := rawProducer.Names() for _, name := range names { db, err := rawProducer.OpenDB(name) diff --git a/cmd/opera/launcher/launcher.go b/cmd/opera/launcher/launcher.go index 3208ac9eb..a4dc252f1 100644 --- a/cmd/opera/launcher/launcher.go +++ 
b/cmd/opera/launcher/launcher.go @@ -219,8 +219,6 @@ func init() { snapshotCommand, // See dbcmd.go dbCommand, - // See fixdirty.go - fixDirtyCommand, } sort.Sort(cli.CommandsByName(app.Commands)) From c5657e15d5b18ca5190b5e5b045669415f8d6d1e Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Wed, 29 Jun 2022 22:43:57 +0400 Subject: [PATCH 34/87] add batching for genesis processing --- gossip/apply_genesis.go | 23 +++++++++++++++++ gossip/evmstore/apply_genesis.go | 23 +++++++++++++++++ gossip/evmstore/store.go | 6 ++--- gossip/filters/filter_system_test.go | 2 +- topicsdb/search_test.go | 4 +-- topicsdb/topicsdb.go | 38 ++++++++++++++++++++-------- topicsdb/topicsdb_test.go | 8 +++--- 7 files changed, 84 insertions(+), 20 deletions(-) diff --git a/gossip/apply_genesis.go b/gossip/apply_genesis.go index 5409ffa27..ec06630e2 100644 --- a/gossip/apply_genesis.go +++ b/gossip/apply_genesis.go @@ -4,6 +4,7 @@ import ( "errors" "github.com/Fantom-foundation/lachesis-base/hash" + "github.com/Fantom-foundation/lachesis-base/kvdb/batched" "github.com/Fantom-foundation/go-opera/inter/iblockproc" "github.com/Fantom-foundation/go-opera/inter/ibr" @@ -13,6 +14,10 @@ import ( // ApplyGenesis writes initial state. func (s *Store) ApplyGenesis(g genesis.Genesis) (genesisHash hash.Hash, err error) { + // use batching wrapper for hot tables + unwrap := s.WrapTablesAsBatched() + defer unwrap() + // write epochs var topEr *ier.LlrIdxFullEpochRecord g.Epochs.ForEach(func(er ier.LlrIdxFullEpochRecord) bool { @@ -67,3 +72,21 @@ func (s *Store) ApplyGenesis(g genesis.Genesis) (genesisHash hash.Hash, err erro return genesisHash, err } + +func (s *Store) WrapTablesAsBatched() (unwrap func()) { + origTables := s.table + + batchedBlocks := batched.Wrap(s.table.Blocks) + s.table.Blocks = batchedBlocks + + batchedBlockHashes := batched.Wrap(s.table.BlockHashes) + s.table.BlockHashes = batchedBlockHashes + + unwrapEVM := s.evm.WrapTablesAsBatched() + return func() { + unwrapEVM() + _ = batchedBlocks.Flush() + _ = batchedBlockHashes.Flush() + s.table = origTables + } +} diff --git a/gossip/evmstore/apply_genesis.go b/gossip/evmstore/apply_genesis.go index e61373d2c..62f7bab8b 100644 --- a/gossip/evmstore/apply_genesis.go +++ b/gossip/evmstore/apply_genesis.go @@ -2,6 +2,7 @@ package evmstore import ( "github.com/Fantom-foundation/lachesis-base/kvdb" + "github.com/Fantom-foundation/lachesis-base/kvdb/batched" "github.com/Fantom-foundation/go-opera/opera/genesis" ) @@ -32,3 +33,25 @@ func (s *Store) ApplyGenesis(g genesis.Genesis) (err error) { } return batch.Write() } + +func (s *Store) WrapTablesAsBatched() (unwrap func()) { + origTables := s.table + + batchedTxs := batched.Wrap(s.table.Txs) + s.table.Txs = batchedTxs + + batchedTxPositions := batched.Wrap(s.table.TxPositions) + s.table.TxPositions = batchedTxPositions + + unwrapLogs := s.EvmLogs.WrapTablesAsBatched() + + batchedReceipts := batched.Wrap(s.table.Receipts) + s.table.Receipts = batchedReceipts + return func() { + _ = batchedTxs.Flush() + _ = batchedTxPositions.Flush() + _ = batchedReceipts.Flush() + unwrapLogs() + s.table = origTables + } +} diff --git a/gossip/evmstore/store.go b/gossip/evmstore/store.go index c74497bcc..1a9987332 100644 --- a/gossip/evmstore/store.go +++ b/gossip/evmstore/store.go @@ -33,8 +33,7 @@ type Store struct { cfg StoreConfig table struct { - Evm kvdb.Store `table:"M"` - Logs kvdb.Store `table:"L"` + Evm kvdb.Store `table:"M"` // API-only tables Receipts kvdb.Store `table:"r"` TxPositions kvdb.Store `table:"x"` @@ -78,7 +77,7 @@ 
func NewStore(dbs kvdb.DBProducer, cfg StoreConfig) *Store { } s.initEVMDB() - s.EvmLogs = topicsdb.New(s.table.Logs) + s.EvmLogs = topicsdb.New(dbs) s.initCache() return s @@ -93,6 +92,7 @@ func (s *Store) Close() { _ = table.CloseTables(&s.table) table.MigrateTables(&s.table, nil) table.MigrateCaches(&s.cache, setnil) + s.EvmLogs.Close() } func (s *Store) initCache() { diff --git a/gossip/filters/filter_system_test.go b/gossip/filters/filter_system_test.go index f27c1e1a4..3a7e257cc 100644 --- a/gossip/filters/filter_system_test.go +++ b/gossip/filters/filter_system_test.go @@ -49,7 +49,7 @@ type testBackend struct { func newTestBackend() *testBackend { return &testBackend{ db: rawdb.NewMemoryDatabase(), - logIndex: topicsdb.New(memorydb.New()), + logIndex: topicsdb.New(memorydb.NewProducer("")), blocksFeed: new(notify.Feed), txsFeed: new(notify.Feed), logsFeed: new(notify.Feed), diff --git a/topicsdb/search_test.go b/topicsdb/search_test.go index 0026ea22d..b247e04c0 100644 --- a/topicsdb/search_test.go +++ b/topicsdb/search_test.go @@ -14,8 +14,8 @@ import ( func BenchmarkSearch(b *testing.B) { topics, recs, topics4rec := genTestData(1000) - mem := memorydb.New() - index := New(mem) + mem := memorydb.NewProducer("") + index := New(mem, false) for _, rec := range recs { err := index.Push(rec) diff --git a/topicsdb/topicsdb.go b/topicsdb/topicsdb.go index acde8129a..ea319cdaa 100644 --- a/topicsdb/topicsdb.go +++ b/topicsdb/topicsdb.go @@ -6,8 +6,8 @@ import ( "github.com/Fantom-foundation/lachesis-base/inter/idx" "github.com/Fantom-foundation/lachesis-base/kvdb" + "github.com/Fantom-foundation/lachesis-base/kvdb/batched" "github.com/Fantom-foundation/lachesis-base/kvdb/table" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" ) @@ -15,13 +15,12 @@ import ( const MaxTopicsCount = 5 // count is limited hard to 5 by EVM (see LOG0...LOG4 ops) var ( - ErrEmptyTopics = fmt.Errorf("Empty topics") - ErrTooBigTopics = fmt.Errorf("Too big topics") + ErrEmptyTopics = fmt.Errorf("empty topics") + ErrTooBigTopics = fmt.Errorf("too many topics") ) // Index is a specialized indexes for log records storing and fetching. type Index struct { - db kvdb.Store table struct { // topic+topicN+(blockN+TxHash+logIndex) -> topic_count (where topicN=0 is for address) Topic kvdb.Store `table:"t"` @@ -31,16 +30,30 @@ type Index struct { } // New Index instance. -func New(db kvdb.Store) *Index { - tt := &Index{ - db: db, - } +func New(dbs kvdb.DBProducer) *Index { + tt := &Index{} - table.MigrateTables(&tt.table, tt.db) + err := table.OpenTables(&tt.table, dbs, "evm-logs") + if err != nil { + panic(err) + } return tt } +func (tt *Index) WrapTablesAsBatched() (unwrap func()) { + origTables := tt.table + batchedTopic := batched.Wrap(tt.table.Topic) + tt.table.Topic = batchedTopic + batchedLogrec := batched.Wrap(tt.table.Logrec) + tt.table.Logrec = batchedLogrec + return func() { + _ = batchedTopic.Flush() + _ = batchedLogrec.Flush() + tt.table = origTables + } +} + // FindInBlocks returns all log records of block range by pattern. 1st pattern element is an address. // The same as FindInBlocksAsync but fetches log's body sync. func (tt *Index) FindInBlocks(ctx context.Context, from, to idx.Block, pattern [][]common.Hash) (logs []*types.Log, err error) { @@ -144,7 +157,7 @@ func (tt *Index) MustPush(recs ...*types.Log) { } } -// Write log record to database. 
+// Push log record to database batch func (tt *Index) Push(recs ...*types.Log) error { for _, rec := range recs { if len(rec.Topics) > MaxTopicsCount { @@ -193,3 +206,8 @@ func (tt *Index) Push(recs ...*types.Log) error { return nil } + +func (tt *Index) Close() { + _ = tt.table.Topic.Close() + _ = tt.table.Logrec.Close() +} diff --git a/topicsdb/topicsdb_test.go b/topicsdb/topicsdb_test.go index 18f501bf2..b59d70ad7 100644 --- a/topicsdb/topicsdb_test.go +++ b/topicsdb/topicsdb_test.go @@ -102,7 +102,7 @@ func TestIndexSearchMultyVariants(t *testing.T) { }, } - index := New(memorydb.New()) + index := New(memorydb.NewProducer("")) for _, l := range testdata { err := index.Push(l) @@ -201,7 +201,7 @@ func TestIndexSearchSingleVariant(t *testing.T) { topics, recs, topics4rec := genTestData(100) - index := New(memorydb.New()) + index := New(memorydb.NewProducer("")) for _, rec := range recs { err := index.Push(rec) @@ -271,7 +271,7 @@ func TestIndexSearchSimple(t *testing.T) { }, } - index := New(memorydb.New()) + index := New(memorydb.NewProducer("")) for _, l := range testdata { err := index.Push(l) @@ -331,7 +331,7 @@ func TestMaxTopicsCount(t *testing.T) { pattern[i+1] = []common.Hash{testdata.Topics[i]} } - index := New(memorydb.New()) + index := New(memorydb.NewProducer("")) err := index.Push(testdata) require.NoError(t, err) From 82d18157229ab2fe93f0a7064f0683956c7e8491 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Tue, 26 Jul 2022 00:36:57 +0700 Subject: [PATCH 35/87] use piecefunc from lachesis-base --- gossip/emitter/control.go | 2 +- gossip/emitter/emitter.go | 2 +- gossip/emitter/emitter_llr.go | 2 +- gossip/emitter/piecefuncs.go | 3 +- gossip/emitter/validators.go | 3 +- gossip/gasprice/constructive.go | 2 +- gossip/gasprice/gasprice.go | 2 +- gossip/gasprice/reactive.go | 3 +- utils/piecefunc/piecefunc.go | 80 ----------- utils/piecefunc/piecefunc_test.go | 216 ------------------------------ 10 files changed, 8 insertions(+), 307 deletions(-) delete mode 100644 utils/piecefunc/piecefunc.go delete mode 100644 utils/piecefunc/piecefunc_test.go diff --git a/gossip/emitter/control.go b/gossip/emitter/control.go index 7fa7f33a1..1bd62fd84 100644 --- a/gossip/emitter/control.go +++ b/gossip/emitter/control.go @@ -6,9 +6,9 @@ import ( "github.com/Fantom-foundation/lachesis-base/emitter/ancestor" "github.com/Fantom-foundation/lachesis-base/inter/idx" "github.com/Fantom-foundation/lachesis-base/inter/pos" + "github.com/Fantom-foundation/lachesis-base/utils/piecefunc" "github.com/Fantom-foundation/go-opera/inter" - "github.com/Fantom-foundation/go-opera/utils/piecefunc" ) func scalarUpdMetric(diff idx.Event, weight pos.Weight, totalWeight pos.Weight) ancestor.Metric { diff --git a/gossip/emitter/emitter.go b/gossip/emitter/emitter.go index d92c3fd20..a2bb8d31e 100644 --- a/gossip/emitter/emitter.go +++ b/gossip/emitter/emitter.go @@ -12,6 +12,7 @@ import ( "github.com/Fantom-foundation/lachesis-base/hash" "github.com/Fantom-foundation/lachesis-base/inter/idx" "github.com/Fantom-foundation/lachesis-base/inter/pos" + "github.com/Fantom-foundation/lachesis-base/utils/piecefunc" "github.com/ethereum/go-ethereum/core/types" lru "github.com/hashicorp/golang-lru" @@ -20,7 +21,6 @@ import ( "github.com/Fantom-foundation/go-opera/inter" "github.com/Fantom-foundation/go-opera/logger" "github.com/Fantom-foundation/go-opera/tracing" - "github.com/Fantom-foundation/go-opera/utils/piecefunc" "github.com/Fantom-foundation/go-opera/utils/rate" ) diff --git a/gossip/emitter/emitter_llr.go 
b/gossip/emitter/emitter_llr.go index b3da14bb7..26c169846 100644 --- a/gossip/emitter/emitter_llr.go +++ b/gossip/emitter/emitter_llr.go @@ -5,10 +5,10 @@ import ( "github.com/Fantom-foundation/lachesis-base/hash" "github.com/Fantom-foundation/lachesis-base/inter/idx" + "github.com/Fantom-foundation/lachesis-base/utils/piecefunc" "github.com/Fantom-foundation/go-opera/eventcheck/basiccheck" "github.com/Fantom-foundation/go-opera/inter" - "github.com/Fantom-foundation/go-opera/utils/piecefunc" ) var emptyLlrBlockVotes = inter.LlrBlockVotes{ diff --git a/gossip/emitter/piecefuncs.go b/gossip/emitter/piecefuncs.go index af45f2dd6..f1cad0338 100644 --- a/gossip/emitter/piecefuncs.go +++ b/gossip/emitter/piecefuncs.go @@ -3,8 +3,7 @@ package emitter import ( "github.com/Fantom-foundation/lachesis-base/emitter/ancestor" "github.com/Fantom-foundation/lachesis-base/inter/idx" - - "github.com/Fantom-foundation/go-opera/utils/piecefunc" + "github.com/Fantom-foundation/lachesis-base/utils/piecefunc" ) var ( diff --git a/gossip/emitter/validators.go b/gossip/emitter/validators.go index 3b8c2e7de..4bcbb5d13 100644 --- a/gossip/emitter/validators.go +++ b/gossip/emitter/validators.go @@ -5,8 +5,7 @@ import ( "github.com/Fantom-foundation/lachesis-base/inter/idx" "github.com/Fantom-foundation/lachesis-base/inter/pos" - - "github.com/Fantom-foundation/go-opera/utils/piecefunc" + "github.com/Fantom-foundation/lachesis-base/utils/piecefunc" ) const ( diff --git a/gossip/gasprice/constructive.go b/gossip/gasprice/constructive.go index d7fb99aa2..1a01554f8 100644 --- a/gossip/gasprice/constructive.go +++ b/gossip/gasprice/constructive.go @@ -3,7 +3,7 @@ package gasprice import ( "math/big" - "github.com/Fantom-foundation/go-opera/utils/piecefunc" + "github.com/Fantom-foundation/lachesis-base/utils/piecefunc" ) func (gpo *Oracle) maxTotalGasPower() *big.Int { diff --git a/gossip/gasprice/gasprice.go b/gossip/gasprice/gasprice.go index 872f29d6a..764ae7506 100644 --- a/gossip/gasprice/gasprice.go +++ b/gossip/gasprice/gasprice.go @@ -23,12 +23,12 @@ import ( "time" "github.com/Fantom-foundation/lachesis-base/inter/idx" + "github.com/Fantom-foundation/lachesis-base/utils/piecefunc" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/types" lru "github.com/hashicorp/golang-lru" "github.com/Fantom-foundation/go-opera/opera" - "github.com/Fantom-foundation/go-opera/utils/piecefunc" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" diff --git a/gossip/gasprice/reactive.go b/gossip/gasprice/reactive.go index 4c4464fbb..3b78accea 100644 --- a/gossip/gasprice/reactive.go +++ b/gossip/gasprice/reactive.go @@ -6,9 +6,8 @@ import ( "sync/atomic" "time" + "github.com/Fantom-foundation/lachesis-base/utils/piecefunc" "github.com/ethereum/go-ethereum/core/types" - - "github.com/Fantom-foundation/go-opera/utils/piecefunc" ) const ( diff --git a/utils/piecefunc/piecefunc.go b/utils/piecefunc/piecefunc.go deleted file mode 100644 index 659f83336..000000000 --- a/utils/piecefunc/piecefunc.go +++ /dev/null @@ -1,80 +0,0 @@ -package piecefunc - -import "math" - -const ( - // DecimalUnit is used to define ratios with integers, it's 1.0 - DecimalUnit = 1e6 - maxVal = math.MaxUint64/uint64(DecimalUnit) - 1 -) - -// Dot is a pair of numbers -type Dot struct { - X uint64 - Y uint64 -} - -type Func struct { - dots []Dot -} - -func NewFunc(dots []Dot) func(x uint64) uint64 { - if len(dots) < 2 { - panic("too few dots") - } - - var prevX uint64 - for i, dot := range dots { - if 
i >= 1 && dot.X <= prevX { - panic("non monotonic X") - } - if dot.Y > maxVal { - panic("too large Y") - } - if dot.X > maxVal { - panic("too large X") - } - prevX = dot.X - } - - return Func{ - dots: dots, - }.Get -} - -// Mul is multiplication of ratios with integer numbers -func Mul(a, b uint64) uint64 { - return a * b / DecimalUnit -} - -// Div is division of ratios with integer numbers -func Div(a, b uint64) uint64 { - return a * DecimalUnit / b -} - -// Get calculates f(x), where f is a piecewise linear function defined by the pieces -func (f Func) Get(x uint64) uint64 { - if x < f.dots[0].X { - return f.dots[0].Y - } - if x > f.dots[len(f.dots)-1].X { - return f.dots[len(f.dots)-1].Y - } - // find a piece - p0 := len(f.dots) - 2 - for i, piece := range f.dots { - if i >= 1 && i < len(f.dots)-1 && piece.X > x { - p0 = i - 1 - break - } - } - // linearly interpolate - p1 := p0 + 1 - - x0, x1 := f.dots[p0].X, f.dots[p1].X - y0, y1 := f.dots[p0].Y, f.dots[p1].Y - - ratio := Div(x-x0, x1-x0) - - return Mul(y0, DecimalUnit-ratio) + Mul(y1, ratio) -} diff --git a/utils/piecefunc/piecefunc_test.go b/utils/piecefunc/piecefunc_test.go deleted file mode 100644 index bdb4d47e3..000000000 --- a/utils/piecefunc/piecefunc_test.go +++ /dev/null @@ -1,216 +0,0 @@ -package piecefunc - -import ( - "math" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestConstFunc(t *testing.T) { - require := require.New(t) - - var constF = NewFunc([]Dot{ - { - X: 0.0 * DecimalUnit, - Y: 1.0 * DecimalUnit, - }, - { - X: 10.0 * DecimalUnit, - Y: 1.0 * DecimalUnit, - }, - { - X: 20.0 * DecimalUnit, - Y: 1.0 * DecimalUnit, - }, - { - X: 0xFFFFFF * DecimalUnit, - Y: 1.0 * DecimalUnit, - }, - }) - - for i, x := range []uint64{0, 1, 5, 10, 20, 0xFFFF, 0xFFFFFF} { - X := x * DecimalUnit - Y := constF(X) - require.Equal(uint64(DecimalUnit), Y, i) - } -} - -func TestUp45Func(t *testing.T) { - require := require.New(t) - - var up45F = NewFunc([]Dot{ - { - X: 0.0 * DecimalUnit, - Y: 0.0 * DecimalUnit, - }, - { - X: 10.0 * DecimalUnit, - Y: 10.0 * DecimalUnit, - }, - { - X: 20.0 * DecimalUnit, - Y: 20.0 * DecimalUnit, - }, - { - X: 0xFFFFFF * DecimalUnit, - Y: 21.0 * DecimalUnit, - }, - }) - - for i, x := range []uint64{0, 1, 3, 5, 10, 20} { - X := x * DecimalUnit - Y := up45F(X) - require.Equal(X, Y, i) - } - - for i, x := range []uint64{21, 0xFFFF, 0xFFFFFF} { - X := x * DecimalUnit - Y := up45F(X) - require.True(20.0*DecimalUnit <= Y && Y <= 21.0*DecimalUnit, i) - } -} - -func TestDown45Func(t *testing.T) { - require := require.New(t) - - var down45F = NewFunc([]Dot{ - { - X: 0.0 * DecimalUnit, - Y: 20.0 * DecimalUnit, - }, - { - X: 10.0 * DecimalUnit, - Y: 10.0 * DecimalUnit, - }, - { - X: 20.0 * DecimalUnit, - Y: 0.0 * DecimalUnit, - }, - }) - - for i, x := range []uint64{0, 1, 2, 5, 10, 20} { - X := x * DecimalUnit - Y := down45F(X) - require.Equal(20.0*DecimalUnit-X, Y, i) - } -} - -func TestFuncCheck(t *testing.T) { - require := require.New(t) - - require.Panics(func() { - // too few dots - _ = NewFunc([]Dot{ - { - X: 0.0 * DecimalUnit, - Y: 20.0 * DecimalUnit, - }, - }) - }) - - require.Panics(func() { - // non monotonic X - _ = NewFunc([]Dot{ - { - X: 0.0 * DecimalUnit, - Y: 20.0 * DecimalUnit, - }, - { - X: 20.0 * DecimalUnit, - Y: 20.0 * DecimalUnit, - }, - { - X: 10.0 * DecimalUnit, - Y: 20.0 * DecimalUnit, - }, - }) - }) - require.Panics(func() { - // too large val - NewFunc([]Dot{ - { - X: 0, - Y: 0, - }, - { - X: maxVal + 1, - Y: 1, - }, - }) - }) - require.Panics(func() { - // too large val - 
NewFunc([]Dot{ - { - X: 0, - Y: 0, - }, - { - X: 1, - Y: maxVal + 1, - }, - }) - }) - require.Panics(func() { - // too large val - NewFunc([]Dot{ - { - X: maxVal + 1, - Y: maxVal + 1, - }, - { - X: maxVal + 2, - Y: maxVal + 2, - }, - }) - }) -} - -func TestCustomFunc(t *testing.T) { - require := require.New(t) - - var f = NewFunc([]Dot{ - { - X: 1, - Y: 0.0 * DecimalUnit, - }, - { - X: 10.0 * DecimalUnit, - Y: 10.0 * DecimalUnit, - }, - { - X: 20.0 * DecimalUnit, - Y: 40.0 * DecimalUnit, - }, - { - X: 25.0 * DecimalUnit, - Y: 41.0 * DecimalUnit, - }, - { - X: maxVal, - Y: maxVal, - }, - }) - - require.Equal(uint64(0), f(0)) - require.Equal(uint64(0), f(1)) - require.Equal(uint64(0), f(10)) - require.Equal(uint64(10), f(11)) - require.Equal(uint64(4.99999*DecimalUnit), f(5.0*DecimalUnit)) - require.Equal(uint64(10.0*DecimalUnit), f(10.0*DecimalUnit)) - require.Equal(uint64(25.0*DecimalUnit), f(15.0*DecimalUnit)) - require.Equal(uint64(34.0*DecimalUnit), f(18.0*DecimalUnit)) - require.Equal(uint64(37.0*DecimalUnit), f(19.0*DecimalUnit)) - require.Equal(uint64(37.3*DecimalUnit), f(19.1*DecimalUnit)) - require.Equal(uint64(40.0*DecimalUnit), f(20.0*DecimalUnit)) - require.Equal(uint64(40.4*DecimalUnit), f(22.0*DecimalUnit)) - require.Equal(uint64(41*DecimalUnit), f(25.0*DecimalUnit)) - require.Equal(uint64(250012.273351*DecimalUnit), f(250000.0*DecimalUnit)) - require.Equal(uint64(2500011.987361*DecimalUnit), f(2500000.0*DecimalUnit)) - require.Equal(uint64(maxVal-18446704-18446703), f(maxVal-18446704-16)) - require.Equal(uint64(maxVal-18446704), f(maxVal-18446704-15)) - require.Equal(uint64(maxVal-18446704), f(maxVal-1)) - require.Equal(uint64(maxVal), f(maxVal)) - require.Equal(uint64(maxVal), f(math.MaxUint64)) -} From d697ba7a7ce271fdd5d0a880aad895e6665d7d8b Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Tue, 26 Jul 2022 00:57:03 +0700 Subject: [PATCH 36/87] refactor evm.Commit args --- gossip/evmstore/store.go | 9 +++++---- gossip/store.go | 3 ++- integration/makegenesis/genesis.go | 2 +- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/gossip/evmstore/store.go b/gossip/evmstore/store.go index 1a9987332..ebf7f8ff1 100644 --- a/gossip/evmstore/store.go +++ b/gossip/evmstore/store.go @@ -4,6 +4,7 @@ import ( "errors" "github.com/Fantom-foundation/lachesis-base/hash" + "github.com/Fantom-foundation/lachesis-base/inter/idx" "github.com/Fantom-foundation/lachesis-base/kvdb" "github.com/Fantom-foundation/lachesis-base/kvdb/nokeyiserr" "github.com/Fantom-foundation/lachesis-base/kvdb/table" @@ -184,9 +185,9 @@ func (s *Store) IsEvmSnapshotPaused() bool { } // Commit changes. 
-func (s *Store) Commit(block iblockproc.BlockState, flush bool) error { +func (s *Store) Commit(block idx.Block, root hash.Hash, flush bool) error { triedb := s.EvmState.TrieDB() - stateRoot := common.Hash(block.FinalizedStateRoot) + stateRoot := common.Hash(root) // If we're applying genesis or running an archive node, always flush if flush || s.cfg.Cache.TrieDirtyDisabled { err := triedb.Commit(stateRoot, false, nil) @@ -197,9 +198,9 @@ func (s *Store) Commit(block iblockproc.BlockState, flush bool) error { } else { // Full but not archive node, do proper garbage collection triedb.Reference(stateRoot, common.Hash{}) // metadata reference to keep trie alive - s.triegc.Push(stateRoot, -int64(block.LastBlock.Idx)) + s.triegc.Push(stateRoot, -int64(block)) - if current := uint64(block.LastBlock.Idx); current > TriesInMemory { + if current := uint64(block); current > TriesInMemory { // If we exceeded our memory allowance, flush matured singleton nodes to disk s.Cap() diff --git a/gossip/store.go b/gossip/store.go index eb2f8fc48..d972bc8c4 100644 --- a/gossip/store.go +++ b/gossip/store.go @@ -177,7 +177,8 @@ func (s *Store) isCommitNeeded(sc, tc int) bool { // commitEVM commits EVM storage func (s *Store) commitEVM(flush bool) { - err := s.evm.Commit(s.GetBlockState(), flush) + bs := s.GetBlockState() + err := s.evm.Commit(bs.LastBlock.Idx, bs.FinalizedStateRoot, flush) if err != nil { s.Log.Crit("Failed to commit EVM storage", "err", err) } diff --git a/integration/makegenesis/genesis.go b/integration/makegenesis/genesis.go index 4daa3cf7b..364118b8f 100644 --- a/integration/makegenesis/genesis.go +++ b/integration/makegenesis/genesis.go @@ -218,7 +218,7 @@ func (b *GenesisBuilder) ExecuteGenesisTxs(blockProc BlockProc, genesisTxs types } b.epochs = append(b.epochs, b.currentEpoch) - return b.tmpEvmStore.Commit(bs, true) + return b.tmpEvmStore.Commit(bs.LastBlock.Idx, bs.FinalizedStateRoot, true) } type memFile struct { From 00186e47140fd9fc1b2442b6f9aa9e611bd91e35 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Tue, 26 Jul 2022 01:00:59 +0700 Subject: [PATCH 37/87] add support of 3 DB integrity modes (flushable, flagged, direct) --- cmd/opera/launcher/check.go | 9 +- cmd/opera/launcher/db-migrate.go | 42 ++++--- cmd/opera/launcher/export.go | 2 +- cmd/opera/launcher/launcher.go | 2 +- gossip/filters/filter_test.go | 16 +-- integration/assembly.go | 91 ++++++++------- integration/db.go | 118 ++++++++++---------- integration/routing.go | 62 +++++----- integration/status.go | 22 ++++ topicsdb/search_test.go | 2 +- utils/dbutil/asyncflushproducer/producer.go | 80 +++++++++++++ utils/dbutil/asyncflushproducer/store.go | 14 +++ utils/migration/migration.go | 12 +- utils/migration/migration_test.go | 1 + 14 files changed, 303 insertions(+), 170 deletions(-) create mode 100644 integration/status.go create mode 100644 utils/dbutil/asyncflushproducer/producer.go create mode 100644 utils/dbutil/asyncflushproducer/store.go diff --git a/cmd/opera/launcher/check.go b/cmd/opera/launcher/check.go index 43278ba48..6de284656 100644 --- a/cmd/opera/launcher/check.go +++ b/cmd/opera/launcher/check.go @@ -17,10 +17,7 @@ import ( ) func makeRawDbsProducers(cfg *config) map[multidb.TypeName]kvdb.IterableDBProducer { - dbsList, err := integration.SupportedDBs(path.Join(cfg.Node.DataDir, "chaindata"), cfg.DBs.RuntimeCache) - if err != nil { - utils.Fatalf("Failed to initialize DB producers: %v", err) - } + dbsList, _ := integration.SupportedDBs(path.Join(cfg.Node.DataDir, "chaindata"), cfg.DBs.RuntimeCache) if 
err := checkStateInitialized(dbsList); err != nil { utils.Fatalf(err.Error()) } @@ -28,7 +25,7 @@ func makeRawDbsProducers(cfg *config) map[multidb.TypeName]kvdb.IterableDBProduc } func makeMultiRawDbsProducer(dbsList map[multidb.TypeName]kvdb.IterableDBProducer, cfg *config) kvdb.FullDBProducer { - multiRawDbs, err := integration.MakeRawMultiProducer(dbsList, cfg.DBs.Routing) + multiRawDbs, err := integration.MakeDirectMultiProducer(dbsList, cfg.DBs.Routing) if err != nil { utils.Fatalf("Failed to initialize multi DB producer: %v", err) } @@ -37,7 +34,7 @@ func makeMultiRawDbsProducer(dbsList map[multidb.TypeName]kvdb.IterableDBProduce func makeRawDbsProducer(cfg *config) kvdb.FullDBProducer { dbsList := makeRawDbsProducers(cfg) - multiRawDbs, err := integration.MakeRawMultiProducer(dbsList, cfg.DBs.Routing) + multiRawDbs, err := integration.MakeDirectMultiProducer(dbsList, cfg.DBs.Routing) if err != nil { utils.Fatalf("Failed to initialize multi DB producer: %v", err) } diff --git a/cmd/opera/launcher/db-migrate.go b/cmd/opera/launcher/db-migrate.go index 62b68e894..96972ad5d 100644 --- a/cmd/opera/launcher/db-migrate.go +++ b/cmd/opera/launcher/db-migrate.go @@ -122,7 +122,7 @@ func dbMigrate(ctx *cli.Context) error { } func getDBProducersFor(chaindataDir string) map[multidb.TypeName]kvdb.FullDBProducer { - dbTypes, err := integration.SupportedDBs(chaindataDir, integration.DBsCacheConfig{ + dbTypes, _ := integration.SupportedDBs(chaindataDir, integration.DBsCacheConfig{ Table: map[string]integration.DBCacheConfig{ "": { Cache: 1024 * opt.MiB, @@ -130,12 +130,9 @@ func getDBProducersFor(chaindataDir string) map[multidb.TypeName]kvdb.FullDBProd }, }, }) - if err != nil { - log.Crit("Failed to construct DB producers", "err", err) - } wrappedDbTypes := make(map[multidb.TypeName]kvdb.FullDBProducer) for typ, producer := range dbTypes { - wrappedDbTypes[typ] = cachedproducer.WrapAll(&integration.DummyFlushableProducer{IterableDBProducer: producer}) + wrappedDbTypes[typ] = cachedproducer.WrapAll(&integration.DummyScopedProducer{IterableDBProducer: producer}) } return wrappedDbTypes } @@ -160,8 +157,6 @@ func (ee *dbMigrationEntries) Pop() *dbMigrationEntry { var dbLocatorOf = multidb.DBLocatorOf -var tableLocatorOf = multidb.TableLocatorOf - func readRoutes(cfg *config, dbTypes map[multidb.TypeName]kvdb.FullDBProducer) (map[string]dbMigrationEntry, error) { router, err := multidb.NewProducer(dbTypes, cfg.DBs.Routing.Table, integration.TablesKey) if err != nil { @@ -227,29 +222,40 @@ func writeCleanTableRecords(dbTypes map[multidb.TypeName]kvdb.FullDBProducer, by return nil } +func interchangeableType(a_, b_ multidb.TypeName, types map[multidb.TypeName]kvdb.FullDBProducer) bool { + for t_ := range types { + a, b, t := string(a_), string(b_), string(t_) + t = strings.TrimSuffix(t, "fsh") + t = strings.TrimSuffix(t, "flg") + t = strings.TrimSuffix(t, "drc") + if strings.HasPrefix(a, t) && strings.HasPrefix(b, t) { + return true + } + } + return false +} + func migrateComponent(datadir string, dbTypes, tmpDbTypes map[multidb.TypeName]kvdb.FullDBProducer, byReq map[string]dbMigrationEntry) error { byDB := separateIntoDBs(byReq) // if it can be migrated just by DB renaming if len(byDB) == 2 { - oldDBName := "" - newDBName := "" - typ := multidb.TypeName("") + oldDB := multidb.DBLocator{} + newDB := multidb.DBLocator{} ok := true for _, e := range byReq { - if len(typ) == 0 { - oldDBName = e.Old.Name - newDBName = e.New.Name - typ = e.New.Type + if len(oldDB.Type) == 0 { + oldDB = dbLocatorOf(e.Old) 
+ newDB = dbLocatorOf(e.New) } - if e.Old.Table != e.New.Table || e.New.Name != newDBName || e.Old.Name != oldDBName || - e.Old.Type != typ || e.New.Type != typ { + if !interchangeableType(oldDB.Type, newDB.Type, dbTypes) || e.Old.Table != e.New.Table || e.New.Name != newDB.Name || + e.Old.Name != oldDB.Name || e.Old.Type != oldDB.Type || e.New.Type != newDB.Type { ok = false break } } if ok { - oldPath := path.Join(datadir, "chaindata", string(typ), oldDBName) - newPath := path.Join(datadir, "chaindata", string(typ), newDBName) + oldPath := path.Join(datadir, "chaindata", string(oldDB.Type), oldDB.Name) + newPath := path.Join(datadir, "chaindata", string(newDB.Type), newDB.Name) log.Info("Renaming DB", "old", oldPath, "new", newPath) return os.Rename(oldPath, newPath) } diff --git a/cmd/opera/launcher/export.go b/cmd/opera/launcher/export.go index 696431b39..abb3f6d8d 100644 --- a/cmd/opera/launcher/export.go +++ b/cmd/opera/launcher/export.go @@ -112,7 +112,7 @@ func checkStateInitialized(rawProducers map[multidb.TypeName]kvdb.IterableDBProd } func makeRawGossipStore(rawProducer kvdb.IterableDBProducer, cfg *config) (*gossip.Store, error) { - dbs := &integration.DummyFlushableProducer{rawProducer} + dbs := &integration.DummyScopedProducer{rawProducer} gdb := gossip.NewStore(dbs, cfg.OperaStore) return gdb, nil } diff --git a/cmd/opera/launcher/launcher.go b/cmd/opera/launcher/launcher.go index a4dc252f1..3f647e674 100644 --- a/cmd/opera/launcher/launcher.go +++ b/cmd/opera/launcher/launcher.go @@ -369,7 +369,7 @@ func makeNode(ctx *cli.Context, cfg *config, genesisStore *genesisstore.Store) ( gdb.Close() _ = cdb.Close() if closeDBs != nil { - closeDBs() + _ = closeDBs() } } } diff --git a/gossip/filters/filter_test.go b/gossip/filters/filter_test.go index 6846ad244..a58e554d6 100644 --- a/gossip/filters/filter_test.go +++ b/gossip/filters/filter_test.go @@ -21,9 +21,10 @@ import ( "io/ioutil" "math/big" "os" + "path" "testing" - "github.com/Fantom-foundation/lachesis-base/kvdb/table" + "github.com/Fantom-foundation/lachesis-base/kvdb/leveldb" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" @@ -31,9 +32,9 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" + "github.com/syndtr/goleveldb/leveldb/opt" "github.com/Fantom-foundation/go-opera/topicsdb" - "github.com/Fantom-foundation/go-opera/utils/adapters/ethdb2kvdb" ) func testConfig() Config { @@ -58,15 +59,16 @@ func BenchmarkFilters(b *testing.B) { } defer os.RemoveAll(dir) - ldb, err := rawdb.NewLevelDBDatabase(dir, 0, 0, "", false) + backend := newTestBackend() + ldb, err := rawdb.NewLevelDBDatabase(path.Join(dir, "backend-db"), 100, 1000, "", false) if err != nil { b.Fatal(err) } - defer ldb.Close() - - backend := newTestBackend() backend.db = rawdb.NewTable(ldb, "a") - backend.logIndex = topicsdb.New(table.New(ethdb2kvdb.Wrap(ldb), []byte("b"))) + backend.logIndex = topicsdb.New(leveldb.NewProducer(dir, func(string) (int, int) { + return 100 * opt.MiB, 1000 + })) + defer backend.logIndex.Close() var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") diff --git a/integration/assembly.go b/integration/assembly.go index 6cf577017..793e03d04 100644 --- a/integration/assembly.go +++ b/integration/assembly.go @@ -9,7 +9,6 @@ import ( "github.com/Fantom-foundation/lachesis-base/hash" 
"github.com/Fantom-foundation/lachesis-base/inter/idx" "github.com/Fantom-foundation/lachesis-base/kvdb" - "github.com/Fantom-foundation/lachesis-base/kvdb/multidb" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/cmd/utils" @@ -132,31 +131,52 @@ func migrate(dbs kvdb.FlushableDBProducer, cfg Configs) error { return nil } -func makeEngine(genesisProducers, runtimeProducers map[multidb.TypeName]kvdb.IterableDBProducer, g *genesis.Genesis, emptyStart bool, cfg Configs) (*abft.Lachesis, *vecmt.Index, *gossip.Store, *abft.Store, gossip.BlockProc, func(), error) { - { - if emptyStart && g == nil { +func makeEngine(chaindataDir string, g *genesis.Genesis, genesisProc bool, cfg Configs) (*abft.Lachesis, *vecmt.Index, *gossip.Store, *abft.Store, gossip.BlockProc, func() error, error) { + // Genesis processing + if genesisProc { + // use increased DB cache for genesis processing + genesisProducers, _ := SupportedDBs(chaindataDir, cfg.DBs.GenesisCache) + if g == nil { return nil, nil, nil, nil, gossip.BlockProc{}, nil, fmt.Errorf("missing --genesis flag for an empty datadir") } - // open raw DBs for performance reasons - dbs, err := MakeRawMultiProducer(genesisProducers, cfg.DBs.Routing) + dbs, err := MakeDirectMultiProducer(genesisProducers, cfg.DBs.Routing) if err != nil { return nil, nil, nil, nil, gossip.BlockProc{}, nil, fmt.Errorf("failed to make DB multi-producer: %v", err) } - if emptyStart { - err = applyGenesis(dbs, *g, cfg) - if err != nil { - return nil, nil, nil, nil, gossip.BlockProc{}, nil, fmt.Errorf("failed to apply genesis state: %v", err) - } - } else { - err = migrate(dbs, cfg) - if err != nil { - return nil, nil, nil, nil, gossip.BlockProc{}, nil, fmt.Errorf("failed to migrate state: %v", err) - } + err = applyGenesis(dbs, *g, cfg) + if err != nil { + _ = dbs.Close() + return nil, nil, nil, nil, gossip.BlockProc{}, nil, fmt.Errorf("failed to apply genesis state: %v", err) + } + _ = dbs.Close() + } + // Check DBs are synced + { + runtimeProducers, runtimeScopedProducers := SupportedDBs(chaindataDir, cfg.DBs.RuntimeCache) + dbs, err := MakeMultiProducer(runtimeProducers, runtimeScopedProducers, cfg.DBs.Routing) + if err != nil { + return nil, nil, nil, nil, gossip.BlockProc{}, nil, err } + _ = dbs.Close() } + // Migration + { + runtimeProducers, _ := SupportedDBs(chaindataDir, cfg.DBs.RuntimeCache) + dbs, err := MakeDirectMultiProducer(runtimeProducers, cfg.DBs.Routing) + if err != nil { + return nil, nil, nil, nil, gossip.BlockProc{}, nil, err + } + err = migrate(dbs, cfg) + _ = dbs.Close() + if err != nil { + return nil, nil, nil, nil, gossip.BlockProc{}, nil, fmt.Errorf("failed to migrate state: %v", err) + } + } + // Live setup + runtimeProducers, runtimeScopedProducers := SupportedDBs(chaindataDir, cfg.DBs.RuntimeCache) // open flushable DBs - dbs, closeDBs, err := MakeFlushableMultiProducer(runtimeProducers, cfg.DBs.Routing) + dbs, err := MakeMultiProducer(runtimeProducers, runtimeScopedProducers, cfg.DBs.Routing) if err != nil { return nil, nil, nil, nil, gossip.BlockProc{}, nil, err } @@ -172,7 +192,7 @@ func makeEngine(genesisProducers, runtimeProducers map[multidb.TypeName]kvdb.Ite if err != nil { gdb.Close() cdb.Close() - closeDBs() + dbs.Close() } }() @@ -180,53 +200,45 @@ func makeEngine(genesisProducers, runtimeProducers map[multidb.TypeName]kvdb.Ite genesisID := gdb.GetGenesisID() if genesisID == nil { err = errors.New("malformed chainstore: genesis ID is not written") - return nil, nil, 
nil, nil, gossip.BlockProc{}, nil, err + return nil, nil, nil, nil, gossip.BlockProc{}, dbs.Close, err } if g != nil { if *genesisID != g.GenesisID { err = &GenesisMismatchError{*genesisID, g.GenesisID} - return nil, nil, nil, nil, gossip.BlockProc{}, nil, err + return nil, nil, nil, nil, gossip.BlockProc{}, dbs.Close, err } } engine, vecClock, blockProc, err := rawMakeEngine(gdb, cdb, nil, cfg) if err != nil { err = fmt.Errorf("failed to make engine: %v", err) - return nil, nil, nil, nil, gossip.BlockProc{}, nil, err + return nil, nil, nil, nil, gossip.BlockProc{}, dbs.Close, err } - if emptyStart { + if genesisProc { err = gdb.Commit() if err != nil { err = fmt.Errorf("failed to commit DBs: %v", err) - return nil, nil, nil, nil, gossip.BlockProc{}, nil, err + return nil, nil, nil, nil, gossip.BlockProc{}, dbs.Close, err } } - return engine, vecClock, gdb, cdb, blockProc, closeDBs, nil + return engine, vecClock, gdb, cdb, blockProc, dbs.Close, nil } // MakeEngine makes consensus engine from config. -func MakeEngine(chaindataDir string, g *genesis.Genesis, cfg Configs) (*abft.Lachesis, *vecmt.Index, *gossip.Store, *abft.Store, gossip.BlockProc, func()) { +func MakeEngine(chaindataDir string, g *genesis.Genesis, cfg Configs) (*abft.Lachesis, *vecmt.Index, *gossip.Store, *abft.Store, gossip.BlockProc, func() error) { + dropAllDBsIfInterrupted(chaindataDir) + firstLaunch := isEmpty(chaindataDir) MakeDBDirs(chaindataDir) - // use increased DB cache for genesis processing - genesisProducers, err := SupportedDBs(chaindataDir, cfg.DBs.GenesisCache) - if err != nil { - utils.Fatalf("Failed to initialize DB producers: %v", err) - } - runtimeProducers, err := SupportedDBs(chaindataDir, cfg.DBs.RuntimeCache) - if err != nil { - utils.Fatalf("Failed to initialize DB producers: %v", err) + if firstLaunch { + setGenesisProcessing(chaindataDir) } - firstLaunch := dropAllDBsIfInterrupted(runtimeProducers) - - engine, vecClock, gdb, cdb, blockProc, closeDBs, err := makeEngine(genesisProducers, runtimeProducers, g, firstLaunch, cfg) + engine, vecClock, gdb, cdb, blockProc, closeDBs, err := makeEngine(chaindataDir, g, firstLaunch, cfg) if err != nil { if firstLaunch { - for _, producer := range runtimeProducers { - dropAllDBs(producer) - } + dropAllDBs(chaindataDir) } utils.Fatalf("Failed to make engine: %v", err) } @@ -234,6 +246,7 @@ func MakeEngine(chaindataDir string, g *genesis.Genesis, cfg Configs) (*abft.Lac rules := gdb.GetRules() genesisID := gdb.GetGenesisID() if firstLaunch { + setGenesisComplete(chaindataDir) log.Info("Applied genesis state", "name", rules.Name, "id", rules.NetworkID, "genesis", genesisID.String()) } else { log.Info("Genesis is already written", "name", rules.Name, "id", rules.NetworkID, "genesis", genesisID.String()) diff --git a/integration/db.go b/integration/db.go index cd3ce6187..99f39b10f 100644 --- a/integration/db.go +++ b/integration/db.go @@ -1,6 +1,7 @@ package integration import ( + "io" "io/ioutil" "os" "path" @@ -9,14 +10,18 @@ import ( "github.com/Fantom-foundation/lachesis-base/hash" "github.com/Fantom-foundation/lachesis-base/inter/dag" "github.com/Fantom-foundation/lachesis-base/kvdb" + "github.com/Fantom-foundation/lachesis-base/kvdb/flaggedproducer" + "github.com/Fantom-foundation/lachesis-base/kvdb/flushable" "github.com/Fantom-foundation/lachesis-base/kvdb/leveldb" "github.com/Fantom-foundation/lachesis-base/kvdb/multidb" "github.com/Fantom-foundation/lachesis-base/kvdb/pebble" "github.com/Fantom-foundation/lachesis-base/utils/fmtfilter" 
"github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/log" "github.com/syndtr/goleveldb/leveldb/opt" "github.com/Fantom-foundation/go-opera/gossip" + "github.com/Fantom-foundation/go-opera/utils/dbutil/asyncflushproducer" ) type DBsConfig struct { @@ -104,18 +109,37 @@ func DefaultGenesisDBsCacheConfig(scale func(uint64) uint64, fdlimit uint64) DBs } } -func SupportedDBs(chaindataDir string, cfg DBsCacheConfig) (map[multidb.TypeName]kvdb.IterableDBProducer, error) { +func SupportedDBs(chaindataDir string, cfg DBsCacheConfig) (map[multidb.TypeName]kvdb.IterableDBProducer, map[multidb.TypeName]kvdb.FullDBProducer) { if chaindataDir == "inmemory" || chaindataDir == "" { chaindataDir, _ = ioutil.TempDir("", "opera-tmp") } cacher, err := dbCacheFdlimit(cfg) if err != nil { - return nil, err + utils.Fatalf("Failed to create DB cacher: %v", err) } + + leveldbFsh := leveldb.NewProducer(path.Join(chaindataDir, "leveldb-fsh"), cacher) + leveldbFlg := leveldb.NewProducer(path.Join(chaindataDir, "leveldb-flg"), cacher) + leveldbDrc := leveldb.NewProducer(path.Join(chaindataDir, "leveldb-drc"), cacher) + pebbleFsh := pebble.NewProducer(path.Join(chaindataDir, "pebble-fsh"), cacher) + pebbleFlg := pebble.NewProducer(path.Join(chaindataDir, "pebble-flg"), cacher) + pebbleDrc := pebble.NewProducer(path.Join(chaindataDir, "pebble-drc"), cacher) + return map[multidb.TypeName]kvdb.IterableDBProducer{ - "leveldb": leveldb.NewProducer(path.Join(chaindataDir, "leveldb"), cacher), - "pebble": pebble.NewProducer(path.Join(chaindataDir, "pebble"), cacher), - }, nil + "leveldb-fsh": leveldbFsh, + "leveldb-flg": leveldbFlg, + "leveldb-drc": leveldbDrc, + "pebble-fsh": pebbleFsh, + "pebble-flg": pebbleFlg, + "pebble-drc": pebbleDrc, + }, map[multidb.TypeName]kvdb.FullDBProducer{ + "leveldb-fsh": flushable.NewSyncedPool(leveldbFsh, FlushIDKey), + "leveldb-flg": flaggedproducer.Wrap(leveldbFlg, FlushIDKey), + "leveldb-drc": &DummyScopedProducer{leveldbDrc}, + "pebble-fsh": asyncflushproducer.Wrap(flushable.NewSyncedPool(pebbleFsh, FlushIDKey), 200000), + "pebble-flg": flaggedproducer.Wrap(pebbleFlg, FlushIDKey), + "pebble-drc": &DummyScopedProducer{pebbleDrc}, + } } func dbCacheFdlimit(cfg DBsCacheConfig) (func(string) (int, int), error) { @@ -151,56 +175,28 @@ func dbCacheFdlimit(cfg DBsCacheConfig) (func(string) (int, int), error) { }, nil } -func dropAllDBs(producer kvdb.IterableDBProducer) { - names := producer.Names() - for _, name := range names { - db, err := producer.OpenDB(name) - if err != nil { - continue - } - _ = db.Close() - db.Drop() +func isEmpty(dir string) bool { + f, err := os.Open(dir) + if err != nil { + return true } -} - -func isInterrupted(rawProducers map[multidb.TypeName]kvdb.IterableDBProducer) bool { - for _, producer := range rawProducers { - names := producer.Names() - for _, name := range names { - db, err := producer.OpenDB(name) - if err != nil { - return false - } - flushID, err := db.Get(FlushIDKey) - _ = db.Close() - if err != nil { - return false - } - if flushID == nil { - return true - } - } + defer f.Close() + _, err = f.Readdirnames(1) + if err == io.EOF { + return true } return false } -func isEmpty(rawProducers map[multidb.TypeName]kvdb.IterableDBProducer) bool { - for _, producer := range rawProducers { - if len(producer.Names()) > 0 { - return false - } - } - return true +func dropAllDBs(chaindataDir string) { + _ = os.RemoveAll(chaindataDir) } -func dropAllDBsIfInterrupted(rawProducers map[multidb.TypeName]kvdb.IterableDBProducer) bool { - if 
isInterrupted(rawProducers) { - for _, producer := range rawProducers { - dropAllDBs(producer) - } - return true +func dropAllDBsIfInterrupted(chaindataDir string) { + if isInterrupted(chaindataDir) { + log.Info("Restarting genesis processing") + dropAllDBs(chaindataDir) } - return isEmpty(rawProducers) } type GossipStoreAdapter struct { @@ -215,23 +211,31 @@ func (g *GossipStoreAdapter) GetEvent(id hash.Event) dag.Event { return e } -type DummyFlushableProducer struct { +func MakeDBDirs(chaindataDir string) { + dbs, _ := SupportedDBs(chaindataDir, DBsCacheConfig{}) + for typ := range dbs { + if err := os.MkdirAll(path.Join(chaindataDir, string(typ)), 0700); err != nil { + utils.Fatalf("Failed to create chaindata/leveldb directory: %v", err) + } + } +} + +type DummyScopedProducer struct { kvdb.IterableDBProducer } -func (p *DummyFlushableProducer) NotFlushedSizeEst() int { +func (d DummyScopedProducer) NotFlushedSizeEst() int { return 0 } -func (p *DummyFlushableProducer) Flush(_ []byte) error { +func (d DummyScopedProducer) Flush(_ []byte) error { return nil } -func MakeDBDirs(chaindataDir string) { - if err := os.MkdirAll(path.Join(chaindataDir, "leveldb"), 0700); err != nil { - utils.Fatalf("Failed to create chaindata/leveldb directory: %v", err) - } - if err := os.MkdirAll(path.Join(chaindataDir, "pebble"), 0700); err != nil { - utils.Fatalf("Failed to create chaindata/pebble directory: %v", err) - } +func (d DummyScopedProducer) Initialize(_ []string, flushID []byte) ([]byte, error) { + return flushID, nil +} + +func (d DummyScopedProducer) Close() error { + return nil } diff --git a/integration/routing.go b/integration/routing.go index 29fd607ce..542eb6f83 100644 --- a/integration/routing.go +++ b/integration/routing.go @@ -5,7 +5,6 @@ import ( "github.com/Fantom-foundation/lachesis-base/kvdb" "github.com/Fantom-foundation/lachesis-base/kvdb/cachedproducer" - "github.com/Fantom-foundation/lachesis-base/kvdb/flushable" "github.com/Fantom-foundation/lachesis-base/kvdb/multidb" "github.com/Fantom-foundation/lachesis-base/kvdb/skipkeys" ) @@ -18,83 +17,74 @@ func DefaultRoutingConfig() RoutingConfig { return RoutingConfig{ Table: map[string]multidb.Route{ "": { - Type: "pebble", + Type: "pebble-fsh", }, "lachesis": { - Type: "pebble", + Type: "pebble-fsh", Name: "main", Table: ">", }, "gossip": { - Type: "pebble", + Type: "pebble-fsh", Name: "main", }, "evm": { - Type: "pebble", + Type: "pebble-fsh", Name: "main", }, "gossip/e": { - Type: "pebble", + Type: "pebble-fsh", Name: "events", }, "evm/M": { - Type: "pebble", + Type: "pebble-drc", Name: "evm-data", }, "evm-logs": { - Type: "pebble", + Type: "pebble-fsh", Name: "evm-logs", }, "gossip-%d": { - Type: "leveldb", + Type: "leveldb-fsh", Name: "epoch-%d", Table: "G", }, "lachesis-%d": { - Type: "leveldb", - Name: "epoch-%d", - Table: "L", + Type: "leveldb-fsh", + Name: "epoch-%d", + Table: "L", + NoDrop: true, }, }, } } -func MakeFlushableMultiProducer(rawProducers map[multidb.TypeName]kvdb.IterableDBProducer, cfg RoutingConfig) (kvdb.FullDBProducer, func(), error) { - flushables := make(map[multidb.TypeName]kvdb.FullDBProducer) +func MakeMultiProducer(rawProducers map[multidb.TypeName]kvdb.IterableDBProducer, scopedProducers map[multidb.TypeName]kvdb.FullDBProducer, cfg RoutingConfig) (kvdb.FullDBProducer, error) { + cachedProducers := make(map[multidb.TypeName]kvdb.FullDBProducer) var flushID []byte var err error - var closeDBs = func() {} - for typ, producer := range rawProducers { - existingDBs := producer.Names() - flushablePool := 
flushable.NewSyncedPool(producer, FlushIDKey) - prevCloseDBs := closeDBs - closeDBs = func() { - prevCloseDBs() - _ = flushablePool.Close() - } - flushID, err = flushablePool.Initialize(existingDBs, flushID) + for typ, producer := range scopedProducers { + flushID, err = producer.Initialize(rawProducers[typ].Names(), flushID) if err != nil { - return nil, nil, fmt.Errorf("failed to open existing databases: %v", err) + return nil, fmt.Errorf("failed to open existing databases: %v", err) } - flushables[typ] = cachedproducer.WrapAll(flushablePool) + cachedProducers[typ] = cachedproducer.WrapAll(producer) } - p, err := makeMultiProducer(flushables, cfg) - return p, closeDBs, err + p, err := makeMultiProducer(cachedProducers, cfg) + return p, err } -func MakeRawMultiProducer(rawProducers map[multidb.TypeName]kvdb.IterableDBProducer, cfg RoutingConfig) (kvdb.FullDBProducer, error) { - flushables := make(map[multidb.TypeName]kvdb.FullDBProducer) +func MakeDirectMultiProducer(rawProducers map[multidb.TypeName]kvdb.IterableDBProducer, cfg RoutingConfig) (kvdb.FullDBProducer, error) { + dproducers := map[multidb.TypeName]kvdb.FullDBProducer{} for typ, producer := range rawProducers { - flushables[typ] = cachedproducer.WrapAll(&DummyFlushableProducer{producer}) + dproducers[typ] = &DummyScopedProducer{producer} } - - p, err := makeMultiProducer(flushables, cfg) - return p, err + return MakeMultiProducer(rawProducers, dproducers, cfg) } -func makeMultiProducer(producers map[multidb.TypeName]kvdb.FullDBProducer, cfg RoutingConfig) (kvdb.FullDBProducer, error) { - multi, err := multidb.NewProducer(producers, cfg.Table, TablesKey) +func makeMultiProducer(scopedProducers map[multidb.TypeName]kvdb.FullDBProducer, cfg RoutingConfig) (kvdb.FullDBProducer, error) { + multi, err := multidb.NewProducer(scopedProducers, cfg.Table, TablesKey) if err != nil { return nil, fmt.Errorf("failed to construct multidb: %v", err) } diff --git a/integration/status.go b/integration/status.go new file mode 100644 index 000000000..b87b6af55 --- /dev/null +++ b/integration/status.go @@ -0,0 +1,22 @@ +package integration + +import ( + "os" + "path" +) + +func isInterrupted(chaindataDir string) bool { + _, err := os.Stat(path.Join(chaindataDir, "unfinished")) + return err == nil +} + +func setGenesisProcessing(chaindataDir string) { + f, _ := os.Create(path.Join(chaindataDir, "unfinished")) + if f != nil { + _ = f.Close() + } +} + +func setGenesisComplete(chaindataDir string) { + _ = os.Remove(path.Join(chaindataDir, "unfinished")) +} diff --git a/topicsdb/search_test.go b/topicsdb/search_test.go index b247e04c0..1bc2d8432 100644 --- a/topicsdb/search_test.go +++ b/topicsdb/search_test.go @@ -15,7 +15,7 @@ func BenchmarkSearch(b *testing.B) { topics, recs, topics4rec := genTestData(1000) mem := memorydb.NewProducer("") - index := New(mem, false) + index := New(mem) for _, rec := range recs { err := index.Push(rec) diff --git a/utils/dbutil/asyncflushproducer/producer.go b/utils/dbutil/asyncflushproducer/producer.go new file mode 100644 index 000000000..e4b01d9ea --- /dev/null +++ b/utils/dbutil/asyncflushproducer/producer.go @@ -0,0 +1,80 @@ +package asyncflushproducer + +import ( + "errors" + "sync" + + "github.com/Fantom-foundation/lachesis-base/kvdb" + "github.com/ethereum/go-ethereum/metrics" +) + +type Producer struct { + kvdb.FullDBProducer + mu sync.Mutex + dbs map[string]*store + stats metrics.Meter + + threshold uint64 +} + +func Wrap(backend kvdb.FullDBProducer, threshold uint64) *Producer { + return &Producer{ + stats: 
metrics.NewMeterForced(), + FullDBProducer: backend, + dbs: make(map[string]*store), + threshold: threshold, + } +} + +func (f *Producer) OpenDB(name string) (kvdb.Store, error) { + // open existing DB + f.mu.Lock() + openedDB := f.dbs[name] + f.mu.Unlock() + if openedDB != nil { + return openedDB, nil + } + // create new DB + db, err := f.FullDBProducer.OpenDB(name) + if err != nil { + return nil, err + } + f.mu.Lock() + defer f.mu.Unlock() + if f.dbs[name] != nil { + return nil, errors.New("already opened") + } + wrapped := &store{ + Store: db, + CloseFn: func() error { + f.mu.Lock() + delete(f.dbs, name) + f.mu.Unlock() + return db.Close() + }, + } + f.dbs[name] = wrapped + return wrapped, nil +} + +func (f *Producer) Flush(id []byte) error { + f.stats.Mark(int64(f.FullDBProducer.NotFlushedSizeEst())) + + err := f.FullDBProducer.Flush(id) + if err != nil { + return err + } + + // trigger flushing data to disk if throughput is below a threshold + if uint64(f.stats.Rate1()) <= f.threshold { + go func() { + f.mu.Lock() + defer f.mu.Unlock() + for _, db := range f.dbs { + _, _ = db.Stat("async_flush") + } + }() + } + + return nil +} diff --git a/utils/dbutil/asyncflushproducer/store.go b/utils/dbutil/asyncflushproducer/store.go new file mode 100644 index 000000000..1aa57d88d --- /dev/null +++ b/utils/dbutil/asyncflushproducer/store.go @@ -0,0 +1,14 @@ +package asyncflushproducer + +import ( + "github.com/Fantom-foundation/lachesis-base/kvdb" +) + +type store struct { + kvdb.Store + CloseFn func() error +} + +func (s *store) Close() error { + return s.CloseFn() +} diff --git a/utils/migration/migration.go b/utils/migration/migration.go index 72151edac..b936ea1aa 100644 --- a/utils/migration/migration.go +++ b/utils/migration/migration.go @@ -39,22 +39,26 @@ func (m *Migration) Next(name string, exec func() error) *Migration { } } -// ID is an uniq migration's id. -func (m *Migration) ID() string { +func idOf(name string) string { digest := sha256.New() - digest.Write([]byte(m.name)) + digest.Write([]byte(name)) bytes := digest.Sum(nil) return fmt.Sprintf("%x", bytes) } +// ID is an uniq migration's id. 
+func (m *Migration) ID() string { + return idOf(m.name) +} + // Exec method run migrations chain in order func (m *Migration) Exec(curr IDStore, flush func() error) error { currID := curr.GetID() myID := m.ID() if m.veryFirst() { - if currID != "" && currID != myID { + if currID != myID { return errors.New("unknown version: " + currID) } return nil diff --git a/utils/migration/migration_test.go b/utils/migration/migration_test.go index b4fa1b8f9..1b11be53b 100644 --- a/utils/migration/migration_test.go +++ b/utils/migration/migration_test.go @@ -15,6 +15,7 @@ func TestMigrations(t *testing.T) { require := require.New(t) native := Begin("native") + curVer.lastID = idOf("native") num := 1 lastGood := native.Next("01", From 430a0d8ad9604c26d33db58cb8cca392b47f81b5 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Tue, 26 Jul 2022 01:01:37 +0700 Subject: [PATCH 38/87] add automatic EVM recover after abrupt stopping --- gossip/c_block_callbacks.go | 42 +++++++++++++++++++++++++++++++++++++ gossip/service.go | 1 + 2 files changed, 43 insertions(+) diff --git a/gossip/c_block_callbacks.go b/gossip/c_block_callbacks.go index c85214c80..735e5b760 100644 --- a/gossip/c_block_callbacks.go +++ b/gossip/c_block_callbacks.go @@ -458,6 +458,48 @@ func consensusCallbackBeginBlockFn( } } +func (s *Service) ReexecuteBlocks(from, to idx.Block) { + blockProc := s.blockProcModules + upgradeHeights := s.store.GetUpgradeHeights() + evmStateReader := s.GetEvmStateReader() + prev := s.store.GetBlock(from) + for b := from + 1; b <= to; b++ { + block := s.store.GetBlock(b) + blockCtx := iblockproc.BlockCtx{ + Idx: b, + Time: block.Time, + Atropos: block.Atropos, + } + statedb, err := s.store.evm.StateDB(prev.Root) + if err != nil { + log.Crit("Failue to re-execute blocks", "err", err) + } + es := s.store.GetHistoryEpochState(s.store.FindBlockEpoch(b)) + evmProcessor := blockProc.EVMModule.Start(blockCtx, statedb, evmStateReader, func(t *types.Log) {}, es.Rules, es.Rules.EvmChainConfig(upgradeHeights)) + txs := s.store.GetBlockTxs(b, block) + evmProcessor.Execute(txs) + evmProcessor.Finalize() + _ = s.store.evm.Commit(b, block.Root, false) + s.store.evm.Cap() + s.mayCommit(false) + prev = block + } +} + +func (s *Service) RecoverEVM() { + start := s.store.GetLatestBlockIndex() + for b := start; b >= 1 && b > start-20000; b-- { + block := s.store.GetBlock(b) + if s.store.evm.HasStateDB(block.Root) { + if b != start { + s.Log.Warn("Reexecuting blocks after abrupt stopping", "from", b, "to", start) + s.ReexecuteBlocks(b, start) + } + break + } + } +} + // spillBlockEvents excludes first events which exceed MaxBlockGas func spillBlockEvents(store *Store, block *inter.Block, network opera.Rules) (*inter.Block, inter.EventPayloads) { fullEvents := make(inter.EventPayloads, len(block.Events)) diff --git a/gossip/service.go b/gossip/service.go index f6b2b42b8..464d6f346 100644 --- a/gossip/service.go +++ b/gossip/service.go @@ -433,6 +433,7 @@ func (s *Service) Start() error { if s.store.evm.IsEvmSnapshotPaused() && !s.config.AllowSnapsync { return errors.New("cannot halt snapsync and start fullsync") } + s.RecoverEVM() root := s.store.GetBlockState().FinalizedStateRoot if !s.store.evm.HasStateDB(root) { if !s.config.AllowSnapsync { From b568d12d5de28840e0c796e4fcd8f5a07246e749 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Tue, 26 Jul 2022 01:02:26 +0700 Subject: [PATCH 39/87] add automatic migration from pre-multi-db layout --- integration/assembly.go | 16 +++ integration/diskusage.go | 42 +++++++ 
integration/diskusage_openbsd.go | 43 +++++++ integration/diskusage_windows.go | 38 ++++++ integration/legacy_migrate.go | 191 +++++++++++++++++++++++++++++++ 5 files changed, 330 insertions(+) create mode 100644 integration/diskusage.go create mode 100644 integration/diskusage_openbsd.go create mode 100644 integration/diskusage_windows.go create mode 100644 integration/legacy_migrate.go diff --git a/integration/assembly.go b/integration/assembly.go index 793e03d04..8a07c141b 100644 --- a/integration/assembly.go +++ b/integration/assembly.go @@ -4,6 +4,7 @@ import ( "crypto/ecdsa" "errors" "fmt" + "path" "github.com/Fantom-foundation/lachesis-base/abft" "github.com/Fantom-foundation/lachesis-base/hash" @@ -228,6 +229,21 @@ func makeEngine(chaindataDir string, g *genesis.Genesis, genesisProc bool, cfg C // MakeEngine makes consensus engine from config. func MakeEngine(chaindataDir string, g *genesis.Genesis, cfg Configs) (*abft.Lachesis, *vecmt.Index, *gossip.Store, *abft.Store, gossip.BlockProc, func() error) { + // Legacy DBs migrate + if !isEmpty(path.Join(chaindataDir, "gossip")) { + MakeDBDirs(chaindataDir) + genesisProducers, _ := SupportedDBs(chaindataDir, cfg.DBs.GenesisCache) + dbs, err := MakeDirectMultiProducer(genesisProducers, cfg.DBs.Routing) + if err != nil { + utils.Fatalf("Failed to make engine: %v", err) + } + err = migrateLegacyDBs(chaindataDir, dbs) + _ = dbs.Close() + if err != nil { + utils.Fatalf("Failed to migrate state: %v", err) + } + } + dropAllDBsIfInterrupted(chaindataDir) firstLaunch := isEmpty(chaindataDir) MakeDBDirs(chaindataDir) diff --git a/integration/diskusage.go b/integration/diskusage.go new file mode 100644 index 000000000..707b082d4 --- /dev/null +++ b/integration/diskusage.go @@ -0,0 +1,42 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// +build !windows,!openbsd + +package integration + +import ( + "fmt" + + "golang.org/x/sys/unix" +) + +func getFreeDiskSpace(path string) (uint64, error) { + var stat unix.Statfs_t + if err := unix.Statfs(path, &stat); err != nil { + return 0, fmt.Errorf("failed to call Statfs: %v", err) + } + + // Available blocks * size per block = available space in bytes + var bavail = stat.Bavail + if stat.Bavail < 0 { + // FreeBSD can have a negative number of blocks available + // because of the grace limit. + bavail = 0 + } + //nolint:unconvert + return uint64(bavail) * uint64(stat.Bsize), nil +} diff --git a/integration/diskusage_openbsd.go b/integration/diskusage_openbsd.go new file mode 100644 index 000000000..7d2986d05 --- /dev/null +++ b/integration/diskusage_openbsd.go @@ -0,0 +1,43 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// +build openbsd + +package integration + +import ( + "fmt" + + "golang.org/x/sys/unix" +) + +func getFreeDiskSpace(path string) (uint64, error) { + var stat unix.Statfs_t + if err := unix.Statfs(path, &stat); err != nil { + return 0, fmt.Errorf("failed to call Statfs: %v", err) + } + + // Available blocks * size per block = available space in bytes + var bavail = stat.F_bavail + // Not sure if the following check is necessary for OpenBSD + if stat.F_bavail < 0 { + // FreeBSD can have a negative number of blocks available + // because of the grace limit. + bavail = 0 + } + //nolint:unconvert + return uint64(bavail) * uint64(stat.F_bsize), nil +} diff --git a/integration/diskusage_windows.go b/integration/diskusage_windows.go new file mode 100644 index 000000000..8195fc5dc --- /dev/null +++ b/integration/diskusage_windows.go @@ -0,0 +1,38 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package integration + +import ( + "fmt" + + "golang.org/x/sys/windows" +) + +func getFreeDiskSpace(path string) (uint64, error) { + + cwd, err := windows.UTF16PtrFromString(path) + if err != nil { + return 0, fmt.Errorf("failed to call UTF16PtrFromString: %v", err) + } + + var freeBytesAvailableToCaller, totalNumberOfBytes, totalNumberOfFreeBytes uint64 + if err := windows.GetDiskFreeSpaceEx(cwd, &freeBytesAvailableToCaller, &totalNumberOfBytes, &totalNumberOfFreeBytes); err != nil { + return 0, fmt.Errorf("failed to call GetDiskFreeSpaceEx: %v", err) + } + + return freeBytesAvailableToCaller, nil +} diff --git a/integration/legacy_migrate.go b/integration/legacy_migrate.go new file mode 100644 index 000000000..dbdbff8bc --- /dev/null +++ b/integration/legacy_migrate.go @@ -0,0 +1,191 @@ +package integration + +import ( + "fmt" + "path" + "strings" + + "github.com/Fantom-foundation/lachesis-base/kvdb" + "github.com/Fantom-foundation/lachesis-base/kvdb/batched" + "github.com/Fantom-foundation/lachesis-base/kvdb/leveldb" + "github.com/Fantom-foundation/lachesis-base/kvdb/skipkeys" + "github.com/Fantom-foundation/lachesis-base/kvdb/table" + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/syndtr/goleveldb/leveldb/opt" +) + +func moveDB(src, dst_ kvdb.Store, name, dir string) error { + dst := batched.Wrap(dst_) + defer dst.Flush() + + // start from previously written data, if any + var start []byte + for b := 0xff; b > 0; b-- { + if !isEmptyDB(table.New(dst, []byte{byte(b)})) { + start = []byte{byte(b)} + break + } + } + + const batchKeys = 1000000 + keys := make([][]byte, 0, batchKeys) + values := make([][]byte, 0, batchKeys) + it := src.NewIterator(nil, start) + defer it.Release() + log.Info("Transforming DB layout", "db", name) + for next := true; next; { + for len(keys) < batchKeys { + next = it.Next() + if !next { + break + } + keys = append(keys, common.CopyBytes(it.Key())) + values = append(values, common.CopyBytes(it.Value())) + } + for i := 0; i < len(keys); i++ { + err := dst.Put(keys[i], values[i]) + if err != nil { + return err + } + } + freeSpace, err := getFreeDiskSpace(dir) + if err != nil { + log.Error("Failed to retrieve free disk space", "err", err) + } else if len(keys) > 0 && freeSpace < 50*opt.GiB { + log.Warn("Not enough disk space. 
Trimming source DB records", "space_GB", freeSpace/opt.GiB) + _ = dst.Flush() + _, _ = dst.Stat("async_flush") + for i := 0; i < len(keys); i++ { + err := src.Delete(keys[i]) + if err != nil { + return err + } + } + _ = src.Compact(keys[0], keys[len(keys)-1]) + } + keys = keys[:0] + values = values[:0] + } + return nil +} + +func isEmptyDB(db kvdb.Iteratee) bool { + it := db.NewIterator(nil, nil) + defer it.Release() + return !it.Next() +} + +func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer) error { + if !isEmpty(path.Join(chaindataDir, "gossip")) { + // migrate DB layout + cacheFn, err := dbCacheFdlimit(DBsCacheConfig{ + Table: map[string]DBCacheConfig{ + "": { + Cache: 1024 * opt.MiB, + Fdlimit: uint64(utils.MakeDatabaseHandles() / 2), + }, + }, + }) + if err != nil { + return err + } + oldDBs := leveldb.NewProducer(chaindataDir, cacheFn) + + // move lachesis DB + lachesisDB, err := oldDBs.OpenDB("lachesis") + if err != nil { + return err + } + newDB, err := dbs.OpenDB("lachesis") + if err != nil { + return err + } + err = moveDB(skipkeys.Wrap(lachesisDB, MetadataPrefix), newDB, "lachesis", chaindataDir) + newDB.Close() + lachesisDB.Close() + if err != nil { + return err + } + lachesisDB.Drop() + + // move lachesis-%d and gossip-%d DBs + for _, name := range oldDBs.Names() { + if strings.HasPrefix(name, "lachesis-") || strings.HasPrefix(name, "gossip-") { + oldDB, err := oldDBs.OpenDB(name) + if err != nil { + return err + } + newDB, err = dbs.OpenDB(name) + if err != nil { + return err + } + err = moveDB(skipkeys.Wrap(oldDB, MetadataPrefix), newDB, name, chaindataDir) + newDB.Close() + oldDB.Close() + if err != nil { + return err + } + oldDB.Drop() + } + } + + // move gossip DB + gossipDB, err := oldDBs.OpenDB("gossip") + if err != nil { + return err + } + if err = func() error { + defer gossipDB.Close() + + // move logs + newDB, err = dbs.OpenDB("evm-logs/r") + if err != nil { + return err + } + err = moveDB(table.New(gossipDB, []byte("Lr")), newDB, "gossip/Lr", chaindataDir) + newDB.Close() + if err != nil { + return err + } + + newDB, err = dbs.OpenDB("evm-logs/t") + err = moveDB(table.New(gossipDB, []byte("Lt")), newDB, "gossip/Lt", chaindataDir) + newDB.Close() + if err != nil { + return err + } + + // skip 0 prefix, as it contains flushID + for b := 1; b <= 0xff; b++ { + if b == int('L') { + // logs are already moved above + continue + } + if isEmptyDB(table.New(gossipDB, []byte{byte(b)})) { + continue + } + if b == int('M') || b == int('r') || b == int('x') || b == int('X') { + newDB, err = dbs.OpenDB("evm/" + string([]byte{byte(b)})) + } else { + newDB, err = dbs.OpenDB("gossip/" + string([]byte{byte(b)})) + } + if err != nil { + return err + } + err = moveDB(table.New(gossipDB, []byte{byte(b)}), newDB, fmt.Sprintf("gossip/%c", rune(b)), chaindataDir) + newDB.Close() + if err != nil { + return err + } + } + return nil + }(); err != nil { + return err + } + gossipDB.Drop() + } + + return nil +} From e369a09c7b86c692ddee18f06e8876dea5107bd5 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Tue, 26 Jul 2022 01:05:04 +0700 Subject: [PATCH 40/87] rename `db migrate` to `db transform` --- .../launcher/{db-migrate.go => db-transform.go} | 16 ++++++++-------- cmd/opera/launcher/dbcmd.go | 8 ++++---- integration/routing.go | 2 +- 3 files changed, 13 insertions(+), 13 deletions(-) rename cmd/opera/launcher/{db-migrate.go => db-transform.go} (94%) diff --git a/cmd/opera/launcher/db-migrate.go b/cmd/opera/launcher/db-transform.go similarity index 94% rename from 
cmd/opera/launcher/db-migrate.go rename to cmd/opera/launcher/db-transform.go index 96972ad5d..a53eb6341 100644 --- a/cmd/opera/launcher/db-migrate.go +++ b/cmd/opera/launcher/db-transform.go @@ -21,7 +21,7 @@ import ( "github.com/Fantom-foundation/go-opera/integration" ) -func dbMigrate(ctx *cli.Context) error { +func dbTransform(ctx *cli.Context) error { cfg := makeAllConfigs(ctx) tmpPath := path.Join(cfg.Node.DataDir, "tmp") @@ -38,7 +38,7 @@ func dbMigrate(ctx *cli.Context) error { } byDB := separateIntoDBs(byReq) - // weed out DBs which don't need migration + // weed out DBs which don't need transformation { for _, byReqOfDB := range byDB { match := true @@ -56,7 +56,7 @@ func dbMigrate(ctx *cli.Context) error { } } if len(byReq) == 0 { - log.Info("No DB migration is needed") + log.Info("No DB transformation is needed") return nil } @@ -103,9 +103,9 @@ func dbMigrate(ctx *cli.Context) error { tmpDbTypes := getDBProducersFor(path.Join(cfg.Node.DataDir, "tmp")) for _, component := range byComponents { - err := migrateComponent(cfg.Node.DataDir, dbTypes, tmpDbTypes, component) + err := transformComponent(cfg.Node.DataDir, dbTypes, tmpDbTypes, component) if err != nil { - log.Crit("Failed to migrate component", "err", err) + log.Crit("Failed to transform component", "err", err) } } id := bigendian.Uint64ToBytes(uint64(time.Now().UnixNano())) @@ -116,7 +116,7 @@ func dbMigrate(ctx *cli.Context) error { } } - log.Info("DB migration is complete") + log.Info("DB transformation is complete") return nil } @@ -235,9 +235,9 @@ func interchangeableType(a_, b_ multidb.TypeName, types map[multidb.TypeName]kvd return false } -func migrateComponent(datadir string, dbTypes, tmpDbTypes map[multidb.TypeName]kvdb.FullDBProducer, byReq map[string]dbMigrationEntry) error { +func transformComponent(datadir string, dbTypes, tmpDbTypes map[multidb.TypeName]kvdb.FullDBProducer, byReq map[string]dbMigrationEntry) error { byDB := separateIntoDBs(byReq) - // if it can be migrated just by DB renaming + // if it can be transformed just by DB renaming if len(byDB) == 2 { oldDB := multidb.DBLocator{} newDB := multidb.DBLocator{} diff --git a/cmd/opera/launcher/dbcmd.go b/cmd/opera/launcher/dbcmd.go index a6ccaa172..cc7848002 100644 --- a/cmd/opera/launcher/dbcmd.go +++ b/cmd/opera/launcher/dbcmd.go @@ -35,16 +35,16 @@ will compact all databases under datadir's chaindata. `, }, { - Name: "migrate", - Usage: "Migrate tables layout", + Name: "transform", + Usage: "Transform DBs layout", ArgsUsage: "", - Action: utils.MigrateFlags(dbMigrate), + Action: utils.MigrateFlags(dbTransform), Category: "DB COMMANDS", Flags: []cli.Flag{ utils.DataDirFlag, }, Description: ` -opera db migrate +opera db transform will migrate tables layout according to the configuration. `, }, diff --git a/integration/routing.go b/integration/routing.go index 542eb6f83..22e39f49e 100644 --- a/integration/routing.go +++ b/integration/routing.go @@ -91,7 +91,7 @@ func makeMultiProducer(scopedProducers map[multidb.TypeName]kvdb.FullDBProducer, err = multi.Verify() if err != nil { - return nil, fmt.Errorf("incompatible chainstore DB layout: %v. Try to use 'db migrate' to recover", err) + return nil, fmt.Errorf("incompatible chainstore DB layout: %v. 
Try to use 'db transform' to recover", err) } return skipkeys.WrapAllProducer(multi, MetadataPrefix), nil } From b408c39e7cb13effa91fb685bd44f75cbb398394 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Tue, 26 Jul 2022 01:07:03 +0700 Subject: [PATCH 41/87] rename `db fix` to `db heal` --- cmd/opera/launcher/dbcmd.go | 10 +++++----- cmd/opera/launcher/fixdirty.go | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/cmd/opera/launcher/dbcmd.go b/cmd/opera/launcher/dbcmd.go index cc7848002..2bd26a6fa 100644 --- a/cmd/opera/launcher/dbcmd.go +++ b/cmd/opera/launcher/dbcmd.go @@ -49,18 +49,18 @@ will migrate tables layout according to the configuration. `, }, { - Name: "fix", - Usage: "Experimental - try to fix dirty DB", + Name: "heal", + Usage: "Experimental - try to heal dirty DB", ArgsUsage: "", - Action: utils.MigrateFlags(fixDirty), + Action: utils.MigrateFlags(healDirty), Category: "DB COMMANDS", Flags: []cli.Flag{ utils.DataDirFlag, experimentalFlag, }, Description: ` -opera db fix --experimental -Experimental - try to fix dirty DB. +opera db heal --experimental +Experimental - try to heal dirty DB. `, }, }, diff --git a/cmd/opera/launcher/fixdirty.go b/cmd/opera/launcher/fixdirty.go index 36ad17c28..20641c95d 100644 --- a/cmd/opera/launcher/fixdirty.go +++ b/cmd/opera/launcher/fixdirty.go @@ -24,8 +24,8 @@ import ( // maxEpochsToTry represents amount of last closed epochs to try (in case that the last one has the state unavailable) const maxEpochsToTry = 10000 -// fixDirty is the fixdirty command. -func fixDirty(ctx *cli.Context) error { +// healDirty is the 'db heal' command. +func healDirty(ctx *cli.Context) error { if !ctx.Bool(experimentalFlag.Name) { utils.Fatalf("Add --experimental flag") } From 60f65240c9495d3399a6bbddee4f8c7994eae9b4 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Tue, 26 Jul 2022 01:08:24 +0700 Subject: [PATCH 42/87] tip to use `db heal` command --- integration/routing.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration/routing.go b/integration/routing.go index 22e39f49e..b1e3b8c5b 100644 --- a/integration/routing.go +++ b/integration/routing.go @@ -66,7 +66,7 @@ func MakeMultiProducer(rawProducers map[multidb.TypeName]kvdb.IterableDBProducer for typ, producer := range scopedProducers { flushID, err = producer.Initialize(rawProducers[typ].Names(), flushID) if err != nil { - return nil, fmt.Errorf("failed to open existing databases: %v", err) + return nil, fmt.Errorf("failed to open existing databases: %v. 
Try to use 'db heal' to recover", err) } cachedProducers[typ] = cachedproducer.WrapAll(producer) } From c192e0ca25c01411748e7afd855ab07665795d08 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Tue, 26 Jul 2022 01:20:36 +0700 Subject: [PATCH 43/87] reduce scaling of MaxNonFlushedSize with RAM size --- gossip/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gossip/config.go b/gossip/config.go index 5b6d78bd1..ebbd5d34d 100644 --- a/gossip/config.go +++ b/gossip/config.go @@ -269,7 +269,7 @@ func DefaultStoreConfig(scale cachescale.Func) StoreConfig { LlrEpochVotesIndexes: scale.I(5), }, EVM: evmstore.DefaultStoreConfig(scale), - MaxNonFlushedSize: 17*opt.MiB + scale.I(5*opt.MiB), + MaxNonFlushedSize: 20*opt.MiB + scale.I(2*opt.MiB), MaxNonFlushedPeriod: 30 * time.Minute, } } From bbcd5e31830f6153c5fd05e4c13e0723b718619b Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Tue, 26 Jul 2022 23:11:52 +0700 Subject: [PATCH 44/87] fix race condition in asyncflushproducer.OpenDB when the same DB is created twice --- utils/dbutil/asyncflushproducer/producer.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/utils/dbutil/asyncflushproducer/producer.go b/utils/dbutil/asyncflushproducer/producer.go index e4b01d9ea..f16dc21fe 100644 --- a/utils/dbutil/asyncflushproducer/producer.go +++ b/utils/dbutil/asyncflushproducer/producer.go @@ -27,10 +27,10 @@ func Wrap(backend kvdb.FullDBProducer, threshold uint64) *Producer { } func (f *Producer) OpenDB(name string) (kvdb.Store, error) { - // open existing DB f.mu.Lock() + defer f.mu.Unlock() + // open existing DB openedDB := f.dbs[name] - f.mu.Unlock() if openedDB != nil { return openedDB, nil } @@ -39,8 +39,6 @@ func (f *Producer) OpenDB(name string) (kvdb.Store, error) { if err != nil { return nil, err } - f.mu.Lock() - defer f.mu.Unlock() if f.dbs[name] != nil { return nil, errors.New("already opened") } From d97c6494921811cb0f4a42753f8e0500cc768893 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Tue, 26 Jul 2022 23:44:15 +0700 Subject: [PATCH 45/87] refactor DBs producers in launcher --- cmd/opera/launcher/check.go | 38 ++---------------- cmd/opera/launcher/consolecmd_test.go | 2 +- cmd/opera/launcher/db-transform.go | 23 +---------- cmd/opera/launcher/dbcmd.go | 55 ++++++++++++++++++++++++++- cmd/opera/launcher/export.go | 35 +---------------- cmd/opera/launcher/fake_test.go | 4 +- cmd/opera/launcher/fixdirty.go | 11 ++---- cmd/opera/launcher/genesiscmd.go | 7 +--- cmd/opera/launcher/import.go | 7 +--- cmd/opera/launcher/snapshotcmd.go | 33 ++++++---------- integration/assembly.go | 22 +++++++---- 11 files changed, 99 insertions(+), 138 deletions(-) diff --git a/cmd/opera/launcher/check.go b/cmd/opera/launcher/check.go index 6de284656..a77393668 100644 --- a/cmd/opera/launcher/check.go +++ b/cmd/opera/launcher/check.go @@ -1,46 +1,17 @@ package launcher import ( - "path" "time" "github.com/Fantom-foundation/lachesis-base/inter/idx" - "github.com/Fantom-foundation/lachesis-base/kvdb" - "github.com/Fantom-foundation/lachesis-base/kvdb/multidb" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "gopkg.in/urfave/cli.v1" - "github.com/Fantom-foundation/go-opera/integration" "github.com/Fantom-foundation/go-opera/inter" ) -func makeRawDbsProducers(cfg *config) map[multidb.TypeName]kvdb.IterableDBProducer { - dbsList, _ := integration.SupportedDBs(path.Join(cfg.Node.DataDir, "chaindata"), cfg.DBs.RuntimeCache) - if err := 
checkStateInitialized(dbsList); err != nil { - utils.Fatalf(err.Error()) - } - return dbsList -} - -func makeMultiRawDbsProducer(dbsList map[multidb.TypeName]kvdb.IterableDBProducer, cfg *config) kvdb.FullDBProducer { - multiRawDbs, err := integration.MakeDirectMultiProducer(dbsList, cfg.DBs.Routing) - if err != nil { - utils.Fatalf("Failed to initialize multi DB producer: %v", err) - } - return multiRawDbs -} - -func makeRawDbsProducer(cfg *config) kvdb.FullDBProducer { - dbsList := makeRawDbsProducers(cfg) - multiRawDbs, err := integration.MakeDirectMultiProducer(dbsList, cfg.DBs.Routing) - if err != nil { - utils.Fatalf("Failed to initialize multi DB producer: %v", err) - } - return multiRawDbs -} - func checkEvm(ctx *cli.Context) error { if len(ctx.Args()) != 0 { utils.Fatalf("This command doesn't require an argument.") @@ -48,11 +19,8 @@ func checkEvm(ctx *cli.Context) error { cfg := makeAllConfigs(ctx) - rawDbs := makeRawDbsProducer(cfg) - gdb, err := makeRawGossipStore(rawDbs, cfg) - if err != nil { - log.Crit("DB opening error", "datadir", cfg.Node.DataDir, "err", err) - } + rawDbs := makeDirectDBsProducer(cfg) + gdb := makeGossipStore(rawDbs, cfg) defer gdb.Close() evms := gdb.EvmStore() @@ -89,7 +57,7 @@ func checkEvm(ctx *cli.Context) error { }) } - err = evms.CheckEvm(checkBlocks) + err := evms.CheckEvm(checkBlocks) if err != nil { return err } diff --git a/cmd/opera/launcher/consolecmd_test.go b/cmd/opera/launcher/consolecmd_test.go index 7fc8b8e57..df26d51f9 100644 --- a/cmd/opera/launcher/consolecmd_test.go +++ b/cmd/opera/launcher/consolecmd_test.go @@ -24,7 +24,7 @@ const ( // Tests that a node embedded within a console can be started up properly and // then terminated by closing the input stream. func TestConsoleWelcome(t *testing.T) { - // Start a opera console, make sure it's cleaned up and terminate the console + // Start an opera console, make sure it's cleaned up and terminate the console cli := exec(t, "--fakenet", "0/1", "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", "console") diff --git a/cmd/opera/launcher/db-transform.go b/cmd/opera/launcher/db-transform.go index a53eb6341..59b933ef0 100644 --- a/cmd/opera/launcher/db-transform.go +++ b/cmd/opera/launcher/db-transform.go @@ -9,13 +9,10 @@ import ( "github.com/Fantom-foundation/lachesis-base/common/bigendian" "github.com/Fantom-foundation/lachesis-base/kvdb" "github.com/Fantom-foundation/lachesis-base/kvdb/batched" - "github.com/Fantom-foundation/lachesis-base/kvdb/cachedproducer" "github.com/Fantom-foundation/lachesis-base/kvdb/multidb" "github.com/Fantom-foundation/lachesis-base/kvdb/table" - "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/syndtr/goleveldb/leveldb/opt" "gopkg.in/urfave/cli.v1" "github.com/Fantom-foundation/go-opera/integration" @@ -30,7 +27,7 @@ func dbTransform(ctx *cli.Context) error { defer os.RemoveAll(tmpPath) // get supported DB producers - dbTypes := getDBProducersFor(path.Join(cfg.Node.DataDir, "chaindata")) + dbTypes := makeUncheckedCachedDBsProducers(path.Join(cfg.Node.DataDir, "chaindata")) byReq, err := readRoutes(cfg, dbTypes) if err != nil { @@ -101,7 +98,7 @@ func dbTransform(ctx *cli.Context) error { byComponents = append(byComponents, component) } - tmpDbTypes := getDBProducersFor(path.Join(cfg.Node.DataDir, "tmp")) + tmpDbTypes := makeUncheckedCachedDBsProducers(path.Join(cfg.Node.DataDir, "tmp")) for _, component := range byComponents { err := 
transformComponent(cfg.Node.DataDir, dbTypes, tmpDbTypes, component) if err != nil { @@ -121,22 +118,6 @@ func dbTransform(ctx *cli.Context) error { return nil } -func getDBProducersFor(chaindataDir string) map[multidb.TypeName]kvdb.FullDBProducer { - dbTypes, _ := integration.SupportedDBs(chaindataDir, integration.DBsCacheConfig{ - Table: map[string]integration.DBCacheConfig{ - "": { - Cache: 1024 * opt.MiB, - Fdlimit: uint64(utils.MakeDatabaseHandles() / 2), - }, - }, - }) - wrappedDbTypes := make(map[multidb.TypeName]kvdb.FullDBProducer) - for typ, producer := range dbTypes { - wrappedDbTypes[typ] = cachedproducer.WrapAll(&integration.DummyScopedProducer{IterableDBProducer: producer}) - } - return wrappedDbTypes -} - type dbMigrationEntry struct { Req string Old multidb.Route diff --git a/cmd/opera/launcher/dbcmd.go b/cmd/opera/launcher/dbcmd.go index 2bd26a6fa..866a53803 100644 --- a/cmd/opera/launcher/dbcmd.go +++ b/cmd/opera/launcher/dbcmd.go @@ -2,11 +2,19 @@ package launcher import ( "fmt" + "path" + "github.com/Fantom-foundation/lachesis-base/kvdb" + "github.com/Fantom-foundation/lachesis-base/kvdb/cachedproducer" + "github.com/Fantom-foundation/lachesis-base/kvdb/multidb" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" + "github.com/syndtr/goleveldb/leveldb/opt" "gopkg.in/urfave/cli.v1" + + "github.com/Fantom-foundation/go-opera/gossip" + "github.com/Fantom-foundation/go-opera/integration" ) var ( @@ -67,11 +75,56 @@ Experimental - try to heal dirty DB. } ) +func makeUncheckedDBsProducers(cfg *config) map[multidb.TypeName]kvdb.IterableDBProducer { + dbsList, _ := integration.SupportedDBs(path.Join(cfg.Node.DataDir, "chaindata"), cfg.DBs.RuntimeCache) + return dbsList +} + +func makeUncheckedCachedDBsProducers(chaindataDir string) map[multidb.TypeName]kvdb.FullDBProducer { + dbTypes, _ := integration.SupportedDBs(chaindataDir, integration.DBsCacheConfig{ + Table: map[string]integration.DBCacheConfig{ + "": { + Cache: 1024 * opt.MiB, + Fdlimit: uint64(utils.MakeDatabaseHandles() / 2), + }, + }, + }) + wrappedDbTypes := make(map[multidb.TypeName]kvdb.FullDBProducer) + for typ, producer := range dbTypes { + wrappedDbTypes[typ] = cachedproducer.WrapAll(&integration.DummyScopedProducer{IterableDBProducer: producer}) + } + return wrappedDbTypes +} + +func makeCheckedDBsProducers(cfg *config) map[multidb.TypeName]kvdb.IterableDBProducer { + if err := integration.CheckStateInitialized(path.Join(cfg.Node.DataDir, "chaindata"), cfg.DBs); err != nil { + utils.Fatalf(err.Error()) + } + return makeUncheckedDBsProducers(cfg) +} + +func makeDirectDBsProducerFrom(dbsList map[multidb.TypeName]kvdb.IterableDBProducer, cfg *config) kvdb.FullDBProducer { + multiRawDbs, err := integration.MakeDirectMultiProducer(dbsList, cfg.DBs.Routing) + if err != nil { + utils.Fatalf("Failed to initialize multi DB producer: %v", err) + } + return multiRawDbs +} + +func makeDirectDBsProducer(cfg *config) kvdb.FullDBProducer { + dbsList := makeCheckedDBsProducers(cfg) + return makeDirectDBsProducerFrom(dbsList, cfg) +} + +func makeGossipStore(producer kvdb.FlushableDBProducer, cfg *config) *gossip.Store { + return gossip.NewStore(producer, cfg.OperaStore) +} + func compact(ctx *cli.Context) error { cfg := makeAllConfigs(ctx) - rawProducer := makeRawDbsProducer(cfg) + rawProducer := makeDirectDBsProducer(cfg) for _, name := range rawProducer.Names() { db, err := rawProducer.OpenDB(name) defer db.Close() diff --git a/cmd/opera/launcher/export.go 
b/cmd/opera/launcher/export.go index abb3f6d8d..e16411364 100644 --- a/cmd/opera/launcher/export.go +++ b/cmd/opera/launcher/export.go @@ -2,7 +2,6 @@ package launcher import ( "compress/gzip" - "errors" "io" "os" "strconv" @@ -11,8 +10,6 @@ import ( "github.com/Fantom-foundation/lachesis-base/hash" "github.com/Fantom-foundation/lachesis-base/inter/idx" - "github.com/Fantom-foundation/lachesis-base/kvdb" - "github.com/Fantom-foundation/lachesis-base/kvdb/multidb" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -21,7 +18,6 @@ import ( "gopkg.in/urfave/cli.v1" "github.com/Fantom-foundation/go-opera/gossip" - "github.com/Fantom-foundation/go-opera/integration" ) var ( @@ -40,11 +36,8 @@ func exportEvents(ctx *cli.Context) error { cfg := makeAllConfigs(ctx) - rawDbs := makeRawDbsProducer(cfg) - gdb, err := makeRawGossipStore(rawDbs, cfg) - if err != nil { - log.Crit("DB opening error", "datadir", cfg.Node.DataDir, "err", err) - } + rawDbs := makeDirectDBsProducer(cfg) + gdb := makeGossipStore(rawDbs, cfg) defer gdb.Close() fn := ctx.Args().First() @@ -93,30 +86,6 @@ func exportEvents(ctx *cli.Context) error { return nil } -func checkStateInitialized(rawProducers map[multidb.TypeName]kvdb.IterableDBProducer) error { - // if flushID is not written, then previous genesis processing attempt was interrupted - for _, rawProducer := range rawProducers { - for _, name := range rawProducer.Names() { - db, err := rawProducer.OpenDB(name) - if err != nil { - return err - } - flushID, _ := db.Get(integration.FlushIDKey) - _ = db.Close() - if flushID != nil { - return nil - } - } - } - return errors.New("datadir is not initialized") -} - -func makeRawGossipStore(rawProducer kvdb.IterableDBProducer, cfg *config) (*gossip.Store, error) { - dbs := &integration.DummyScopedProducer{rawProducer} - gdb := gossip.NewStore(dbs, cfg.OperaStore) - return gdb, nil -} - // exportTo writer the active chain. 
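For reference, a minimal sketch of the call-site pattern that replaces the removed makeRawGossipStore helper throughout this commit — the same lines recur in check.go, import.go, genesiscmd.go and snapshotcmd.go:

	cfg := makeAllConfigs(ctx)           // flags + optional config file
	rawDbs := makeDirectDBsProducer(cfg) // exits via utils.Fatalf if the datadir is not initialized
	gdb := makeGossipStore(rawDbs, cfg)  // gossip.NewStore over the direct multi-DB producer
	defer gdb.Close()
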
func exportTo(w io.Writer, gdb *gossip.Store, from, to idx.Epoch) (err error) { start, reported := time.Now(), time.Time{} diff --git a/cmd/opera/launcher/fake_test.go b/cmd/opera/launcher/fake_test.go index 4af1d732a..74da25047 100644 --- a/cmd/opera/launcher/fake_test.go +++ b/cmd/opera/launcher/fake_test.go @@ -15,7 +15,7 @@ import ( ) func TestFakeNetFlag_NonValidator(t *testing.T) { - // Start a opera console, make sure it's cleaned up and terminate the console + // Start an opera console, make sure it's cleaned up and terminate the console cli := exec(t, "--fakenet", "0/3", "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", @@ -57,7 +57,7 @@ To exit, press ctrl-d } func TestFakeNetFlag_Validator(t *testing.T) { - // Start a opera console, make sure it's cleaned up and terminate the console + // Start an opera console, make sure it's cleaned up and terminate the console cli := exec(t, "--fakenet", "3/3", "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", diff --git a/cmd/opera/launcher/fixdirty.go b/cmd/opera/launcher/fixdirty.go index 20641c95d..3381b3196 100644 --- a/cmd/opera/launcher/fixdirty.go +++ b/cmd/opera/launcher/fixdirty.go @@ -32,8 +32,8 @@ func healDirty(ctx *cli.Context) error { cfg := makeAllConfigs(ctx) log.Info("Opening databases") - dbTypes := makeRawDbsProducers(cfg) - multiProducer := makeMultiRawDbsProducer(dbTypes, cfg) + dbTypes := makeUncheckedDBsProducers(cfg) + multiProducer := makeDirectDBsProducerFrom(dbTypes, cfg) // reverts the gossip database state epochState, err := fixDirtyGossipDb(multiProducer, cfg) @@ -84,12 +84,9 @@ func healDirty(ctx *cli.Context) error { } // fixDirtyGossipDb reverts the gossip database into state, when was one of last epochs sealed -func fixDirtyGossipDb(producer kvdb.IterableDBProducer, cfg *config) ( +func fixDirtyGossipDb(producer kvdb.FlushableDBProducer, cfg *config) ( epochState *iblockproc.EpochState, err error) { - gdb, err := makeRawGossipStore(producer, cfg) // requires FlushIDKey present (not clean) in all dbs - if err != nil { - log.Crit("DB opening error", "datadir", cfg.Node.DataDir, "err", err) - } + gdb := makeGossipStore(producer, cfg) // requires FlushIDKey present (not clean) in all dbs defer gdb.Close() // find the last closed epoch with the state available diff --git a/cmd/opera/launcher/genesiscmd.go b/cmd/opera/launcher/genesiscmd.go index 778e768e1..39e9844da 100644 --- a/cmd/opera/launcher/genesiscmd.go +++ b/cmd/opera/launcher/genesiscmd.go @@ -207,11 +207,8 @@ func exportGenesis(ctx *cli.Context) error { _ = os.RemoveAll(tmpPath) defer os.RemoveAll(tmpPath) - rawDbs := makeRawDbsProducer(cfg) - gdb, err := makeRawGossipStore(rawDbs, cfg) - if err != nil { - log.Crit("DB opening error", "datadir", cfg.Node.DataDir, "err", err) - } + rawDbs := makeDirectDBsProducer(cfg) + gdb := makeGossipStore(rawDbs, cfg) if gdb.GetHighestLamport() != 0 { log.Warn("Attempting genesis export not in a beginning of an epoch. 
Genesis file output may contain excessive data.") } diff --git a/cmd/opera/launcher/import.go b/cmd/opera/launcher/import.go index 6c90dadd1..4fe3ded79 100644 --- a/cmd/opera/launcher/import.go +++ b/cmd/opera/launcher/import.go @@ -36,11 +36,8 @@ func importEvm(ctx *cli.Context) error { cfg := makeAllConfigs(ctx) - rawDbs := makeRawDbsProducer(cfg) - gdb, err := makeRawGossipStore(rawDbs, cfg) - if err != nil { - log.Crit("DB opening error", "datadir", cfg.Node.DataDir, "err", err) - } + rawDbs := makeDirectDBsProducer(cfg) + gdb := makeGossipStore(rawDbs, cfg) defer gdb.Close() for _, fn := range ctx.Args() { diff --git a/cmd/opera/launcher/snapshotcmd.go b/cmd/opera/launcher/snapshotcmd.go index 91135415b..b20d2772c 100644 --- a/cmd/opera/launcher/snapshotcmd.go +++ b/cmd/opera/launcher/snapshotcmd.go @@ -155,11 +155,8 @@ It's also usable without snapshot enabled. func pruneState(ctx *cli.Context) error { cfg := makeAllConfigs(ctx) - rawDbs := makeRawDbsProducer(cfg) - gdb, err := makeRawGossipStore(rawDbs, cfg) - if err != nil { - log.Crit("DB opening error", "datadir", cfg.Node.DataDir, "err", err) - } + rawDbs := makeDirectDBsProducer(cfg) + gdb := makeGossipStore(rawDbs, cfg) if gdb.GetGenesisID() == nil { return errors.New("failed to open snapshot tree: genesis is not written") @@ -179,6 +176,7 @@ func pruneState(ctx *cli.Context) error { } root := common.Hash(gdb.GetBlockState().FinalizedStateRoot) var bloom evmpruner.StateBloom + var err error if ctx.Bool(PruneExactCommand.Name) { log.Info("Initializing LevelDB storage of in-use-keys") lset, closer, err := evmpruner.NewLevelDBSet(path.Join(tmpDir, "keys-in-use")) @@ -223,11 +221,8 @@ func pruneState(ctx *cli.Context) error { func verifyState(ctx *cli.Context) error { cfg := makeAllConfigs(ctx) - rawDbs := makeRawDbsProducer(cfg) - gdb, err := makeRawGossipStore(rawDbs, cfg) - if err != nil { - log.Crit("DB opening error", "datadir", cfg.Node.DataDir, "err", err) - } + rawDbs := makeDirectDBsProducer(cfg) + gdb := makeGossipStore(rawDbs, cfg) genesis := gdb.GetGenesisID() if genesis == nil { @@ -237,7 +232,7 @@ func verifyState(ctx *cli.Context) error { evmStore := gdb.EvmStore() root := common.Hash(gdb.GetBlockState().FinalizedStateRoot) - err = evmStore.GenerateEvmSnapshot(root, false, false) + err := evmStore.GenerateEvmSnapshot(root, false, false) if err != nil { log.Error("Failed to open snapshot tree", "err", err) return err @@ -267,11 +262,8 @@ func verifyState(ctx *cli.Context) error { // contract codes are present. func traverseState(ctx *cli.Context) error { cfg := makeAllConfigs(ctx) - rawDbs := makeRawDbsProducer(cfg) - gdb, err := makeRawGossipStore(rawDbs, cfg) - if err != nil { - log.Crit("DB opening error", "datadir", cfg.Node.DataDir, "err", err) - } + rawDbs := makeDirectDBsProducer(cfg) + gdb := makeGossipStore(rawDbs, cfg) if gdb.GetGenesisID() == nil { return errors.New("failed to open snapshot tree: genesis is not written") @@ -284,6 +276,7 @@ func traverseState(ctx *cli.Context) error { } var ( root common.Hash + err error ) if ctx.NArg() == 1 { root, err = parseRoot(ctx.Args()[0]) @@ -359,11 +352,8 @@ func traverseState(ctx *cli.Context) error { // but it will check each trie node. 
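// (Compared to traverseState above, which walks only the account and storage
// leaves, this variant visits every intermediate trie node as well, so it is
// slower but also detects missing internal nodes.)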
func traverseRawState(ctx *cli.Context) error { cfg := makeAllConfigs(ctx) - rawDbs := makeRawDbsProducer(cfg) - gdb, err := makeRawGossipStore(rawDbs, cfg) - if err != nil { - log.Crit("DB opening error", "datadir", cfg.Node.DataDir, "err", err) - } + rawDbs := makeDirectDBsProducer(cfg) + gdb := makeGossipStore(rawDbs, cfg) if gdb.GetGenesisID() == nil { return errors.New("failed to open snapshot tree: genesis is not written") @@ -376,6 +366,7 @@ func traverseRawState(ctx *cli.Context) error { } var ( root common.Hash + err error ) if ctx.NArg() == 1 { root, err = parseRoot(ctx.Args()[0]) diff --git a/integration/assembly.go b/integration/assembly.go index 8a07c141b..85bd1ebcf 100644 --- a/integration/assembly.go +++ b/integration/assembly.go @@ -132,9 +132,22 @@ func migrate(dbs kvdb.FlushableDBProducer, cfg Configs) error { return nil } +func CheckStateInitialized(chaindataDir string, cfg DBsConfig) error { + if isInterrupted(chaindataDir) { + return errors.New("genesis processing isn't finished") + } + runtimeProducers, runtimeScopedProducers := SupportedDBs(chaindataDir, cfg.RuntimeCache) + dbs, err := MakeMultiProducer(runtimeProducers, runtimeScopedProducers, cfg.Routing) + if err != nil { + return err + } + return dbs.Close() +} + func makeEngine(chaindataDir string, g *genesis.Genesis, genesisProc bool, cfg Configs) (*abft.Lachesis, *vecmt.Index, *gossip.Store, *abft.Store, gossip.BlockProc, func() error, error) { // Genesis processing if genesisProc { + setGenesisProcessing(chaindataDir) // use increased DB cache for genesis processing genesisProducers, _ := SupportedDBs(chaindataDir, cfg.DBs.GenesisCache) if g == nil { @@ -150,15 +163,14 @@ func makeEngine(chaindataDir string, g *genesis.Genesis, genesisProc bool, cfg C return nil, nil, nil, nil, gossip.BlockProc{}, nil, fmt.Errorf("failed to apply genesis state: %v", err) } _ = dbs.Close() + setGenesisComplete(chaindataDir) } // Check DBs are synced { - runtimeProducers, runtimeScopedProducers := SupportedDBs(chaindataDir, cfg.DBs.RuntimeCache) - dbs, err := MakeMultiProducer(runtimeProducers, runtimeScopedProducers, cfg.DBs.Routing) + err := CheckStateInitialized(chaindataDir, cfg.DBs) if err != nil { return nil, nil, nil, nil, gossip.BlockProc{}, nil, err } - _ = dbs.Close() } // Migration { @@ -247,9 +259,6 @@ func MakeEngine(chaindataDir string, g *genesis.Genesis, cfg Configs) (*abft.Lac dropAllDBsIfInterrupted(chaindataDir) firstLaunch := isEmpty(chaindataDir) MakeDBDirs(chaindataDir) - if firstLaunch { - setGenesisProcessing(chaindataDir) - } engine, vecClock, gdb, cdb, blockProc, closeDBs, err := makeEngine(chaindataDir, g, firstLaunch, cfg) if err != nil { @@ -262,7 +271,6 @@ func MakeEngine(chaindataDir string, g *genesis.Genesis, cfg Configs) (*abft.Lac rules := gdb.GetRules() genesisID := gdb.GetGenesisID() if firstLaunch { - setGenesisComplete(chaindataDir) log.Info("Applied genesis state", "name", rules.Name, "id", rules.NetworkID, "genesis", genesisID.String()) } else { log.Info("Genesis is already written", "name", rules.Name, "id", rules.NetworkID, "genesis", genesisID.String()) From 333008dd1a440f038daa42c10acd7630fc8e7d94 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Wed, 27 Jul 2022 00:10:01 +0700 Subject: [PATCH 46/87] fix 'db compact' command after multidb changes --- cmd/opera/launcher/dbcmd.go | 41 ++++++++++++++++++++----------------- 1 file changed, 22 insertions(+), 19 deletions(-) diff --git a/cmd/opera/launcher/dbcmd.go b/cmd/opera/launcher/dbcmd.go index 866a53803..6e5355e91 100644 --- 
a/cmd/opera/launcher/dbcmd.go +++ b/cmd/opera/launcher/dbcmd.go @@ -124,29 +124,32 @@ func compact(ctx *cli.Context) error { cfg := makeAllConfigs(ctx) - rawProducer := makeDirectDBsProducer(cfg) - for _, name := range rawProducer.Names() { - db, err := rawProducer.OpenDB(name) - defer db.Close() - if err != nil { - log.Error("Cannot open db or db does not exists", "db", name) - return err - } + producers := makeCheckedDBsProducers(cfg) + for typ, p := range producers { + for _, name := range p.Names() { + humanName := path.Join(string(typ), name) + db, err := p.OpenDB(name) + defer db.Close() + if err != nil { + log.Error("Cannot open db or db does not exists", "db", humanName) + return err + } - log.Info("Stats before compaction", "db", name) - showLeveldbStats(db) + log.Info("Stats before compaction", "db", humanName) + showLeveldbStats(db) - log.Info("Triggering compaction", "db", name) - for b := byte(0); b < 255; b++ { - log.Trace("Compacting chain database", "db", name, "range", fmt.Sprintf("0x%0.2X-0x%0.2X", b, b+1)) - if err := db.Compact([]byte{b}, []byte{b + 1}); err != nil { - log.Error("Database compaction failed", "err", err) - return err + log.Info("Triggering compaction", "db", humanName) + for b := byte(0); b < 255; b++ { + log.Trace("Compacting chain database", "db", humanName, "range", fmt.Sprintf("0x%0.2X-0x%0.2X", b, b+1)) + if err := db.Compact([]byte{b}, []byte{b + 1}); err != nil { + log.Error("Database compaction failed", "err", err) + return err + } } - } - log.Info("Stats after compaction", "db", name) - showLeveldbStats(db) + log.Info("Stats after compaction", "db", humanName) + showLeveldbStats(db) + } } return nil From 7507e84c0a7679956f78c590950fb0ca0edb8e3d Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Wed, 27 Jul 2022 01:16:18 +0700 Subject: [PATCH 47/87] support both leveldb and pebble during multidb migration --- integration/legacy_migrate.go | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/integration/legacy_migrate.go b/integration/legacy_migrate.go index dbdbff8bc..1e255a9bb 100644 --- a/integration/legacy_migrate.go +++ b/integration/legacy_migrate.go @@ -2,12 +2,14 @@ package integration import ( "fmt" + "os" "path" "strings" "github.com/Fantom-foundation/lachesis-base/kvdb" "github.com/Fantom-foundation/lachesis-base/kvdb/batched" "github.com/Fantom-foundation/lachesis-base/kvdb/leveldb" + "github.com/Fantom-foundation/lachesis-base/kvdb/pebble" "github.com/Fantom-foundation/lachesis-base/kvdb/skipkeys" "github.com/Fantom-foundation/lachesis-base/kvdb/table" "github.com/ethereum/go-ethereum/cmd/utils" @@ -77,13 +79,21 @@ func isEmptyDB(db kvdb.Iteratee) bool { return !it.Next() } +func fileExists(filename string) bool { + info, err := os.Stat(filename) + if err != nil { + return false + } + return !info.IsDir() +} + func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer) error { if !isEmpty(path.Join(chaindataDir, "gossip")) { // migrate DB layout cacheFn, err := dbCacheFdlimit(DBsCacheConfig{ Table: map[string]DBCacheConfig{ "": { - Cache: 1024 * opt.MiB, + Cache: 128 * opt.MiB, Fdlimit: uint64(utils.MakeDatabaseHandles() / 2), }, }, @@ -91,7 +101,12 @@ func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer) error { if err != nil { return err } - oldDBs := leveldb.NewProducer(chaindataDir, cacheFn) + var oldDBs kvdb.IterableDBProducer + if fileExists(path.Join(chaindataDir, "gossip", "LOG")) { + oldDBs = leveldb.NewProducer(chaindataDir, cacheFn) + } else { + oldDBs = 
pebble.NewProducer(chaindataDir, cacheFn) + } // move lachesis DB lachesisDB, err := oldDBs.OpenDB("lachesis") From 51b490cf08906a972f9103246d0d92e78e3925f2 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Wed, 27 Jul 2022 01:18:13 +0700 Subject: [PATCH 48/87] prevent damaging DBs during multidb migration when disk space is low --- integration/legacy_migrate.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/integration/legacy_migrate.go b/integration/legacy_migrate.go index 1e255a9bb..223e49597 100644 --- a/integration/legacy_migrate.go +++ b/integration/legacy_migrate.go @@ -1,6 +1,7 @@ package integration import ( + "errors" "fmt" "os" "path" @@ -55,9 +56,14 @@ func moveDB(src, dst_ kvdb.Store, name, dir string) error { freeSpace, err := getFreeDiskSpace(dir) if err != nil { log.Error("Failed to retrieve free disk space", "err", err) - } else if len(keys) > 0 && freeSpace < 50*opt.GiB { + } else if freeSpace < 10*opt.GiB { + return errors.New("not enough disk space") + } else if len(keys) > 0 && freeSpace < 100*opt.GiB { log.Warn("Not enough disk space. Trimming source DB records", "space_GB", freeSpace/opt.GiB) - _ = dst.Flush() + err = dst.Flush() + if err != nil { + return err + } _, _ = dst.Stat("async_flush") for i := 0; i < len(keys); i++ { err := src.Delete(keys[i]) From 7ed2dea943ceadb9cf3c9e17e051cdfbbbdd9784 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Wed, 27 Jul 2022 01:34:19 +0700 Subject: [PATCH 49/87] refactor multidb migration --- integration/legacy_migrate.go | 22 +++------------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/integration/legacy_migrate.go b/integration/legacy_migrate.go index 223e49597..aea46d701 100644 --- a/integration/legacy_migrate.go +++ b/integration/legacy_migrate.go @@ -114,26 +114,10 @@ func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer) error { oldDBs = pebble.NewProducer(chaindataDir, cacheFn) } - // move lachesis DB - lachesisDB, err := oldDBs.OpenDB("lachesis") - if err != nil { - return err - } - newDB, err := dbs.OpenDB("lachesis") - if err != nil { - return err - } - err = moveDB(skipkeys.Wrap(lachesisDB, MetadataPrefix), newDB, "lachesis", chaindataDir) - newDB.Close() - lachesisDB.Close() - if err != nil { - return err - } - lachesisDB.Drop() - - // move lachesis-%d and gossip-%d DBs + var newDB kvdb.Store + // move lachesis, lachesis-%d and gossip-%d DBs for _, name := range oldDBs.Names() { - if strings.HasPrefix(name, "lachesis-") || strings.HasPrefix(name, "gossip-") { + if strings.HasPrefix(name, "lachesis") || strings.HasPrefix(name, "gossip-") { oldDB, err := oldDBs.OpenDB(name) if err != nil { return err From 4d6b08be41e518902c273d76d72c03e7cbd8f068 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Wed, 27 Jul 2022 02:11:17 +0700 Subject: [PATCH 50/87] fix compacting source DB during multidb migration --- integration/legacy_migrate.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/integration/legacy_migrate.go b/integration/legacy_migrate.go index aea46d701..2c47a17fd 100644 --- a/integration/legacy_migrate.go +++ b/integration/legacy_migrate.go @@ -36,7 +36,10 @@ func moveDB(src, dst_ kvdb.Store, name, dir string) error { keys := make([][]byte, 0, batchKeys) values := make([][]byte, 0, batchKeys) it := src.NewIterator(nil, start) - defer it.Release() + defer func() { + // wrap with func because 'it' may be reopened below + it.Release() + }() log.Info("Transforming DB layout", "db", name) for next := true; next; { 
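		// Each pass copies up to batchKeys entries from src into dst and flushes dst.
		// Free disk space is then checked: below 20 GiB the migration aborts, and
		// below 100 GiB the already-copied keys are deleted from src and that range
		// is compacted (reopening the iterator, and the source DB itself when space
		// falls under 50 GiB) so the trimmed space is actually reclaimed.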
for len(keys) < batchKeys { @@ -59,19 +62,19 @@ func moveDB(src, dst_ kvdb.Store, name, dir string) error { } else if freeSpace < 10*opt.GiB { return errors.New("not enough disk space") } else if len(keys) > 0 && freeSpace < 100*opt.GiB { - log.Warn("Not enough disk space. Trimming source DB records", "space_GB", freeSpace/opt.GiB) + log.Warn("Running out of disk space. Trimming source DB records", "space_GB", freeSpace/opt.GiB) err = dst.Flush() if err != nil { return err } _, _ = dst.Stat("async_flush") - for i := 0; i < len(keys); i++ { - err := src.Delete(keys[i]) - if err != nil { - return err - } + // release iterator so that DB could release data + it.Release() + for _, k := range keys { + _ = src.Delete(k) } _ = src.Compact(keys[0], keys[len(keys)-1]) + it = src.NewIterator(nil, keys[len(keys)-1]) } keys = keys[:0] values = values[:0] From 685575e439d18b4185da69e12b9034155cfbc367 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Wed, 27 Jul 2022 03:26:13 +0700 Subject: [PATCH 51/87] increase db cache during multidb migration --- integration/legacy_migrate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration/legacy_migrate.go b/integration/legacy_migrate.go index 2c47a17fd..8ddf830f4 100644 --- a/integration/legacy_migrate.go +++ b/integration/legacy_migrate.go @@ -102,7 +102,7 @@ func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer) error { cacheFn, err := dbCacheFdlimit(DBsCacheConfig{ Table: map[string]DBCacheConfig{ "": { - Cache: 128 * opt.MiB, + Cache: 1024 * opt.MiB, Fdlimit: uint64(utils.MakeDatabaseHandles() / 2), }, }, From a63b37257ee1bed194b0e2ed6397e39a12c98486 Mon Sep 17 00:00:00 2001 From: Egor Lysenko <68006922+uprendis@users.noreply.github.com> Date: Wed, 27 Jul 2022 17:40:19 +0700 Subject: [PATCH 52/87] Refactor CheckEvm call Co-authored-by: Ha DANG --- cmd/opera/launcher/check.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cmd/opera/launcher/check.go b/cmd/opera/launcher/check.go index a77393668..03de24224 100644 --- a/cmd/opera/launcher/check.go +++ b/cmd/opera/launcher/check.go @@ -57,8 +57,7 @@ func checkEvm(ctx *cli.Context) error { }) } - err := evms.CheckEvm(checkBlocks) - if err != nil { + if err := evms.CheckEvm(checkBlocks); err != nil { return err } log.Info("EVM storage is verified", "last", prevPoint, "elapsed", common.PrettyDuration(time.Since(start))) From cd47958e772caea80e98a05c01a2395343364809 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Wed, 27 Jul 2022 17:42:11 +0700 Subject: [PATCH 53/87] rm duplicate of dbCommand --- cmd/opera/launcher/launcher.go | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/opera/launcher/launcher.go b/cmd/opera/launcher/launcher.go index 3f647e674..370237014 100644 --- a/cmd/opera/launcher/launcher.go +++ b/cmd/opera/launcher/launcher.go @@ -214,7 +214,6 @@ func init() { importCommand, exportCommand, checkCommand, - dbCommand, // See snapshot.go snapshotCommand, // See dbcmd.go From 391a9a611f346d0caab798b78ea7e3217af78c81 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Thu, 28 Jul 2022 16:47:56 +0700 Subject: [PATCH 54/87] fix compilation of config_custom_test.go --- cmd/opera/launcher/config_custom_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/opera/launcher/config_custom_test.go b/cmd/opera/launcher/config_custom_test.go index 888057c5c..2db8bfc49 100644 --- a/cmd/opera/launcher/config_custom_test.go +++ b/cmd/opera/launcher/config_custom_test.go @@ -30,7 +30,6 @@ func TestConfigFile(t *testing.T) { Lachesis: 
abft.DefaultConfig(), LachesisStore: abft.DefaultStoreConfig(cacheRatio), VectorClock: vecmt.DefaultConfig(cacheRatio), - cachescale: cacheRatio, } canonical := func(nn []*enode.Node) []*enode.Node { From 53eddab65397544f18c3a6e378f366a4ed645f5c Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Thu, 28 Jul 2022 16:56:43 +0700 Subject: [PATCH 55/87] fix RecoverEVM with pruned blocks --- gossip/c_block_callbacks.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/gossip/c_block_callbacks.go b/gossip/c_block_callbacks.go index 735e5b760..f0c4fc65f 100644 --- a/gossip/c_block_callbacks.go +++ b/gossip/c_block_callbacks.go @@ -490,6 +490,9 @@ func (s *Service) RecoverEVM() { start := s.store.GetLatestBlockIndex() for b := start; b >= 1 && b > start-20000; b-- { block := s.store.GetBlock(b) + if block == nil { + break + } if s.store.evm.HasStateDB(block.Root) { if b != start { s.Log.Warn("Reexecuting blocks after abrupt stopping", "from", b, "to", start) From c15d152113a080417cf4fbeda78dc97dc9f76d5c Mon Sep 17 00:00:00 2001 From: Jan Kalina Date: Tue, 2 Aug 2022 21:42:26 +0200 Subject: [PATCH 56/87] Fix block processing metrics, remove unused metrics --- gossip/c_block_callbacks.go | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/gossip/c_block_callbacks.go b/gossip/c_block_callbacks.go index f0c4fc65f..6e2dabc89 100644 --- a/gossip/c_block_callbacks.go +++ b/gossip/c_block_callbacks.go @@ -50,14 +50,8 @@ var ( snapshotCommitTimer = metrics.GetOrRegisterTimer("chain/snapshot/commits", nil) blockInsertTimer = metrics.GetOrRegisterTimer("chain/inserts", nil) - blockValidationTimer = metrics.GetOrRegisterTimer("chain/validation", nil) blockExecutionTimer = metrics.GetOrRegisterTimer("chain/execution", nil) blockWriteTimer = metrics.GetOrRegisterTimer("chain/write", nil) - - _ = metrics.GetOrRegisterMeter("chain/reorg/executes", nil) - _ = metrics.GetOrRegisterMeter("chain/reorg/add", nil) - _ = metrics.GetOrRegisterMeter("chain/reorg/drop", nil) - _ = metrics.GetOrRegisterMeter("chain/reorg/invalidTx", nil) ) type ExtendedTxPosition struct { @@ -256,7 +250,7 @@ func consensusCallbackBeginBlockFn( } evmProcessor := blockProc.EVMModule.Start(blockCtx, statedb, evmStateReader, onNewLogAll, es.Rules, es.Rules.EvmChainConfig(store.GetUpgradeHeights())) - substart := time.Now() + executionStart := time.Now() // Execute pre-internal transactions preInternalTxs := blockProc.PreTxTransactor.PopInternalTxs(blockCtx, bs, es, sealing, statedb) @@ -399,14 +393,13 @@ func consensusCallbackBeginBlockFn( storageUpdateTimer.Update(statedb.StorageUpdates) snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) - triehash := statedb.AccountHashes + statedb.StorageHashes // save to not double count in validation - trieproc := statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates - trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates - blockExecutionTimer.Update(time.Since(substart) - trieproc - triehash) - // Update the metrics touched during block validation accountHashTimer.Update(statedb.AccountHashes) storageHashTimer.Update(statedb.StorageHashes) - blockValidationTimer.Update(time.Since(substart) - (statedb.AccountHashes + statedb.StorageHashes - triehash)) + triehash := statedb.AccountHashes + statedb.StorageHashes + trieproc := statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates + trieproc += 
statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates + blockExecutionTimer.Update(time.Since(executionStart) - trieproc - triehash) + // Update the metrics touched by new block headBlockGauge.Update(int64(blockCtx.Idx)) headHeaderGauge.Update(int64(blockCtx.Idx)) @@ -424,12 +417,14 @@ func consensusCallbackBeginBlockFn( feed.newLogs.Send(logs) } + commitStart := time.Now() store.commitEVM(false) + // Update the metrics touched during block commit accountCommitTimer.Update(statedb.AccountCommits) storageCommitTimer.Update(statedb.StorageCommits) snapshotCommitTimer.Update(statedb.SnapshotCommits) - blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits) + blockWriteTimer.Update(time.Since(commitStart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits) blockInsertTimer.UpdateSince(start) now := time.Now() From 9593cf15dd255910e8a799de3e17f4726c36e42a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ha=20=C4=90ANG?= Date: Tue, 26 Jul 2022 11:20:45 +0700 Subject: [PATCH 57/87] fix nil cachescale issue after loading file config --- cmd/opera/launcher/config.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/opera/launcher/config.go b/cmd/opera/launcher/config.go index fffb308c3..cd5dd72f1 100644 --- a/cmd/opera/launcher/config.go +++ b/cmd/opera/launcher/config.go @@ -369,7 +369,6 @@ func mayMakeAllConfigs(ctx *cli.Context) (*config, error) { Lachesis: abft.DefaultConfig(), LachesisStore: abft.DefaultStoreConfig(cacheRatio), VectorClock: vecmt.DefaultConfig(cacheRatio), - cachescale: cacheRatio, } if ctx.GlobalIsSet(FakeNetFlag.Name) { @@ -389,6 +388,8 @@ func mayMakeAllConfigs(ctx *cli.Context) (*config, error) { } } + cfg.cachescale = cacheRatio + // Apply flags (high priority) var err error cfg.Opera, err = gossipConfigWithFlags(ctx, cfg.Opera) From a5179eff892df312fa5cf668b8bd8c545de77eeb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ha=20=C4=90ANG?= Date: Thu, 4 Aug 2022 06:42:17 +0700 Subject: [PATCH 58/87] upgrade ethereum --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 27b27e284..b8a2a2898 100644 --- a/go.mod +++ b/go.mod @@ -44,6 +44,6 @@ require ( gopkg.in/urfave/cli.v1 v1.20.0 ) -replace github.com/ethereum/go-ethereum => github.com/hadv/go-ethereum v1.10.9-0.20220726114552-2b6434a5e9c8 +replace github.com/ethereum/go-ethereum => github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc6 replace github.com/dvyukov/go-fuzz => github.com/guzenok/go-fuzz v0.0.0-20210103140116-f9104dfb626f diff --git a/go.sum b/go.sum index 2fb27dbf7..02b0a12f3 100644 --- a/go.sum +++ b/go.sum @@ -34,6 +34,8 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc6 h1:QAOlomYdCyfP2Z8fRFEfzYYPxMLqUMbvppOGhOoVarM= +github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc6/go.mod h1:IeQDjWCNBj/QiWIPosfF6/kRC6pHPNs7W7LfBzjj+P4= github.com/Fantom-foundation/lachesis-base v0.0.0-20220103160934-6b4931c60582 h1:gDEbOynFwS7sp19XrtWnA1nEhSIXXO5DuAuT5h/nZgY= github.com/Fantom-foundation/lachesis-base v0.0.0-20220103160934-6b4931c60582/go.mod 
h1:E6+2LOvgADwSOv0U5YXhRJz4PlX8qJxvq8M93AOC1tM= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -218,8 +220,6 @@ github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29 h1:sezaKh github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/guzenok/go-fuzz v0.0.0-20210103140116-f9104dfb626f h1:GQpcb9ywkaawJfZCFeIcrSWlsZFRTsig42e03dkzc+I= github.com/guzenok/go-fuzz v0.0.0-20210103140116-f9104dfb626f/go.mod h1:Q5On640X2Z0YzKOijx9GVhUu/kvHnk9aKoWGMlRDMtc= -github.com/hadv/go-ethereum v1.10.9-0.20220726114552-2b6434a5e9c8 h1:S2eeDb/NAjA2ylCFijPU2lAnKlpcrINVL+DfEDY0XYo= -github.com/hadv/go-ethereum v1.10.9-0.20220726114552-2b6434a5e9c8/go.mod h1:IeQDjWCNBj/QiWIPosfF6/kRC6pHPNs7W7LfBzjj+P4= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= From e5491ba68057f431a16320dfbf2f701940467cb7 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Sun, 7 Aug 2022 15:02:36 +0300 Subject: [PATCH 59/87] de-synchronize flushes between nodes --- gossip/config.go | 2 +- gossip/service.go | 4 ++-- gossip/store.go | 13 ++++++++----- utils/randat/rand_at.go | 26 ++++++++++++++++++++++++++ 4 files changed, 37 insertions(+), 8 deletions(-) create mode 100644 utils/randat/rand_at.go diff --git a/gossip/config.go b/gossip/config.go index ebbd5d34d..0488a17b4 100644 --- a/gossip/config.go +++ b/gossip/config.go @@ -269,7 +269,7 @@ func DefaultStoreConfig(scale cachescale.Func) StoreConfig { LlrEpochVotesIndexes: scale.I(5), }, EVM: evmstore.DefaultStoreConfig(scale), - MaxNonFlushedSize: 20*opt.MiB + scale.I(2*opt.MiB), + MaxNonFlushedSize: 21*opt.MiB + scale.I(2*opt.MiB), MaxNonFlushedPeriod: 30 * time.Minute, } } diff --git a/gossip/service.go b/gossip/service.go index 464d6f346..8016687fe 100644 --- a/gossip/service.go +++ b/gossip/service.go @@ -310,13 +310,13 @@ func (s *Service) makePeriodicFlusher() PeriodicFlusher { commitNeeded: func() bool { // use slightly higher size threshold to avoid locking the mutex/wg pair and hurting events/blocks concurrency // PeriodicFlusher should mostly commit only data generated by async EVM snapshots generation - return s.store.isCommitNeeded(120, 100) + return s.store.isCommitNeeded(1200, 1000) }, commit: func() { s.engineMu.Lock() defer s.engineMu.Unlock() // Note: blockProcWg.Wait() is already called by s.commit - if s.store.isCommitNeeded(120, 100) { + if s.store.isCommitNeeded(1200, 1000) { s.commit(false) } }, diff --git a/gossip/store.go b/gossip/store.go index d972bc8c4..e9441c051 100644 --- a/gossip/store.go +++ b/gossip/store.go @@ -18,6 +18,7 @@ import ( "github.com/Fantom-foundation/go-opera/logger" "github.com/Fantom-foundation/go-opera/utils/adapters/snap2kvdb" "github.com/Fantom-foundation/go-opera/utils/eventid" + "github.com/Fantom-foundation/go-opera/utils/randat" "github.com/Fantom-foundation/go-opera/utils/rlpstore" "github.com/Fantom-foundation/go-opera/utils/switchable" ) @@ -165,14 +166,16 @@ func (s *Store) Close() { } func (s *Store) IsCommitNeeded() bool { - return s.isCommitNeeded(100, 100) + // randomize flushing criteria for each epoch so that nodes would desynchronize flushes + ratio := 900 + randat.RandAt(uint64(s.GetEpoch()))%100 + return s.isCommitNeeded(ratio, ratio) } -func (s *Store) 
isCommitNeeded(sc, tc int) bool { - period := s.cfg.MaxNonFlushedPeriod * time.Duration(sc) / 100 - size := (s.cfg.MaxNonFlushedSize / 2) * tc / 100 +func (s *Store) isCommitNeeded(sc, tc uint64) bool { + period := s.cfg.MaxNonFlushedPeriod * time.Duration(sc) / 1000 + size := (uint64(s.cfg.MaxNonFlushedSize) / 2) * tc / 1000 return time.Since(s.prevFlushTime) > period || - s.dbs.NotFlushedSizeEst() > size + uint64(s.dbs.NotFlushedSizeEst()) > size } // commitEVM commits EVM storage diff --git a/utils/randat/rand_at.go b/utils/randat/rand_at.go new file mode 100644 index 000000000..59e29d719 --- /dev/null +++ b/utils/randat/rand_at.go @@ -0,0 +1,26 @@ +package randat + +import ( + "math/rand" +) + +type cached struct { + seed uint64 + r uint64 +} + +var ( + gSeed = rand.Int63() + cache = cached{} +) + +// RandAt returns random number with seed +// Not safe for concurrent use +func RandAt(seed uint64) uint64 { + if seed != 0 && cache.seed == seed { + return cache.r + } + cache.seed = seed + cache.r = rand.New(rand.NewSource(gSeed ^ int64(seed))).Uint64() + return cache.r +} From 9ea3863b15a8d2ff317e490ce256dba31ae1685b Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Sun, 7 Aug 2022 15:04:25 +0300 Subject: [PATCH 60/87] update lachesis-base version --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index dbbfb559e..3192c9f57 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/Fantom-foundation/go-opera go 1.14 require ( - github.com/Fantom-foundation/lachesis-base v0.0.0-20220725175240-f0338b7a6194 + github.com/Fantom-foundation/lachesis-base v0.0.0-20220801182728-3625ff2a857a github.com/allegro/bigcache v1.2.1 // indirect github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40 // indirect github.com/cespare/cp v1.1.1 diff --git a/go.sum b/go.sum index a50e9a93f..300da858e 100644 --- a/go.sum +++ b/go.sum @@ -41,8 +41,8 @@ github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc6 h1:QAOlomYdCyfP2Z8fRFEfzYYPxMLqUMbvppOGhOoVarM= github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc6/go.mod h1:IeQDjWCNBj/QiWIPosfF6/kRC6pHPNs7W7LfBzjj+P4= -github.com/Fantom-foundation/lachesis-base v0.0.0-20220725175240-f0338b7a6194 h1:5GX97Ta7SeIyrpct33VT1UxOBGO7gBxq2FkPvAdsJgc= -github.com/Fantom-foundation/lachesis-base v0.0.0-20220725175240-f0338b7a6194/go.mod h1:CFMonaK1v/QHwpjLszuycFDgLXNazZqZxciwhKwXDVQ= +github.com/Fantom-foundation/lachesis-base v0.0.0-20220801182728-3625ff2a857a h1:SGDwBtqXnnZEgcnpqbv3h0Hhag9ub9qtTtYjvCadbvg= +github.com/Fantom-foundation/lachesis-base v0.0.0-20220801182728-3625ff2a857a/go.mod h1:CFMonaK1v/QHwpjLszuycFDgLXNazZqZxciwhKwXDVQ= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= From 7ceceb6cc02e7d8d3efcdc15b3e101cbebba2212 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Tue, 9 Aug 2022 00:31:09 +0300 Subject: [PATCH 61/87] fix --config flag with partial config --- cmd/opera/launcher/config_custom.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/opera/launcher/config_custom.go b/cmd/opera/launcher/config_custom.go index 69e50e6c6..b842e5b06 100644 --- 
a/cmd/opera/launcher/config_custom.go +++ b/cmd/opera/launcher/config_custom.go @@ -61,7 +61,7 @@ func (c *config) UnmarshalTOML(input []byte) error { defaultBootstrapNodes, defaultBootstrapNodesV5 := isBootstrapNodesDefault(ast) type rawCfg config - var raw rawCfg + var raw = rawCfg(*c) err = toml.UnmarshalTable(ast, &raw) if err != nil { return err From 2d66c487e32ae5a3e4562cbbacc82eee44536265 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Tue, 9 Aug 2022 12:36:16 +0300 Subject: [PATCH 62/87] fix compaction during multidb migration --- go.mod | 2 +- go.sum | 4 +- integration/legacy_migrate.go | 243 +++++++++++++++++++++------------- 3 files changed, 154 insertions(+), 95 deletions(-) diff --git a/go.mod b/go.mod index 3192c9f57..f1b957075 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/Fantom-foundation/go-opera go 1.14 require ( - github.com/Fantom-foundation/lachesis-base v0.0.0-20220801182728-3625ff2a857a + github.com/Fantom-foundation/lachesis-base v0.0.0-20220811115347-2434192efadb github.com/allegro/bigcache v1.2.1 // indirect github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40 // indirect github.com/cespare/cp v1.1.1 diff --git a/go.sum b/go.sum index 300da858e..fe4b9f44b 100644 --- a/go.sum +++ b/go.sum @@ -41,8 +41,8 @@ github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc6 h1:QAOlomYdCyfP2Z8fRFEfzYYPxMLqUMbvppOGhOoVarM= github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc6/go.mod h1:IeQDjWCNBj/QiWIPosfF6/kRC6pHPNs7W7LfBzjj+P4= -github.com/Fantom-foundation/lachesis-base v0.0.0-20220801182728-3625ff2a857a h1:SGDwBtqXnnZEgcnpqbv3h0Hhag9ub9qtTtYjvCadbvg= -github.com/Fantom-foundation/lachesis-base v0.0.0-20220801182728-3625ff2a857a/go.mod h1:CFMonaK1v/QHwpjLszuycFDgLXNazZqZxciwhKwXDVQ= +github.com/Fantom-foundation/lachesis-base v0.0.0-20220811115347-2434192efadb h1:kyCcawuYwMe+FJy0KYRFqGWrHYNoGixO9QTIceD8ngM= +github.com/Fantom-foundation/lachesis-base v0.0.0-20220811115347-2434192efadb/go.mod h1:CFMonaK1v/QHwpjLszuycFDgLXNazZqZxciwhKwXDVQ= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= diff --git a/integration/legacy_migrate.go b/integration/legacy_migrate.go index 8ddf830f4..d53569898 100644 --- a/integration/legacy_migrate.go +++ b/integration/legacy_migrate.go @@ -19,69 +19,108 @@ import ( "github.com/syndtr/goleveldb/leveldb/opt" ) -func moveDB(src, dst_ kvdb.Store, name, dir string) error { - dst := batched.Wrap(dst_) - defer dst.Flush() - - // start from previously written data, if any +func lastKey(db kvdb.Store) []byte { var start []byte - for b := 0xff; b > 0; b-- { - if !isEmptyDB(table.New(dst, []byte{byte(b)})) { - start = []byte{byte(b)} - break + for { + for b := 0xff; b >= 0; b-- { + if !isEmptyDB(table.New(db, append(start, byte(b)))) { + start = append(start, byte(b)) + break + } + if b == 0 { + return start + } + } + } +} + +type transformTask struct { + openSrc func() kvdb.Store + openDst func() kvdb.Store + name string + dir string + dropSrc bool +} + +func transform(m transformTask) error { + openDst := func() *batched.Store { + return batched.Wrap(m.openDst()) + } + openSrc := func() *batched.Store { + return 
batched.Wrap(m.openSrc()) + } + src := openSrc() + defer func() { + _ = src.Close() + if m.dropSrc { + src.Drop() } + }() + if isEmptyDB(src) { + return nil } + dst := openDst() - const batchKeys = 1000000 + const batchKeys = 5000000 keys := make([][]byte, 0, batchKeys) - values := make([][]byte, 0, batchKeys) - it := src.NewIterator(nil, start) + // start from previously written data, if any + it := src.NewIterator(nil, lastKey(dst)) defer func() { - // wrap with func because 'it' may be reopened below + // wrap with func because DBs may be reopened below it.Release() + _ = dst.Close() }() - log.Info("Transforming DB layout", "db", name) + log.Info("Transforming DB layout", "db", m.name) for next := true; next; { for len(keys) < batchKeys { next = it.Next() if !next { break } - keys = append(keys, common.CopyBytes(it.Key())) - values = append(values, common.CopyBytes(it.Value())) - } - for i := 0; i < len(keys); i++ { - err := dst.Put(keys[i], values[i]) + err := dst.Put(it.Key(), it.Value()) if err != nil { - return err + utils.Fatalf("Failed to put: %v", err) } + keys = append(keys, common.CopyBytes(it.Key())) + } + err := dst.Flush() + if err != nil { + utils.Fatalf("Failed to flush: %v", err) } - freeSpace, err := getFreeDiskSpace(dir) + freeSpace, err := getFreeDiskSpace(m.dir) if err != nil { log.Error("Failed to retrieve free disk space", "err", err) - } else if freeSpace < 10*opt.GiB { + } else if freeSpace < 20*opt.GiB { return errors.New("not enough disk space") } else if len(keys) > 0 && freeSpace < 100*opt.GiB { log.Warn("Running out of disk space. Trimming source DB records", "space_GB", freeSpace/opt.GiB) - err = dst.Flush() - if err != nil { - return err - } _, _ = dst.Stat("async_flush") // release iterator so that DB could release data it.Release() + // erase data from src for _, k := range keys { _ = src.Delete(k) } _ = src.Compact(keys[0], keys[len(keys)-1]) + // reopen source DB too if it doesn't release data + if freeSpace < 50*opt.GiB { + _ = src.Close() + src = openSrc() + } it = src.NewIterator(nil, keys[len(keys)-1]) } keys = keys[:0] - values = values[:0] } return nil } +func mustTransform(m transformTask) { + err := transform(m) + if err != nil { + utils.Fatalf(err.Error()) + } +} + func isEmptyDB(db kvdb.Iteratee) bool { it := db.NewIterator(nil, nil) defer it.Release() @@ -96,6 +135,26 @@ func fileExists(filename string) bool { return !info.IsDir() } +type closebaleTable struct { + *table.Table + backend kvdb.Store +} + +func (s *closebaleTable) Close() error { + return s.backend.Close() +} + +func (s *closebaleTable) Drop() { + s.backend.Drop() +} + +func newClosableTable(db kvdb.Store, prefix []byte) *closebaleTable { + return &closebaleTable{ + Table: table.New(db, prefix), + backend: db, + } +} + func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer) error { if !isEmpty(path.Join(chaindataDir, "gossip")) { // migrate DB layout @@ -116,83 +175,83 @@ func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer) error { } else { oldDBs = pebble.NewProducer(chaindataDir, cacheFn) } + openOldDB := func(name string) kvdb.Store { + db, err := oldDBs.OpenDB(name) + if err != nil { + utils.Fatalf("Failed to open %s old DB: %v", name, err) + } + return db + } + openNewDB := func(name string) kvdb.Store { + db, err := dbs.OpenDB(name) + if err != nil { + utils.Fatalf("Failed to open %s DB: %v", name, err) + } + return db + } - var newDB kvdb.Store // move lachesis, lachesis-%d and gossip-%d DBs for _, name := range oldDBs.Names() { if 
strings.HasPrefix(name, "lachesis") || strings.HasPrefix(name, "gossip-") { - oldDB, err := oldDBs.OpenDB(name) - if err != nil { - return err - } - newDB, err = dbs.OpenDB(name) - if err != nil { - return err - } - err = moveDB(skipkeys.Wrap(oldDB, MetadataPrefix), newDB, name, chaindataDir) - newDB.Close() - oldDB.Close() - if err != nil { - return err - } - oldDB.Drop() + mustTransform(transformTask{ + openSrc: func() kvdb.Store { + return skipkeys.Wrap(openOldDB(name), MetadataPrefix) + }, + openDst: func() kvdb.Store { + return openNewDB(name) + }, + name: name, + dir: chaindataDir, + }) } } // move gossip DB - gossipDB, err := oldDBs.OpenDB("gossip") - if err != nil { - return err - } - if err = func() error { - defer gossipDB.Close() + // move logs - // move logs - newDB, err = dbs.OpenDB("evm-logs/r") - if err != nil { - return err - } - err = moveDB(table.New(gossipDB, []byte("Lr")), newDB, "gossip/Lr", chaindataDir) - newDB.Close() - if err != nil { - return err - } - - newDB, err = dbs.OpenDB("evm-logs/t") - err = moveDB(table.New(gossipDB, []byte("Lt")), newDB, "gossip/Lt", chaindataDir) - newDB.Close() - if err != nil { - return err - } + mustTransform(transformTask{ + openSrc: func() kvdb.Store { + return newClosableTable(openOldDB("gossip"), []byte("Lr")) + }, + openDst: func() kvdb.Store { + return openNewDB("evm-logs/r") + }, + name: "gossip/Lr", + dir: chaindataDir, + }) + mustTransform(transformTask{ + openSrc: func() kvdb.Store { + return newClosableTable(openOldDB("gossip"), []byte("Lt")) + }, + openDst: func() kvdb.Store { + return openNewDB("evm-logs/t") + }, + name: "gossip/Lt", + dir: chaindataDir, + }) - // skip 0 prefix, as it contains flushID - for b := 1; b <= 0xff; b++ { - if b == int('L') { - // logs are already moved above - continue - } - if isEmptyDB(table.New(gossipDB, []byte{byte(b)})) { - continue - } - if b == int('M') || b == int('r') || b == int('x') || b == int('X') { - newDB, err = dbs.OpenDB("evm/" + string([]byte{byte(b)})) - } else { - newDB, err = dbs.OpenDB("gossip/" + string([]byte{byte(b)})) - } - if err != nil { - return err - } - err = moveDB(table.New(gossipDB, []byte{byte(b)}), newDB, fmt.Sprintf("gossip/%c", rune(b)), chaindataDir) - newDB.Close() - if err != nil { - return err - } + // skip 0 prefix, as it contains flushID + for b := 1; b <= 0xff; b++ { + if b == int('L') { + // logs are already moved above + continue } - return nil - }(); err != nil { - return err + mustTransform(transformTask{ + openSrc: func() kvdb.Store { + return newClosableTable(openOldDB("gossip"), []byte{byte(b)}) + }, + openDst: func() kvdb.Store { + if b == int('M') || b == int('r') || b == int('x') || b == int('X') { + return openNewDB("evm/" + string([]byte{byte(b)})) + } else { + return openNewDB("gossip/" + string([]byte{byte(b)})) + } + }, + name: fmt.Sprintf("gossip/%c", rune(b)), + dir: chaindataDir, + dropSrc: b == 0xff, + }) } - gossipDB.Drop() } return nil From 5eb2a23302f0e4f7219e760bc1336c9722e212cc Mon Sep 17 00:00:00 2001 From: Jan Kalina Date: Tue, 9 Aug 2022 12:55:28 +0200 Subject: [PATCH 63/87] Rename LLR prefixes --- gossip/store.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/gossip/store.go b/gossip/store.go index e9441c051..63168afcc 100644 --- a/gossip/store.go +++ b/gossip/store.go @@ -51,15 +51,15 @@ type Store struct { // API-only BlockHashes kvdb.Store `table:"B"` - LlrState kvdb.Store `table:"!"` - LlrBlockResults kvdb.Store `table:"@"` - LlrEpochResults kvdb.Store `table:"#"` - LlrBlockVotes 
kvdb.Store `table:"$"` - LlrBlockVotesIndex kvdb.Store `table:"%"` - LlrEpochVotes kvdb.Store `table:"^"` - LlrEpochVoteIndex kvdb.Store `table:"&"` - LlrLastBlockVotes kvdb.Store `table:"*"` - LlrLastEpochVote kvdb.Store `table:"("` + LlrState kvdb.Store `table:"S"` + LlrBlockResults kvdb.Store `table:"R"` + LlrEpochResults kvdb.Store `table:"Q"` + LlrBlockVotes kvdb.Store `table:"T"` + LlrBlockVotesIndex kvdb.Store `table:"J"` + LlrEpochVotes kvdb.Store `table:"E"` + LlrEpochVoteIndex kvdb.Store `table:"I"` + LlrLastBlockVotes kvdb.Store `table:"G"` + LlrLastEpochVote kvdb.Store `table:"F"` } prevFlushTime time.Time From f5ecc551d5f7d89c752a3f8865f85c5d3bd59324 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Thu, 11 Aug 2022 21:49:22 +0300 Subject: [PATCH 64/87] use L prefix for lachesis DB --- integration/routing.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration/routing.go b/integration/routing.go index b1e3b8c5b..dea18ef0e 100644 --- a/integration/routing.go +++ b/integration/routing.go @@ -22,7 +22,7 @@ func DefaultRoutingConfig() RoutingConfig { "lachesis": { Type: "pebble-fsh", Name: "main", - Table: ">", + Table: "L", }, "gossip": { Type: "pebble-fsh", From 876b966ac4239dea428a0ba21964a5e562b61ff6 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Thu, 11 Aug 2022 22:04:41 +0300 Subject: [PATCH 65/87] multidb migration migrates non-alphanumerical to alphanumerical --- integration/legacy_migrate.go | 35 +++++++++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/integration/legacy_migrate.go b/integration/legacy_migrate.go index d53569898..4cb621798 100644 --- a/integration/legacy_migrate.go +++ b/integration/legacy_migrate.go @@ -155,6 +155,37 @@ func newClosableTable(db kvdb.Store, prefix []byte) *closebaleTable { } } +func translateGossipPrefix(p byte) byte { + if p == byte('!') { + return byte('S') + } + if p == byte('@') { + return byte('R') + } + if p == byte('#') { + return byte('Q') + } + if p == byte('$') { + return byte('T') + } + if p == byte('%') { + return byte('J') + } + if p == byte('^') { + return byte('E') + } + if p == byte('&') { + return byte('I') + } + if p == byte('*') { + return byte('G') + } + if p == byte('(') { + return byte('F') + } + return p +} + func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer) error { if !isEmpty(path.Join(chaindataDir, "gossip")) { // migrate DB layout @@ -207,8 +238,8 @@ func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer) error { } // move gossip DB - // move logs + // move logs mustTransform(transformTask{ openSrc: func() kvdb.Store { return newClosableTable(openOldDB("gossip"), []byte("Lr")) @@ -244,7 +275,7 @@ func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer) error { if b == int('M') || b == int('r') || b == int('x') || b == int('X') { return openNewDB("evm/" + string([]byte{byte(b)})) } else { - return openNewDB("gossip/" + string([]byte{byte(b)})) + return openNewDB("gossip/" + string([]byte{translateGossipPrefix(byte(b))})) } }, name: fmt.Sprintf("gossip/%c", rune(b)), From f0957410693dad28a2dfc602715e4d8d7551f994 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Fri, 12 Aug 2022 17:27:14 +0300 Subject: [PATCH 66/87] update go-ethereum to v1.10.8-ftm-rc7 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f1b957075..d244feaea 100644 --- a/go.mod +++ b/go.mod @@ -44,6 +44,6 @@ require ( gopkg.in/urfave/cli.v1 v1.20.0 ) -replace 
github.com/ethereum/go-ethereum => github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc6 +replace github.com/ethereum/go-ethereum => github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc7 replace github.com/dvyukov/go-fuzz => github.com/guzenok/go-fuzz v0.0.0-20210103140116-f9104dfb626f diff --git a/go.sum b/go.sum index fe4b9f44b..aef4dd77b 100644 --- a/go.sum +++ b/go.sum @@ -39,8 +39,8 @@ github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mo github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc6 h1:QAOlomYdCyfP2Z8fRFEfzYYPxMLqUMbvppOGhOoVarM= -github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc6/go.mod h1:IeQDjWCNBj/QiWIPosfF6/kRC6pHPNs7W7LfBzjj+P4= +github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc7 h1:wt1bhCeb3E0qzDV2iuv2qYlNDO/j8DFJfzVHSnevVOU= +github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc7/go.mod h1:IeQDjWCNBj/QiWIPosfF6/kRC6pHPNs7W7LfBzjj+P4= github.com/Fantom-foundation/lachesis-base v0.0.0-20220811115347-2434192efadb h1:kyCcawuYwMe+FJy0KYRFqGWrHYNoGixO9QTIceD8ngM= github.com/Fantom-foundation/lachesis-base v0.0.0-20220811115347-2434192efadb/go.mod h1:CFMonaK1v/QHwpjLszuycFDgLXNazZqZxciwhKwXDVQ= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= From efe49f1ba7606059c6db7617d9ba46777b87541b Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Fri, 12 Aug 2022 17:36:24 +0300 Subject: [PATCH 67/87] version up --- version/version.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/version/version.go b/version/version.go index e369f6650..5334017e6 100644 --- a/version/version.go +++ b/version/version.go @@ -10,8 +10,8 @@ import ( func init() { params.VersionMajor = 1 // Major version component of the current release params.VersionMinor = 1 // Minor version component of the current release - params.VersionPatch = 1 // Patch version component of the current release - params.VersionMeta = "rc.2" // Version metadata to append to the version string + params.VersionPatch = 2 // Patch version component of the current release + params.VersionMeta = "rc.1" // Version metadata to append to the version string } func BigToString(b *big.Int) string { From 704d0aece2aa6ecc49dc48bc99c147fa26bc39af Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Sat, 27 Aug 2022 01:58:52 +0700 Subject: [PATCH 68/87] add support for legacy DB presets --- cmd/opera/launcher/config.go | 31 +++ cmd/opera/launcher/launcher.go | 2 + integration/assembly.go | 5 +- integration/db.go | 78 +----- integration/legacy_migrate.go | 129 ++++++---- integration/presets.go | 426 +++++++++++++++++++++++++++++++++ integration/routing.go | 46 ---- 7 files changed, 544 insertions(+), 173 deletions(-) create mode 100644 integration/presets.go diff --git a/cmd/opera/launcher/config.go b/cmd/opera/launcher/config.go index f315d4eac..338c2be0a 100644 --- a/cmd/opera/launcher/config.go +++ b/cmd/opera/launcher/config.go @@ -111,6 +111,15 @@ var ( Name: "exitwhensynced.epoch", Usage: "Exits after synchronisation reaches the required epoch", } + + DBMigrationModeFlag = cli.StringFlag{ + Name: "db.migration.mode", + Usage: "MultiDB migration mode ('reformat' or 'rebuild')", + } + DBPresetFlag = cli.StringFlag{ + Name: "db.preset", + Usage: "DBs layout preset ('pbl-1' or 'ldb-1' or 
'legacy-ldb' or 'legacy-pbl')", + } ) type GenesisTemplate struct { @@ -336,6 +345,27 @@ func gossipStoreConfigWithFlags(ctx *cli.Context, src gossip.StoreConfig) (gossi return cfg, nil } +func setDBConfig(ctx *cli.Context, cfg integration.DBsConfig, cacheRatio cachescale.Func) integration.DBsConfig { + if ctx.GlobalIsSet(DBPresetFlag.Name) { + preset := ctx.GlobalString(DBPresetFlag.Name) + if preset == "pbl-1" { + cfg = integration.Pbl1DBsConfig(cacheRatio.U64, uint64(utils.MakeDatabaseHandles())) + } else if preset == "ldb-1" { + utils.Fatalf("--%s: ldb-1 preset is not unsupported yet", DBPresetFlag.Name) + } else if preset == "legacy-ldb" { + cfg = integration.LdbLegacyDBsConfig(cacheRatio.U64, uint64(utils.MakeDatabaseHandles())) + } else if preset == "legacy-pbl" { + cfg = integration.PblLegacyDBsConfig(cacheRatio.U64, uint64(utils.MakeDatabaseHandles())) + } else { + utils.Fatalf("--%s must be 'pbl-1', 'ldb-1', 'legacy-pbl' or 'legacy-ldb'", DBPresetFlag.Name) + } + } + if ctx.GlobalIsSet(DBMigrationModeFlag.Name) { + cfg.MigrationMode = ctx.GlobalString(DBMigrationModeFlag.Name) + } + return cfg +} + func nodeConfigWithFlags(ctx *cli.Context, cfg node.Config) node.Config { utils.SetNodeConfig(ctx, &cfg) @@ -411,6 +441,7 @@ func mayMakeAllConfigs(ctx *cli.Context) (*config, error) { return nil, err } cfg.Node = nodeConfigWithFlags(ctx, cfg.Node) + cfg.DBs = setDBConfig(ctx, cfg.DBs, cacheRatio) err = setValidator(ctx, &cfg.Emitter) if err != nil { diff --git a/cmd/opera/launcher/launcher.go b/cmd/opera/launcher/launcher.go index 9310df924..efade3247 100644 --- a/cmd/opera/launcher/launcher.go +++ b/cmd/opera/launcher/launcher.go @@ -122,6 +122,8 @@ func initFlags() { validatorPasswordFlag, SyncModeFlag, GCModeFlag, + DBPresetFlag, + DBMigrationModeFlag, } legacyRpcFlags = []cli.Flag{ utils.NoUSBFlag, diff --git a/integration/assembly.go b/integration/assembly.go index 85bd1ebcf..7c9915875 100644 --- a/integration/assembly.go +++ b/integration/assembly.go @@ -242,6 +242,9 @@ func makeEngine(chaindataDir string, g *genesis.Genesis, genesisProc bool, cfg C // MakeEngine makes consensus engine from config. 
func MakeEngine(chaindataDir string, g *genesis.Genesis, cfg Configs) (*abft.Lachesis, *vecmt.Index, *gossip.Store, *abft.Store, gossip.BlockProc, func() error) { // Legacy DBs migrate + if cfg.DBs.MigrationMode != "reformat" && cfg.DBs.MigrationMode != "rebuild" && cfg.DBs.MigrationMode != "" { + utils.Fatalf("MigrationMode must be 'reformat' or 'rebuild'") + } if !isEmpty(path.Join(chaindataDir, "gossip")) { MakeDBDirs(chaindataDir) genesisProducers, _ := SupportedDBs(chaindataDir, cfg.DBs.GenesisCache) @@ -249,7 +252,7 @@ func MakeEngine(chaindataDir string, g *genesis.Genesis, cfg Configs) (*abft.Lac if err != nil { utils.Fatalf("Failed to make engine: %v", err) } - err = migrateLegacyDBs(chaindataDir, dbs) + err = migrateLegacyDBs(chaindataDir, dbs, cfg.DBs.MigrationMode) _ = dbs.Close() if err != nil { utils.Fatalf("Failed to migrate state: %v", err) diff --git a/integration/db.go b/integration/db.go index 99f39b10f..1a24e265c 100644 --- a/integration/db.go +++ b/integration/db.go @@ -18,16 +18,16 @@ import ( "github.com/Fantom-foundation/lachesis-base/utils/fmtfilter" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/log" - "github.com/syndtr/goleveldb/leveldb/opt" "github.com/Fantom-foundation/go-opera/gossip" "github.com/Fantom-foundation/go-opera/utils/dbutil/asyncflushproducer" ) type DBsConfig struct { - Routing RoutingConfig - RuntimeCache DBsCacheConfig - GenesisCache DBsCacheConfig + Routing RoutingConfig + RuntimeCache DBsCacheConfig + GenesisCache DBsCacheConfig + MigrationMode string } type DBCacheConfig struct { @@ -39,75 +39,7 @@ type DBsCacheConfig struct { Table map[string]DBCacheConfig } -func DefaultDBsConfig(scale func(uint64) uint64, fdlimit uint64) DBsConfig { - return DBsConfig{ - Routing: DefaultRoutingConfig(), - RuntimeCache: DefaultRuntimeDBsCacheConfig(scale, fdlimit), - GenesisCache: DefaultGenesisDBsCacheConfig(scale, fdlimit), - } -} - -func DefaultRuntimeDBsCacheConfig(scale func(uint64) uint64, fdlimit uint64) DBsCacheConfig { - return DBsCacheConfig{ - Table: map[string]DBCacheConfig{ - "evm-data": { - Cache: scale(242 * opt.MiB), - Fdlimit: fdlimit*242/700 + 1, - }, - "evm-logs": { - Cache: scale(110 * opt.MiB), - Fdlimit: fdlimit*110/700 + 1, - }, - "main": { - Cache: scale(186 * opt.MiB), - Fdlimit: fdlimit*186/700 + 1, - }, - "events": { - Cache: scale(87 * opt.MiB), - Fdlimit: fdlimit*87/700 + 1, - }, - "epoch-%d": { - Cache: scale(75 * opt.MiB), - Fdlimit: fdlimit*75/700 + 1, - }, - "": { - Cache: 64 * opt.MiB, - Fdlimit: fdlimit/100 + 1, - }, - }, - } -} - -func DefaultGenesisDBsCacheConfig(scale func(uint64) uint64, fdlimit uint64) DBsCacheConfig { - return DBsCacheConfig{ - Table: map[string]DBCacheConfig{ - "main": { - Cache: scale(1024 * opt.MiB), - Fdlimit: fdlimit*1024/3072 + 1, - }, - "evm-data": { - Cache: scale(1024 * opt.MiB), - Fdlimit: fdlimit*1024/3072 + 1, - }, - "evm-logs": { - Cache: scale(1024 * opt.MiB), - Fdlimit: fdlimit*1024/3072 + 1, - }, - "events": { - Cache: scale(1 * opt.MiB), - Fdlimit: fdlimit*1/3072 + 1, - }, - "epoch-%d": { - Cache: scale(1 * opt.MiB), - Fdlimit: fdlimit*1/3072 + 1, - }, - "": { - Cache: 16 * opt.MiB, - Fdlimit: fdlimit/100 + 1, - }, - }, - } -} +var DefaultDBsConfig = Pbl1DBsConfig func SupportedDBs(chaindataDir string, cfg DBsCacheConfig) (map[multidb.TypeName]kvdb.IterableDBProducer, map[multidb.TypeName]kvdb.FullDBProducer) { if chaindataDir == "inmemory" || chaindataDir == "" { diff --git a/integration/legacy_migrate.go b/integration/legacy_migrate.go index 
4cb621798..019bf1036 100644 --- a/integration/legacy_migrate.go +++ b/integration/legacy_migrate.go @@ -186,7 +186,7 @@ func translateGossipPrefix(p byte) byte { return p } -func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer) error { +func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer, mode string) error { if !isEmpty(path.Join(chaindataDir, "gossip")) { // migrate DB layout cacheFn, err := dbCacheFdlimit(DBsCacheConfig{ @@ -201,10 +201,13 @@ func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer) error { return err } var oldDBs kvdb.IterableDBProducer + var oldDBsType string if fileExists(path.Join(chaindataDir, "gossip", "LOG")) { oldDBs = leveldb.NewProducer(chaindataDir, cacheFn) + oldDBsType = "ldb" } else { oldDBs = pebble.NewProducer(chaindataDir, cacheFn) + oldDBsType = "pbl" } openOldDB := func(name string) kvdb.Store { db, err := oldDBs.OpenDB(name) @@ -221,67 +224,87 @@ func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer) error { return db } - // move lachesis, lachesis-%d and gossip-%d DBs - for _, name := range oldDBs.Names() { - if strings.HasPrefix(name, "lachesis") || strings.HasPrefix(name, "gossip-") { - mustTransform(transformTask{ - openSrc: func() kvdb.Store { - return skipkeys.Wrap(openOldDB(name), MetadataPrefix) - }, - openDst: func() kvdb.Store { - return openNewDB(name) - }, - name: name, - dir: chaindataDir, - }) + if mode == "rebuild" { + // move lachesis, lachesis-%d and gossip-%d DBs + for _, name := range oldDBs.Names() { + if strings.HasPrefix(name, "lachesis") || strings.HasPrefix(name, "gossip-") { + mustTransform(transformTask{ + openSrc: func() kvdb.Store { + return skipkeys.Wrap(openOldDB(name), MetadataPrefix) + }, + openDst: func() kvdb.Store { + return openNewDB(name) + }, + name: name, + dir: chaindataDir, + }) + } } - } - // move gossip DB + // move gossip DB - // move logs - mustTransform(transformTask{ - openSrc: func() kvdb.Store { - return newClosableTable(openOldDB("gossip"), []byte("Lr")) - }, - openDst: func() kvdb.Store { - return openNewDB("evm-logs/r") - }, - name: "gossip/Lr", - dir: chaindataDir, - }) - mustTransform(transformTask{ - openSrc: func() kvdb.Store { - return newClosableTable(openOldDB("gossip"), []byte("Lt")) - }, - openDst: func() kvdb.Store { - return openNewDB("evm-logs/t") - }, - name: "gossip/Lt", - dir: chaindataDir, - }) - - // skip 0 prefix, as it contains flushID - for b := 1; b <= 0xff; b++ { - if b == int('L') { - // logs are already moved above - continue - } + // move logs mustTransform(transformTask{ openSrc: func() kvdb.Store { - return newClosableTable(openOldDB("gossip"), []byte{byte(b)}) + return newClosableTable(openOldDB("gossip"), []byte("Lr")) }, openDst: func() kvdb.Store { - if b == int('M') || b == int('r') || b == int('x') || b == int('X') { - return openNewDB("evm/" + string([]byte{byte(b)})) - } else { - return openNewDB("gossip/" + string([]byte{translateGossipPrefix(byte(b))})) - } + return openNewDB("evm-logs/r") + }, + name: "gossip/Lr", + dir: chaindataDir, + }) + mustTransform(transformTask{ + openSrc: func() kvdb.Store { + return newClosableTable(openOldDB("gossip"), []byte("Lt")) + }, + openDst: func() kvdb.Store { + return openNewDB("evm-logs/t") }, - name: fmt.Sprintf("gossip/%c", rune(b)), - dir: chaindataDir, - dropSrc: b == 0xff, + name: "gossip/Lt", + dir: chaindataDir, }) + + // skip 0 prefix, as it contains flushID + for b := 1; b <= 0xff; b++ { + if b == int('L') { + // logs are already moved above + 
continue + } + mustTransform(transformTask{ + openSrc: func() kvdb.Store { + return newClosableTable(openOldDB("gossip"), []byte{byte(b)}) + }, + openDst: func() kvdb.Store { + if b == int('M') || b == int('r') || b == int('x') || b == int('X') { + return openNewDB("evm/" + string([]byte{byte(b)})) + } else { + return openNewDB("gossip/" + string([]byte{translateGossipPrefix(byte(b))})) + } + }, + name: fmt.Sprintf("gossip/%c", rune(b)), + dir: chaindataDir, + dropSrc: b == 0xff, + }) + } + } else if mode == "reformat" { + if oldDBsType == "ldb" { + _ = os.Rename(path.Join(chaindataDir, "gossip"), path.Join(chaindataDir, "leveldb-fsh", "main")) + for _, name := range oldDBs.Names() { + if strings.HasPrefix(name, "lachesis") || strings.HasPrefix(name, "gossip-") { + _ = os.Rename(path.Join(chaindataDir, name), path.Join(chaindataDir, "leveldb-fsh", name)) + } + } + } else { + _ = os.Rename(path.Join(chaindataDir, "gossip"), path.Join(chaindataDir, "pebble-fsh", "main")) + for _, name := range oldDBs.Names() { + if strings.HasPrefix(name, "lachesis") || strings.HasPrefix(name, "gossip-") { + _ = os.Rename(path.Join(chaindataDir, name), path.Join(chaindataDir, "pebble-fsh", name)) + } + } + } + } else { + return errors.New("missing --db.migration.mode flag") } } diff --git a/integration/presets.go b/integration/presets.go new file mode 100644 index 000000000..dda4baf83 --- /dev/null +++ b/integration/presets.go @@ -0,0 +1,426 @@ +package integration + +import ( + "github.com/Fantom-foundation/lachesis-base/kvdb/multidb" + "github.com/syndtr/goleveldb/leveldb/opt" +) + +/* + * pbl-1 config + */ + +func Pbl1DBsConfig(scale func(uint64) uint64, fdlimit uint64) DBsConfig { + return DBsConfig{ + Routing: Pbl1RoutingConfig(), + RuntimeCache: Pbl1RuntimeDBsCacheConfig(scale, fdlimit), + GenesisCache: Pbl1GenesisDBsCacheConfig(scale, fdlimit), + } +} + +func Pbl1RoutingConfig() RoutingConfig { + return RoutingConfig{ + Table: map[string]multidb.Route{ + "": { + Type: "pebble-fsh", + }, + "lachesis": { + Type: "pebble-fsh", + Name: "main", + Table: "L", + }, + "gossip": { + Type: "pebble-fsh", + Name: "main", + }, + "evm": { + Type: "pebble-fsh", + Name: "main", + }, + "gossip/e": { + Type: "pebble-fsh", + Name: "events", + }, + "evm/M": { + Type: "pebble-drc", + Name: "evm-data", + }, + "evm-logs": { + Type: "pebble-fsh", + Name: "evm-logs", + }, + "gossip-%d": { + Type: "leveldb-fsh", + Name: "epoch-%d", + Table: "G", + }, + "lachesis-%d": { + Type: "leveldb-fsh", + Name: "epoch-%d", + Table: "L", + NoDrop: true, + }, + }, + } +} + +func Pbl1RuntimeDBsCacheConfig(scale func(uint64) uint64, fdlimit uint64) DBsCacheConfig { + return DBsCacheConfig{ + Table: map[string]DBCacheConfig{ + "evm-data": { + Cache: scale(242 * opt.MiB), + Fdlimit: fdlimit*242/700 + 1, + }, + "evm-logs": { + Cache: scale(110 * opt.MiB), + Fdlimit: fdlimit*110/700 + 1, + }, + "main": { + Cache: scale(186 * opt.MiB), + Fdlimit: fdlimit*186/700 + 1, + }, + "events": { + Cache: scale(87 * opt.MiB), + Fdlimit: fdlimit*87/700 + 1, + }, + "epoch-%d": { + Cache: scale(75 * opt.MiB), + Fdlimit: fdlimit*75/700 + 1, + }, + "": { + Cache: 64 * opt.MiB, + Fdlimit: fdlimit/100 + 1, + }, + }, + } +} + +func Pbl1GenesisDBsCacheConfig(scale func(uint64) uint64, fdlimit uint64) DBsCacheConfig { + return DBsCacheConfig{ + Table: map[string]DBCacheConfig{ + "main": { + Cache: scale(1024 * opt.MiB), + Fdlimit: fdlimit*1024/3072 + 1, + }, + "evm-data": { + Cache: scale(1024 * opt.MiB), + Fdlimit: fdlimit*1024/3072 + 1, + }, + "evm-logs": { + Cache: 
scale(1024 * opt.MiB), + Fdlimit: fdlimit*1024/3072 + 1, + }, + "events": { + Cache: scale(1 * opt.MiB), + Fdlimit: fdlimit*1/3072 + 1, + }, + "epoch-%d": { + Cache: scale(1 * opt.MiB), + Fdlimit: fdlimit*1/3072 + 1, + }, + "": { + Cache: 16 * opt.MiB, + Fdlimit: fdlimit/100 + 1, + }, + }, + } +} + +/* + * legacy-ldb config + */ + +func LdbLegacyDBsConfig(scale func(uint64) uint64, fdlimit uint64) DBsConfig { + return DBsConfig{ + Routing: LdbLegacyRoutingConfig(), + RuntimeCache: LdbLegacyRuntimeDBsCacheConfig(scale, fdlimit), + GenesisCache: LdbLegacyGenesisDBsCacheConfig(scale, fdlimit), + } +} + +func LdbLegacyRoutingConfig() RoutingConfig { + return RoutingConfig{ + Table: map[string]multidb.Route{ + "": { + Type: "leveldb-fsh", + }, + "lachesis": { + Type: "leveldb-fsh", + Name: "lachesis", + }, + "gossip": { + Type: "leveldb-fsh", + Name: "main", + }, + + "gossip/S": { + Type: "leveldb-fsh", + Name: "main", + Table: "!", + }, + "gossip/R": { + Type: "leveldb-fsh", + Name: "main", + Table: "@", + }, + "gossip/Q": { + Type: "leveldb-fsh", + Name: "main", + Table: "#", + }, + + "gossip/T": { + Type: "leveldb-fsh", + Name: "main", + Table: "$", + }, + "gossip/J": { + Type: "leveldb-fsh", + Name: "main", + Table: "%", + }, + "gossip/E": { + Type: "leveldb-fsh", + Name: "main", + Table: "^", + }, + + "gossip/I": { + Type: "leveldb-fsh", + Name: "main", + Table: "&", + }, + "gossip/G": { + Type: "leveldb-fsh", + Name: "main", + Table: "*", + }, + "gossip/F": { + Type: "leveldb-fsh", + Name: "main", + Table: "(", + }, + + "evm": { + Type: "leveldb-fsh", + Name: "main", + }, + "evm-logs": { + Type: "leveldb-fsh", + Name: "main", + Table: "L", + }, + "gossip-%d": { + Type: "leveldb-fsh", + Name: "gossip-%d", + }, + "lachesis-%d": { + Type: "leveldb-fsh", + Name: "lachesis-%d", + }, + }, + } +} + +func LdbLegacyRuntimeDBsCacheConfig(scale func(uint64) uint64, fdlimit uint64) DBsCacheConfig { + return DBsCacheConfig{ + Table: map[string]DBCacheConfig{ + "main": { + Cache: scale(564 * opt.MiB), + Fdlimit: fdlimit*564/700 + 1, + }, + "lachesis": { + Cache: scale(8 * opt.MiB), + Fdlimit: fdlimit*8/700 + 1, + }, + "gossip-%d": { + Cache: scale(64 * opt.MiB), + Fdlimit: fdlimit*64/700 + 1, + }, + "lachesis-%d": { + Cache: scale(64 * opt.MiB), + Fdlimit: fdlimit*64/700 + 1, + }, + "": { + Cache: 64 * opt.MiB, + Fdlimit: fdlimit/100 + 1, + }, + }, + } +} + +func LdbLegacyGenesisDBsCacheConfig(scale func(uint64) uint64, fdlimit uint64) DBsCacheConfig { + return DBsCacheConfig{ + Table: map[string]DBCacheConfig{ + "main": { + Cache: scale(3072 * opt.MiB), + Fdlimit: fdlimit*3072 + 1, + }, + "lachesis": { + Cache: scale(1 * opt.MiB), + Fdlimit: fdlimit*1/3072 + 1, + }, + "gossip-%d": { + Cache: scale(1 * opt.MiB), + Fdlimit: fdlimit*1/3072 + 1, + }, + "lachesis-%d": { + Cache: scale(1 * opt.MiB), + Fdlimit: fdlimit*1/3072 + 1, + }, + "": { + Cache: 16 * opt.MiB, + Fdlimit: fdlimit/100 + 1, + }, + }, + } +} + +/* + * legacy-pbl config + */ + +func PblLegacyDBsConfig(scale func(uint64) uint64, fdlimit uint64) DBsConfig { + return DBsConfig{ + Routing: PblLegacyRoutingConfig(), + RuntimeCache: PblLegacyRuntimeDBsCacheConfig(scale, fdlimit), + GenesisCache: PblLegacyGenesisDBsCacheConfig(scale, fdlimit), + } +} + +func PblLegacyRoutingConfig() RoutingConfig { + return RoutingConfig{ + Table: map[string]multidb.Route{ + "": { + Type: "pebble-fsh", + }, + "lachesis": { + Type: "pebble-fsh", + Name: "lachesis", + }, + "gossip": { + Type: "pebble-fsh", + Name: "main", + }, + + "gossip/S": { + Type: "pebble-fsh", 
+ Name: "main", + Table: "!", + }, + "gossip/R": { + Type: "pebble-fsh", + Name: "main", + Table: "@", + }, + "gossip/Q": { + Type: "pebble-fsh", + Name: "main", + Table: "#", + }, + + "gossip/T": { + Type: "pebble-fsh", + Name: "main", + Table: "$", + }, + "gossip/J": { + Type: "pebble-fsh", + Name: "main", + Table: "%", + }, + "gossip/E": { + Type: "pebble-fsh", + Name: "main", + Table: "^", + }, + + "gossip/I": { + Type: "pebble-fsh", + Name: "main", + Table: "&", + }, + "gossip/G": { + Type: "pebble-fsh", + Name: "main", + Table: "*", + }, + "gossip/F": { + Type: "pebble-fsh", + Name: "main", + Table: "(", + }, + + "evm": { + Type: "pebble-fsh", + Name: "main", + }, + "evm-logs": { + Type: "pebble-fsh", + Name: "main", + Table: "L", + }, + "gossip-%d": { + Type: "pebble-fsh", + Name: "gossip-%d", + }, + "lachesis-%d": { + Type: "pebble-fsh", + Name: "lachesis-%d", + }, + }, + } +} + +func PblLegacyRuntimeDBsCacheConfig(scale func(uint64) uint64, fdlimit uint64) DBsCacheConfig { + return DBsCacheConfig{ + Table: map[string]DBCacheConfig{ + "main": { + Cache: scale(564 * opt.MiB), + Fdlimit: fdlimit*564/700 + 1, + }, + "lachesis": { + Cache: scale(8 * opt.MiB), + Fdlimit: fdlimit*8/700 + 1, + }, + "gossip-%d": { + Cache: scale(64 * opt.MiB), + Fdlimit: fdlimit*64/700 + 1, + }, + "lachesis-%d": { + Cache: scale(64 * opt.MiB), + Fdlimit: fdlimit*64/700 + 1, + }, + "": { + Cache: 64 * opt.MiB, + Fdlimit: fdlimit/100 + 1, + }, + }, + } +} + +func PblLegacyGenesisDBsCacheConfig(scale func(uint64) uint64, fdlimit uint64) DBsCacheConfig { + return DBsCacheConfig{ + Table: map[string]DBCacheConfig{ + "main": { + Cache: scale(3072 * opt.MiB), + Fdlimit: fdlimit*3072 + 1, + }, + "lachesis": { + Cache: scale(1 * opt.MiB), + Fdlimit: fdlimit*1/3072 + 1, + }, + "gossip-%d": { + Cache: scale(1 * opt.MiB), + Fdlimit: fdlimit*1/3072 + 1, + }, + "lachesis-%d": { + Cache: scale(1 * opt.MiB), + Fdlimit: fdlimit*1/3072 + 1, + }, + "": { + Cache: 16 * opt.MiB, + Fdlimit: fdlimit/100 + 1, + }, + }, + } +} diff --git a/integration/routing.go b/integration/routing.go index dea18ef0e..1ca3f7563 100644 --- a/integration/routing.go +++ b/integration/routing.go @@ -13,52 +13,6 @@ type RoutingConfig struct { Table map[string]multidb.Route } -func DefaultRoutingConfig() RoutingConfig { - return RoutingConfig{ - Table: map[string]multidb.Route{ - "": { - Type: "pebble-fsh", - }, - "lachesis": { - Type: "pebble-fsh", - Name: "main", - Table: "L", - }, - "gossip": { - Type: "pebble-fsh", - Name: "main", - }, - "evm": { - Type: "pebble-fsh", - Name: "main", - }, - "gossip/e": { - Type: "pebble-fsh", - Name: "events", - }, - "evm/M": { - Type: "pebble-drc", - Name: "evm-data", - }, - "evm-logs": { - Type: "pebble-fsh", - Name: "evm-logs", - }, - "gossip-%d": { - Type: "leveldb-fsh", - Name: "epoch-%d", - Table: "G", - }, - "lachesis-%d": { - Type: "leveldb-fsh", - Name: "epoch-%d", - Table: "L", - NoDrop: true, - }, - }, - } -} - func MakeMultiProducer(rawProducers map[multidb.TypeName]kvdb.IterableDBProducer, scopedProducers map[multidb.TypeName]kvdb.FullDBProducer, cfg RoutingConfig) (kvdb.FullDBProducer, error) { cachedProducers := make(map[multidb.TypeName]kvdb.FullDBProducer) var flushID []byte From d3b5971e5b2c9323c2f882d4e6cc9bc0e73d8977 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Mon, 29 Aug 2022 17:32:58 +0700 Subject: [PATCH 69/87] add ldb-1 DB preset and use it by default --- cmd/opera/launcher/config.go | 2 +- integration/db.go | 2 - integration/presets.go | 93 +++++++++++++++++++++++++++++++++++- 3 
files changed, 93 insertions(+), 4 deletions(-) diff --git a/cmd/opera/launcher/config.go b/cmd/opera/launcher/config.go index 338c2be0a..59f1689d1 100644 --- a/cmd/opera/launcher/config.go +++ b/cmd/opera/launcher/config.go @@ -351,7 +351,7 @@ func setDBConfig(ctx *cli.Context, cfg integration.DBsConfig, cacheRatio cachesc if preset == "pbl-1" { cfg = integration.Pbl1DBsConfig(cacheRatio.U64, uint64(utils.MakeDatabaseHandles())) } else if preset == "ldb-1" { - utils.Fatalf("--%s: ldb-1 preset is not unsupported yet", DBPresetFlag.Name) + cfg = integration.Ldb1DBsConfig(cacheRatio.U64, uint64(utils.MakeDatabaseHandles())) } else if preset == "legacy-ldb" { cfg = integration.LdbLegacyDBsConfig(cacheRatio.U64, uint64(utils.MakeDatabaseHandles())) } else if preset == "legacy-pbl" { diff --git a/integration/db.go b/integration/db.go index 1a24e265c..8ed91bd4c 100644 --- a/integration/db.go +++ b/integration/db.go @@ -39,8 +39,6 @@ type DBsCacheConfig struct { Table map[string]DBCacheConfig } -var DefaultDBsConfig = Pbl1DBsConfig - func SupportedDBs(chaindataDir string, cfg DBsCacheConfig) (map[multidb.TypeName]kvdb.IterableDBProducer, map[multidb.TypeName]kvdb.FullDBProducer) { if chaindataDir == "inmemory" || chaindataDir == "" { chaindataDir, _ = ioutil.TempDir("", "opera-tmp") diff --git a/integration/presets.go b/integration/presets.go index dda4baf83..1a9683419 100644 --- a/integration/presets.go +++ b/integration/presets.go @@ -5,6 +5,8 @@ import ( "github.com/syndtr/goleveldb/leveldb/opt" ) +var DefaultDBsConfig = Ldb1DBsConfig + /* * pbl-1 config */ @@ -26,7 +28,7 @@ func Pbl1RoutingConfig() RoutingConfig { "lachesis": { Type: "pebble-fsh", Name: "main", - Table: "L", + Table: "C", }, "gossip": { Type: "pebble-fsh", @@ -125,6 +127,95 @@ func Pbl1GenesisDBsCacheConfig(scale func(uint64) uint64, fdlimit uint64) DBsCac } } +/* + * ldb-1 config + */ + +func Ldb1DBsConfig(scale func(uint64) uint64, fdlimit uint64) DBsConfig { + return DBsConfig{ + Routing: Ldb1RoutingConfig(), + RuntimeCache: Ldb1RuntimeDBsCacheConfig(scale, fdlimit), + GenesisCache: Ldb1GenesisDBsCacheConfig(scale, fdlimit), + } +} + +func Ldb1RoutingConfig() RoutingConfig { + return RoutingConfig{ + Table: map[string]multidb.Route{ + "": { + Type: "leveldb-fsh", + }, + "lachesis": { + Type: "leveldb-fsh", + Name: "main", + Table: "C", + }, + "gossip": { + Type: "leveldb-fsh", + Name: "main", + }, + "evm": { + Type: "leveldb-fsh", + Name: "main", + }, + "evm-logs": { + Type: "leveldb-fsh", + Name: "main", + Table: "L", + }, + "gossip-%d": { + Type: "leveldb-fsh", + Name: "epoch-%d", + Table: "G", + }, + "lachesis-%d": { + Type: "leveldb-fsh", + Name: "epoch-%d", + Table: "L", + NoDrop: true, + }, + }, + } +} + +func Ldb1RuntimeDBsCacheConfig(scale func(uint64) uint64, fdlimit uint64) DBsCacheConfig { + return DBsCacheConfig{ + Table: map[string]DBCacheConfig{ + "main": { + Cache: scale(625 * opt.MiB), + Fdlimit: fdlimit*625/700 + 1, + }, + "epoch-%d": { + Cache: scale(75 * opt.MiB), + Fdlimit: fdlimit*75/700 + 1, + }, + "": { + Cache: 64 * opt.MiB, + Fdlimit: fdlimit/100 + 1, + }, + }, + } +} + +func Ldb1GenesisDBsCacheConfig(scale func(uint64) uint64, fdlimit uint64) DBsCacheConfig { + return DBsCacheConfig{ + Table: map[string]DBCacheConfig{ + "main": { + Cache: scale(3072 * opt.MiB), + Fdlimit: fdlimit, + }, + "epoch-%d": { + Cache: scale(1 * opt.MiB), + Fdlimit: fdlimit*1/3072 + 1, + }, + "": { + Cache: 16 * opt.MiB, + Fdlimit: fdlimit/100 + 1, + }, + }, + } +} + /* * legacy-ldb config */ From 
9c6fd7b79f780e2f0d5c882f02a88ac73c13b197 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Mon, 29 Aug 2022 17:36:35 +0700 Subject: [PATCH 70/87] go fmt --- gossip/c_block_callbacks.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/gossip/c_block_callbacks.go b/gossip/c_block_callbacks.go index 6e2dabc89..a0a8231ba 100644 --- a/gossip/c_block_callbacks.go +++ b/gossip/c_block_callbacks.go @@ -49,9 +49,9 @@ var ( snapshotStorageReadTimer = metrics.GetOrRegisterTimer("chain/snapshot/storage/reads", nil) snapshotCommitTimer = metrics.GetOrRegisterTimer("chain/snapshot/commits", nil) - blockInsertTimer = metrics.GetOrRegisterTimer("chain/inserts", nil) - blockExecutionTimer = metrics.GetOrRegisterTimer("chain/execution", nil) - blockWriteTimer = metrics.GetOrRegisterTimer("chain/write", nil) + blockInsertTimer = metrics.GetOrRegisterTimer("chain/inserts", nil) + blockExecutionTimer = metrics.GetOrRegisterTimer("chain/execution", nil) + blockWriteTimer = metrics.GetOrRegisterTimer("chain/write", nil) ) type ExtendedTxPosition struct { From dff7354d884f5889adb1f03e9d5de6e5cc970055 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Tue, 30 Aug 2022 16:53:45 +0700 Subject: [PATCH 71/87] enforce correct DB layout with --db.migration.mode=reformat --- integration/assembly.go | 2 +- integration/legacy_migrate.go | 20 +++++++++++++++++++- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/integration/assembly.go b/integration/assembly.go index 7c9915875..b480e15b0 100644 --- a/integration/assembly.go +++ b/integration/assembly.go @@ -252,7 +252,7 @@ func MakeEngine(chaindataDir string, g *genesis.Genesis, cfg Configs) (*abft.Lac if err != nil { utils.Fatalf("Failed to make engine: %v", err) } - err = migrateLegacyDBs(chaindataDir, dbs, cfg.DBs.MigrationMode) + err = migrateLegacyDBs(chaindataDir, dbs, cfg.DBs.MigrationMode, cfg.DBs.Routing) _ = dbs.Close() if err != nil { utils.Fatalf("Failed to migrate state: %v", err) diff --git a/integration/legacy_migrate.go b/integration/legacy_migrate.go index 019bf1036..c54710888 100644 --- a/integration/legacy_migrate.go +++ b/integration/legacy_migrate.go @@ -186,7 +186,19 @@ func translateGossipPrefix(p byte) byte { return p } -func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer, mode string) error { +func equalRoutingConfig(a, b RoutingConfig) bool { + if len(a.Table) != len(b.Table) { + return false + } + for k, v := range a.Table { + if b.Table[k] != v { + return false + } + } + return true +} + +func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer, mode string, layout RoutingConfig) error { if !isEmpty(path.Join(chaindataDir, "gossip")) { // migrate DB layout cacheFn, err := dbCacheFdlimit(DBsCacheConfig{ @@ -289,6 +301,9 @@ func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer, mode st } } else if mode == "reformat" { if oldDBsType == "ldb" { + if !equalRoutingConfig(layout, LdbLegacyRoutingConfig()) { + return errors.New("reformatting DBs: missing --db.preset=legacy-ldb flag") + } _ = os.Rename(path.Join(chaindataDir, "gossip"), path.Join(chaindataDir, "leveldb-fsh", "main")) for _, name := range oldDBs.Names() { if strings.HasPrefix(name, "lachesis") || strings.HasPrefix(name, "gossip-") { @@ -296,6 +311,9 @@ func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer, mode st } } } else { + if !equalRoutingConfig(layout, PblLegacyRoutingConfig()) { + return errors.New("reformatting DBs: missing --db.preset=legacy-pbl flag") + } _ = 
os.Rename(path.Join(chaindataDir, "gossip"), path.Join(chaindataDir, "pebble-fsh", "main")) for _, name := range oldDBs.Names() { if strings.HasPrefix(name, "lachesis") || strings.HasPrefix(name, "gossip-") { From 5fa11e4002980f76539278977e9c8e99b23f1b9c Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Tue, 30 Aug 2022 16:54:40 +0700 Subject: [PATCH 72/87] add error handling for renaming operations of --db.migration.mode=reformat --- integration/legacy_migrate.go | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/integration/legacy_migrate.go b/integration/legacy_migrate.go index c54710888..9b6b43ebd 100644 --- a/integration/legacy_migrate.go +++ b/integration/legacy_migrate.go @@ -304,20 +304,32 @@ func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer, mode st if !equalRoutingConfig(layout, LdbLegacyRoutingConfig()) { return errors.New("reformatting DBs: missing --db.preset=legacy-ldb flag") } - _ = os.Rename(path.Join(chaindataDir, "gossip"), path.Join(chaindataDir, "leveldb-fsh", "main")) + err = os.Rename(path.Join(chaindataDir, "gossip"), path.Join(chaindataDir, "leveldb-fsh", "main")) + if err != nil { + return err + } for _, name := range oldDBs.Names() { if strings.HasPrefix(name, "lachesis") || strings.HasPrefix(name, "gossip-") { - _ = os.Rename(path.Join(chaindataDir, name), path.Join(chaindataDir, "leveldb-fsh", name)) + err = os.Rename(path.Join(chaindataDir, name), path.Join(chaindataDir, "leveldb-fsh", name)) + if err != nil { + return err + } } } } else { if !equalRoutingConfig(layout, PblLegacyRoutingConfig()) { return errors.New("reformatting DBs: missing --db.preset=legacy-pbl flag") } - _ = os.Rename(path.Join(chaindataDir, "gossip"), path.Join(chaindataDir, "pebble-fsh", "main")) + err = os.Rename(path.Join(chaindataDir, "gossip"), path.Join(chaindataDir, "pebble-fsh", "main")) + if err != nil { + return err + } for _, name := range oldDBs.Names() { if strings.HasPrefix(name, "lachesis") || strings.HasPrefix(name, "gossip-") { - _ = os.Rename(path.Join(chaindataDir, name), path.Join(chaindataDir, "pebble-fsh", name)) + err = os.Rename(path.Join(chaindataDir, name), path.Join(chaindataDir, "pebble-fsh", name)) + if err != nil { + return err + } } } } From 526d6a8076df65f47997d156fc8bfc29c0df2ef3 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Tue, 30 Aug 2022 17:03:23 +0700 Subject: [PATCH 73/87] rm duplicate isEmpty(path.Join(chaindataDir, gossip)) check --- integration/legacy_migrate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration/legacy_migrate.go b/integration/legacy_migrate.go index 9b6b43ebd..9e5076026 100644 --- a/integration/legacy_migrate.go +++ b/integration/legacy_migrate.go @@ -199,7 +199,7 @@ func equalRoutingConfig(a, b RoutingConfig) bool { } func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer, mode string, layout RoutingConfig) error { - if !isEmpty(path.Join(chaindataDir, "gossip")) { + { // didn't erase the brackets to avoid massive code changes // migrate DB layout cacheFn, err := dbCacheFdlimit(DBsCacheConfig{ Table: map[string]DBCacheConfig{ From 4b2bdf218853c7652be344e7a4b08d6474a855df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ha=20=C4=90ANG?= Date: Fri, 17 Jun 2022 11:21:27 +0700 Subject: [PATCH 74/87] add disk size metric for multidb testing --- cmd/opera/launcher/dbcmd.go | 4 ++-- ethapi/api.go | 5 +---- integration/metric.go | 29 ++++++++++++++++++++++++++--- 3 files changed, 29 insertions(+), 9 deletions(-) diff --git 
a/cmd/opera/launcher/dbcmd.go b/cmd/opera/launcher/dbcmd.go index 6e5355e91..b5626fb4b 100644 --- a/cmd/opera/launcher/dbcmd.go +++ b/cmd/opera/launcher/dbcmd.go @@ -156,12 +156,12 @@ func compact(ctx *cli.Context) error { } func showLeveldbStats(db ethdb.Stater) { - if stats, err := db.Stat("leveldb.stats"); err != nil { + if stats, err := db.Stat("stats"); err != nil { log.Warn("Failed to read database stats", "error", err) } else { fmt.Println(stats) } - if ioStats, err := db.Stat("leveldb.iostats"); err != nil { + if ioStats, err := db.Stat("iostats"); err != nil { log.Warn("Failed to read database iostats", "error", err) } else { fmt.Println(ioStats) diff --git a/ethapi/api.go b/ethapi/api.go index 70aa74320..af693399d 100644 --- a/ethapi/api.go +++ b/ethapi/api.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" "math/big" - "strings" "time" "github.com/Fantom-foundation/lachesis-base/hash" @@ -2066,9 +2065,7 @@ func NewPrivateDebugAPI(b Backend) *PrivateDebugAPI { // ChaindbProperty returns leveldb properties of the key-value database. func (api *PrivateDebugAPI) ChaindbProperty(property string) (string, error) { if property == "" { - property = "leveldb.stats" - } else if !strings.HasPrefix(property, "leveldb.") { - property = "leveldb." + property + property = "stats" } return api.b.ChainDb().Stat(property) } diff --git a/integration/metric.go b/integration/metric.go index c6386b4de..f2d8bc160 100644 --- a/integration/metric.go +++ b/integration/metric.go @@ -24,6 +24,7 @@ type DBProducerWithMetrics struct { type StoreWithMetrics struct { kvdb.Store + diskSizeGauge metrics.Gauge // Gauge for tracking the size of all the levels in the database diskReadMeter metrics.Meter // Meter for measuring the effective amount of data read diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written @@ -74,8 +75,26 @@ func (ds *StoreWithMetrics) meter(refresh time.Duration) { defer timer.Stop() // Iterate ad infinitum and collect the stats for i := 1; errc == nil && merr == nil; i++ { + // Retrieve the database size + diskSize, err := ds.Stat("disk.size") + if err != nil { + ds.log.Error("Failed to read database stats", "err", err) + merr = err + continue + } + var nDiskSize float64 + if n, err := fmt.Sscanf(diskSize, "Size(MB):%f", &nDiskSize); n != 1 || err != nil { + ds.log.Error("Bad syntax of disk size entry", "size", diskSize) + merr = err + continue + } + // Update all the disk size meters + if ds.diskSizeGauge != nil { + ds.diskSizeGauge.Update(int64(nDiskSize * 1024 * 1024)) + } + // Retrieve the database iostats. 
- ioStats, err := ds.Stat("leveldb.iostats") + ioStats, err := ds.Stat("iostats") if err != nil { ds.log.Error("Failed to read database iostats", "err", err) merr = err @@ -127,11 +146,15 @@ func (db *DBProducerWithMetrics) OpenDB(name string) (kvdb.Store, error) { return nil, err } dm := WrapStoreWithMetrics(ds) + // disk size gauge should be meter separatly for each db name; otherwise, + // the last db siae metric will overwrite all the previoius one + dm.diskSizeGauge = metrics.GetOrRegisterGauge("opera/chaindata/"+name+"/disk/size", nil) + logger := log.New("database", name) + dm.log = logger + if strings.HasPrefix(name, "gossip-") || strings.HasPrefix(name, "lachesis-") { name = "epochs" } - logger := log.New("database", name) - dm.log = logger dm.diskReadMeter = metrics.GetOrRegisterMeter("opera/chaindata/"+name+"/disk/read", nil) dm.diskWriteMeter = metrics.GetOrRegisterMeter("opera/chaindata/"+name+"/disk/write", nil) From 72b163b108612bcd0edc955c5e14e130b5d8e219 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ha=20=C4=90ANG?= Date: Tue, 2 Aug 2022 10:28:00 +0700 Subject: [PATCH 75/87] upgrade lachesis-base version --- go.mod | 2 ++ go.sum | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index d244feaea..a2e9a1d1b 100644 --- a/go.mod +++ b/go.mod @@ -46,4 +46,6 @@ require ( replace github.com/ethereum/go-ethereum => github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc7 +replace github.com/Fantom-foundation/lachesis-base => github.com/hadv/lachesis-base v0.0.0-20220802031837-77b6ebe3697f + replace github.com/dvyukov/go-fuzz => github.com/guzenok/go-fuzz v0.0.0-20210103140116-f9104dfb626f diff --git a/go.sum b/go.sum index aef4dd77b..78b5c7d10 100644 --- a/go.sum +++ b/go.sum @@ -41,8 +41,6 @@ github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc7 h1:wt1bhCeb3E0qzDV2iuv2qYlNDO/j8DFJfzVHSnevVOU= github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc7/go.mod h1:IeQDjWCNBj/QiWIPosfF6/kRC6pHPNs7W7LfBzjj+P4= -github.com/Fantom-foundation/lachesis-base v0.0.0-20220811115347-2434192efadb h1:kyCcawuYwMe+FJy0KYRFqGWrHYNoGixO9QTIceD8ngM= -github.com/Fantom-foundation/lachesis-base v0.0.0-20220811115347-2434192efadb/go.mod h1:CFMonaK1v/QHwpjLszuycFDgLXNazZqZxciwhKwXDVQ= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -287,6 +285,8 @@ github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29 h1:sezaKh github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/guzenok/go-fuzz v0.0.0-20210103140116-f9104dfb626f h1:GQpcb9ywkaawJfZCFeIcrSWlsZFRTsig42e03dkzc+I= github.com/guzenok/go-fuzz v0.0.0-20210103140116-f9104dfb626f/go.mod h1:Q5On640X2Z0YzKOijx9GVhUu/kvHnk9aKoWGMlRDMtc= +github.com/hadv/lachesis-base v0.0.0-20220802031837-77b6ebe3697f h1:xwWrxjbSFAPhYAaYrStG0dIqLyRKMlH7w0c7x8uUGuM= +github.com/hadv/lachesis-base v0.0.0-20220802031837-77b6ebe3697f/go.mod h1:CFMonaK1v/QHwpjLszuycFDgLXNazZqZxciwhKwXDVQ= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= From e2470f6c708f3857f9bd60908ea213699fd97c54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ha=20=C4=90ANG?= Date: Tue, 2 Aug 2022 10:31:41 +0700 Subject: [PATCH 76/87] update method name for more relevant with multidb --- cmd/opera/launcher/dbcmd.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/opera/launcher/dbcmd.go b/cmd/opera/launcher/dbcmd.go index b5626fb4b..9228dd427 100644 --- a/cmd/opera/launcher/dbcmd.go +++ b/cmd/opera/launcher/dbcmd.go @@ -136,7 +136,7 @@ func compact(ctx *cli.Context) error { } log.Info("Stats before compaction", "db", humanName) - showLeveldbStats(db) + showDbStats(db) log.Info("Triggering compaction", "db", humanName) for b := byte(0); b < 255; b++ { @@ -148,14 +148,14 @@ func compact(ctx *cli.Context) error { } log.Info("Stats after compaction", "db", humanName) - showLeveldbStats(db) + showDbStats(db) } } return nil } -func showLeveldbStats(db ethdb.Stater) { +func showDbStats(db ethdb.Stater) { if stats, err := db.Stat("stats"); err != nil { log.Warn("Failed to read database stats", "error", err) } else { From dfd9a64d7fced9a5dda71632b031226c401954a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ha=20=C4=90ANG?= Date: Tue, 2 Aug 2022 22:33:17 +0700 Subject: [PATCH 77/87] update disk.size metric collector --- integration/metric.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/integration/metric.go b/integration/metric.go index f2d8bc160..e6ad21231 100644 --- a/integration/metric.go +++ b/integration/metric.go @@ -82,15 +82,15 @@ func (ds *StoreWithMetrics) meter(refresh time.Duration) { merr = err continue } - var nDiskSize float64 - if n, err := fmt.Sscanf(diskSize, "Size(MB):%f", &nDiskSize); n != 1 || err != nil { + var nDiskSize int64 + if n, err := fmt.Sscanf(diskSize, "Size(B):%d", &nDiskSize); n != 1 || err != nil { ds.log.Error("Bad syntax of disk size entry", "size", diskSize) merr = err continue } // Update all the disk size meters if ds.diskSizeGauge != nil { - ds.diskSizeGauge.Update(int64(nDiskSize * 1024 * 1024)) + ds.diskSizeGauge.Update(nDiskSize) } // Retrieve the database iostats. 
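The hunk above switches the parsed Stat("disk.size") format from "Size(MB):<float>" to "Size(B):<int>" and drops the MiB-to-bytes conversion, presumably because the upgraded producers now report the size in bytes directly. A minimal illustrative sketch of a parser that tolerates both formats (not part of this patch; the helper name parseDiskSize is made up) could look like:

package integration

import "fmt"

// parseDiskSize is an illustrative helper (not in the patch) accepting both the
// old "Size(MB):<float>" and the new "Size(B):<int>" stat formats; it returns
// the database size in bytes.
func parseDiskSize(stat string) (int64, error) {
	var b int64
	if n, err := fmt.Sscanf(stat, "Size(B):%d", &b); n == 1 && err == nil {
		return b, nil
	}
	var mb float64
	if n, err := fmt.Sscanf(stat, "Size(MB):%f", &mb); n == 1 && err == nil {
		return int64(mb * 1024 * 1024), nil
	}
	return 0, fmt.Errorf("bad syntax of disk size entry: %q", stat)
}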
@@ -141,6 +141,7 @@ func (ds *StoreWithMetrics) meter(refresh time.Duration) { } func (db *DBProducerWithMetrics) OpenDB(name string) (kvdb.Store, error) { + println("name: " + name) ds, err := db.FlushableDBProducer.OpenDB(name) if err != nil { return nil, err From f3bbaf56bfdae395eb158889d227b51cc29ead5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ha=20=C4=90ANG?= Date: Tue, 2 Aug 2022 22:44:30 +0700 Subject: [PATCH 78/87] accumulate all sub database metric to a single one --- integration/metric.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/integration/metric.go b/integration/metric.go index e6ad21231..d6e06e765 100644 --- a/integration/metric.go +++ b/integration/metric.go @@ -141,7 +141,6 @@ func (ds *StoreWithMetrics) meter(refresh time.Duration) { } func (db *DBProducerWithMetrics) OpenDB(name string) (kvdb.Store, error) { - println("name: " + name) ds, err := db.FlushableDBProducer.OpenDB(name) if err != nil { return nil, err @@ -156,6 +155,16 @@ func (db *DBProducerWithMetrics) OpenDB(name string) (kvdb.Store, error) { if strings.HasPrefix(name, "gossip-") || strings.HasPrefix(name, "lachesis-") { name = "epochs" } + + if strings.HasPrefix(name, "gossip/") { + name = "gossip" + } + if strings.HasPrefix(name, "evm/") { + name = "evm" + } + if strings.HasPrefix(name, "evm-logs/") { + name = "evm-logs" + } dm.diskReadMeter = metrics.GetOrRegisterMeter("opera/chaindata/"+name+"/disk/read", nil) dm.diskWriteMeter = metrics.GetOrRegisterMeter("opera/chaindata/"+name+"/disk/write", nil) From 220d8d1c728c4cc7e2e719046f0762fb5949fb01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ha=20=C4=90ANG?= Date: Thu, 1 Sep 2022 12:25:32 +0700 Subject: [PATCH 79/87] try to make metrics wrapper for physical db producers --- integration/assembly.go | 10 +--------- integration/db.go | 10 ++++++++++ integration/metric.go | 21 +++++---------------- 3 files changed, 16 insertions(+), 25 deletions(-) diff --git a/integration/assembly.go b/integration/assembly.go index 85bd1ebcf..6ccef1061 100644 --- a/integration/assembly.go +++ b/integration/assembly.go @@ -16,7 +16,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/metrics" "github.com/status-im/keycard-go/hexutils" "github.com/Fantom-foundation/go-opera/gossip" @@ -193,14 +192,7 @@ func makeEngine(chaindataDir string, g *genesis.Genesis, genesisProc bool, cfg C if err != nil { return nil, nil, nil, nil, gossip.BlockProc{}, nil, err } - var wdbs kvdb.FlushableDBProducer - // final DB wrappers - if metrics.Enabled { - wdbs = WrapDatabaseWithMetrics(dbs) - } else { - wdbs = dbs - } - gdb, cdb := getStores(wdbs, cfg) + gdb, cdb := getStores(dbs, cfg) defer func() { if err != nil { gdb.Close() diff --git a/integration/db.go b/integration/db.go index 99f39b10f..e6ce9b940 100644 --- a/integration/db.go +++ b/integration/db.go @@ -18,6 +18,7 @@ import ( "github.com/Fantom-foundation/lachesis-base/utils/fmtfilter" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/syndtr/goleveldb/leveldb/opt" "github.com/Fantom-foundation/go-opera/gossip" @@ -125,6 +126,15 @@ func SupportedDBs(chaindataDir string, cfg DBsCacheConfig) (map[multidb.TypeName pebbleFlg := pebble.NewProducer(path.Join(chaindataDir, "pebble-flg"), cacher) pebbleDrc := pebble.NewProducer(path.Join(chaindataDir, "pebble-drc"), cacher) + if metrics.Enabled { + leveldbFsh = 
WrapDatabaseWithMetrics(leveldbFsh) + leveldbFlg = WrapDatabaseWithMetrics(leveldbFlg) + leveldbDrc = WrapDatabaseWithMetrics(leveldbDrc) + pebbleFsh = WrapDatabaseWithMetrics(pebbleFsh) + pebbleFlg = WrapDatabaseWithMetrics(pebbleFlg) + pebbleDrc = WrapDatabaseWithMetrics(pebbleDrc) + } + return map[multidb.TypeName]kvdb.IterableDBProducer{ "leveldb-fsh": leveldbFsh, "leveldb-flg": leveldbFlg, diff --git a/integration/metric.go b/integration/metric.go index d6e06e765..f01cff8e8 100644 --- a/integration/metric.go +++ b/integration/metric.go @@ -18,7 +18,7 @@ const ( ) type DBProducerWithMetrics struct { - kvdb.FlushableDBProducer + kvdb.IterableDBProducer } type StoreWithMetrics struct { @@ -34,7 +34,7 @@ type StoreWithMetrics struct { log log.Logger // Contextual logger tracking the database path } -func WrapDatabaseWithMetrics(db kvdb.FlushableDBProducer) kvdb.FlushableDBProducer { +func WrapDatabaseWithMetrics(db kvdb.IterableDBProducer) kvdb.IterableDBProducer { wrapper := &DBProducerWithMetrics{db} return wrapper } @@ -141,7 +141,7 @@ func (ds *StoreWithMetrics) meter(refresh time.Duration) { } func (db *DBProducerWithMetrics) OpenDB(name string) (kvdb.Store, error) { - ds, err := db.FlushableDBProducer.OpenDB(name) + ds, err := db.IterableDBProducer.OpenDB(name) if err != nil { return nil, err } @@ -149,22 +149,11 @@ func (db *DBProducerWithMetrics) OpenDB(name string) (kvdb.Store, error) { // disk size gauge should be meter separatly for each db name; otherwise, // the last db siae metric will overwrite all the previoius one dm.diskSizeGauge = metrics.GetOrRegisterGauge("opera/chaindata/"+name+"/disk/size", nil) - logger := log.New("database", name) - dm.log = logger - if strings.HasPrefix(name, "gossip-") || strings.HasPrefix(name, "lachesis-") { name = "epochs" } - - if strings.HasPrefix(name, "gossip/") { - name = "gossip" - } - if strings.HasPrefix(name, "evm/") { - name = "evm" - } - if strings.HasPrefix(name, "evm-logs/") { - name = "evm-logs" - } + logger := log.New("database", name) + dm.log = logger dm.diskReadMeter = metrics.GetOrRegisterMeter("opera/chaindata/"+name+"/disk/read", nil) dm.diskWriteMeter = metrics.GetOrRegisterMeter("opera/chaindata/"+name+"/disk/write", nil) From c873ab26974aab69009152368e864867c807ae93 Mon Sep 17 00:00:00 2001 From: Egor Lysenko <68006922+uprendis@users.noreply.github.com> Date: Thu, 1 Sep 2022 23:45:48 +0700 Subject: [PATCH 80/87] refactor DBs presets switch Co-authored-by: rus-alex <70567906+rus-alex@users.noreply.github.com> --- cmd/opera/launcher/config.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/cmd/opera/launcher/config.go b/cmd/opera/launcher/config.go index 59f1689d1..a9be1b711 100644 --- a/cmd/opera/launcher/config.go +++ b/cmd/opera/launcher/config.go @@ -348,15 +348,16 @@ func gossipStoreConfigWithFlags(ctx *cli.Context, src gossip.StoreConfig) (gossi func setDBConfig(ctx *cli.Context, cfg integration.DBsConfig, cacheRatio cachescale.Func) integration.DBsConfig { if ctx.GlobalIsSet(DBPresetFlag.Name) { preset := ctx.GlobalString(DBPresetFlag.Name) - if preset == "pbl-1" { + switch preset { + case "pbl-1": cfg = integration.Pbl1DBsConfig(cacheRatio.U64, uint64(utils.MakeDatabaseHandles())) - } else if preset == "ldb-1" { + case "ldb-1": cfg = integration.Ldb1DBsConfig(cacheRatio.U64, uint64(utils.MakeDatabaseHandles())) - } else if preset == "legacy-ldb" { + case "legacy-ldb": cfg = integration.LdbLegacyDBsConfig(cacheRatio.U64, uint64(utils.MakeDatabaseHandles())) - } else if preset == 
"legacy-pbl" { + case "legacy-pbl": cfg = integration.PblLegacyDBsConfig(cacheRatio.U64, uint64(utils.MakeDatabaseHandles())) - } else { + default: utils.Fatalf("--%s must be 'pbl-1', 'ldb-1', 'legacy-pbl' or 'legacy-ldb'", DBPresetFlag.Name) } } From 859c0c2856c830846790764f70f34c8cafa7925d Mon Sep 17 00:00:00 2001 From: Egor Lysenko <68006922+uprendis@users.noreply.github.com> Date: Thu, 1 Sep 2022 23:48:38 +0700 Subject: [PATCH 81/87] refactor migration mode switch Co-authored-by: rus-alex <70567906+rus-alex@users.noreply.github.com> --- integration/legacy_migrate.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/integration/legacy_migrate.go b/integration/legacy_migrate.go index 9e5076026..b4f8ff333 100644 --- a/integration/legacy_migrate.go +++ b/integration/legacy_migrate.go @@ -236,7 +236,8 @@ func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer, mode st return db } - if mode == "rebuild" { + switch mode { + case "rebuild": // move lachesis, lachesis-%d and gossip-%d DBs for _, name := range oldDBs.Names() { if strings.HasPrefix(name, "lachesis") || strings.HasPrefix(name, "gossip-") { From a004339b0e5cfebf9dda5b169e97ff731abbf56d Mon Sep 17 00:00:00 2001 From: Egor Lysenko <68006922+uprendis@users.noreply.github.com> Date: Thu, 1 Sep 2022 23:48:46 +0700 Subject: [PATCH 82/87] refactor migration mode switch Co-authored-by: rus-alex <70567906+rus-alex@users.noreply.github.com> --- integration/legacy_migrate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration/legacy_migrate.go b/integration/legacy_migrate.go index b4f8ff333..3e28d5ae6 100644 --- a/integration/legacy_migrate.go +++ b/integration/legacy_migrate.go @@ -300,7 +300,7 @@ func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer, mode st dropSrc: b == 0xff, }) } - } else if mode == "reformat" { + case "reformat": if oldDBsType == "ldb" { if !equalRoutingConfig(layout, LdbLegacyRoutingConfig()) { return errors.New("reformatting DBs: missing --db.preset=legacy-ldb flag") From c481d0fbe1cbf831a330912afe52e7413879b544 Mon Sep 17 00:00:00 2001 From: Egor Lysenko <68006922+uprendis@users.noreply.github.com> Date: Thu, 1 Sep 2022 23:48:52 +0700 Subject: [PATCH 83/87] refactor migration mode switch Co-authored-by: rus-alex <70567906+rus-alex@users.noreply.github.com> --- integration/legacy_migrate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration/legacy_migrate.go b/integration/legacy_migrate.go index 3e28d5ae6..7c5880c6c 100644 --- a/integration/legacy_migrate.go +++ b/integration/legacy_migrate.go @@ -334,7 +334,7 @@ func migrateLegacyDBs(chaindataDir string, dbs kvdb.FlushableDBProducer, mode st } } } - } else { + default: return errors.New("missing --db.migration.mode flag") } } From 9ecb1225d322e6def46444db4367649480653276 Mon Sep 17 00:00:00 2001 From: alex Date: Sun, 4 Sep 2022 03:11:43 +1000 Subject: [PATCH 84/87] backend extension --- gossip/filters/filter.go | 3 +++ gossip/filters/filter_system_test.go | 8 ++++++++ 2 files changed, 11 insertions(+) diff --git a/gossip/filters/filter.go b/gossip/filters/filter.go index 3e0903e62..5701a79d5 100644 --- a/gossip/filters/filter.go +++ b/gossip/filters/filter.go @@ -41,6 +41,7 @@ type Backend interface { HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*evmcore.EvmHeader, error) HeaderByHash(ctx context.Context, blockHash common.Hash) (*evmcore.EvmHeader, error) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) + 
GetReceiptsByNumber(ctx context.Context, number rpc.BlockNumber) (types.Receipts, error) GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error) GetTxPosition(txid common.Hash) *evmstore.TxPosition @@ -49,6 +50,8 @@ type Backend interface { SubscribeLogsNotify(ch chan<- []*types.Log) notify.Subscription EvmLogIndex() *topicsdb.Index + + CalcBlockExtApi() bool } // Filter can be used to retrieve and filter logs. diff --git a/gossip/filters/filter_system_test.go b/gossip/filters/filter_system_test.go index 3a7e257cc..dcbeca3cb 100644 --- a/gossip/filters/filter_system_test.go +++ b/gossip/filters/filter_system_test.go @@ -102,6 +102,10 @@ func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types. return nil, nil } +func (b *testBackend) GetReceiptsByNumber(ctx context.Context, number rpc.BlockNumber) (types.Receipts, error) { + return rawdb.ReadReceipts(b.db, common.Hash{}, uint64(number), params.TestChainConfig), nil +} + func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) { number := rawdb.ReadHeaderNumber(b.db, hash) if number == nil { @@ -137,6 +141,10 @@ func (b *testBackend) GetTxPosition(txid common.Hash) *evmstore.TxPosition { return nil } +func (b *testBackend) CalcBlockExtApi() bool { + return true +} + // TestBlockSubscription tests if a block subscription returns block hashes for posted chain notify. // It creates multiple subscriptions: // - one at the start and should receive all posted chain events and a second (blockHashes) From f58ccea4e72ba956c6165ca3acb9273dafb28776 Mon Sep 17 00:00:00 2001 From: alex Date: Sun, 4 Sep 2022 03:13:24 +1000 Subject: [PATCH 85/87] calculateExtBlockApi() before broadcasting --- gossip/filters/filter_system.go | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/gossip/filters/filter_system.go b/gossip/filters/filter_system.go index f27d1bdc7..879f0f024 100644 --- a/gossip/filters/filter_system.go +++ b/gossip/filters/filter_system.go @@ -19,6 +19,7 @@ package filters import ( + "context" "errors" "fmt" "sync" @@ -30,6 +31,7 @@ import ( notify "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/trie" "github.com/Fantom-foundation/go-opera/evmcore" ) @@ -323,14 +325,35 @@ func (es *EventSystem) broadcast(filters filterIndex, ev interface{}) { f.hashes <- hashes } case evmcore.ChainHeadNotify: + h := e.Block.EthHeader() + h.GasLimit = 0xffffffffffff // don't use too much bits here to avoid parsing issues + es.calculateExtBlockApi(h) for _, f := range filters[BlocksSubscription] { - h := e.Block.EthHeader() - h.GasLimit = 0xffffffffffff // don't use too much bits here to avoid parsing issues f.headers <- h } } } +// calculateExtBlockApi doubles ethapi/PublicBlockChainAPI.calculateExtBlockApi() functionality. +// TODO: common code. +func (es *EventSystem) calculateExtBlockApi(h *types.Header) { + blkNumber := rpc.BlockNumber(h.Number.Int64()) + if !es.backend.CalcBlockExtApi() || blkNumber == rpc.EarliestBlockNumber { + return + } + + receipts, err := es.backend.GetReceiptsByNumber(context.Background(), blkNumber) + if err != nil { + return + } + if receipts.Len() != 0 { + h.ReceiptHash = types.DeriveSha(receipts, trie.NewStackTrie(nil)) + h.Bloom = types.CreateBloom(receipts) + } else { + h.ReceiptHash = types.EmptyRootHash + } +} + // eventLoop (un)installs filters and processes mux events. 
func (es *EventSystem) eventLoop() { // Ensure all subscriptions get cleaned up From ae1a4676a4f7bd9e849e3563660bf662a7c26951 Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Sun, 4 Sep 2022 17:41:54 +0700 Subject: [PATCH 86/87] update lachesis-base and KVDB presets --- cmd/opera/launcher/config.go | 2 +- go.mod | 4 +- go.sum | 4 +- integration/presets.go | 100 +++++++++++++++++------------------ 4 files changed, 54 insertions(+), 56 deletions(-) diff --git a/cmd/opera/launcher/config.go b/cmd/opera/launcher/config.go index a9be1b711..ff81645b9 100644 --- a/cmd/opera/launcher/config.go +++ b/cmd/opera/launcher/config.go @@ -132,7 +132,7 @@ const ( // DefaultCacheSize is calculated as memory consumption in a worst case scenario with default configuration // Average memory consumption might be 3-5 times lower than the maximum DefaultCacheSize = 3600 - ConstantCacheSize = 1024 + ConstantCacheSize = 600 ) // These settings ensure that TOML keys use the same names as Go struct fields. diff --git a/go.mod b/go.mod index a2e9a1d1b..6a5697b5d 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/Fantom-foundation/go-opera go 1.14 require ( - github.com/Fantom-foundation/lachesis-base v0.0.0-20220811115347-2434192efadb + github.com/Fantom-foundation/lachesis-base v0.0.0-20220904103856-4bb2a8448a22 github.com/allegro/bigcache v1.2.1 // indirect github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40 // indirect github.com/cespare/cp v1.1.1 @@ -46,6 +46,4 @@ require ( replace github.com/ethereum/go-ethereum => github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc7 -replace github.com/Fantom-foundation/lachesis-base => github.com/hadv/lachesis-base v0.0.0-20220802031837-77b6ebe3697f - replace github.com/dvyukov/go-fuzz => github.com/guzenok/go-fuzz v0.0.0-20210103140116-f9104dfb626f diff --git a/go.sum b/go.sum index 78b5c7d10..a538ac710 100644 --- a/go.sum +++ b/go.sum @@ -41,6 +41,8 @@ github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc7 h1:wt1bhCeb3E0qzDV2iuv2qYlNDO/j8DFJfzVHSnevVOU= github.com/Fantom-foundation/go-ethereum v1.10.8-ftm-rc7/go.mod h1:IeQDjWCNBj/QiWIPosfF6/kRC6pHPNs7W7LfBzjj+P4= +github.com/Fantom-foundation/lachesis-base v0.0.0-20220904103856-4bb2a8448a22 h1:wlB+I8x5mf1a3zlIuOTumy2ye/zglOIGcmTOfJ2bi8s= +github.com/Fantom-foundation/lachesis-base v0.0.0-20220904103856-4bb2a8448a22/go.mod h1:CFMonaK1v/QHwpjLszuycFDgLXNazZqZxciwhKwXDVQ= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -285,8 +287,6 @@ github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29 h1:sezaKh github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/guzenok/go-fuzz v0.0.0-20210103140116-f9104dfb626f h1:GQpcb9ywkaawJfZCFeIcrSWlsZFRTsig42e03dkzc+I= github.com/guzenok/go-fuzz v0.0.0-20210103140116-f9104dfb626f/go.mod h1:Q5On640X2Z0YzKOijx9GVhUu/kvHnk9aKoWGMlRDMtc= -github.com/hadv/lachesis-base v0.0.0-20220802031837-77b6ebe3697f h1:xwWrxjbSFAPhYAaYrStG0dIqLyRKMlH7w0c7x8uUGuM= -github.com/hadv/lachesis-base v0.0.0-20220802031837-77b6ebe3697f/go.mod h1:CFMonaK1v/QHwpjLszuycFDgLXNazZqZxciwhKwXDVQ= 
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= diff --git a/integration/presets.go b/integration/presets.go index 1a9683419..1a9b24c68 100644 --- a/integration/presets.go +++ b/integration/presets.go @@ -69,24 +69,24 @@ func Pbl1RuntimeDBsCacheConfig(scale func(uint64) uint64, fdlimit uint64) DBsCac return DBsCacheConfig{ Table: map[string]DBCacheConfig{ "evm-data": { - Cache: scale(242 * opt.MiB), - Fdlimit: fdlimit*242/700 + 1, + Cache: scale(460 * opt.MiB), + Fdlimit: fdlimit*460/1400 + 1, }, "evm-logs": { - Cache: scale(110 * opt.MiB), - Fdlimit: fdlimit*110/700 + 1, + Cache: scale(260 * opt.MiB), + Fdlimit: fdlimit*220/1400 + 1, }, "main": { - Cache: scale(186 * opt.MiB), - Fdlimit: fdlimit*186/700 + 1, + Cache: scale(320 * opt.MiB), + Fdlimit: fdlimit*280/1400 + 1, }, "events": { - Cache: scale(87 * opt.MiB), - Fdlimit: fdlimit*87/700 + 1, + Cache: scale(240 * opt.MiB), + Fdlimit: fdlimit*200/1400 + 1, }, "epoch-%d": { - Cache: scale(75 * opt.MiB), - Fdlimit: fdlimit*75/700 + 1, + Cache: scale(100 * opt.MiB), + Fdlimit: fdlimit*100/1400 + 1, }, "": { Cache: 64 * opt.MiB, @@ -100,24 +100,24 @@ func Pbl1GenesisDBsCacheConfig(scale func(uint64) uint64, fdlimit uint64) DBsCac return DBsCacheConfig{ Table: map[string]DBCacheConfig{ "main": { - Cache: scale(1024 * opt.MiB), - Fdlimit: fdlimit*1024/3072 + 1, + Cache: scale(1000 * opt.MiB), + Fdlimit: fdlimit*1000/3000 + 1, }, "evm-data": { - Cache: scale(1024 * opt.MiB), - Fdlimit: fdlimit*1024/3072 + 1, + Cache: scale(1000 * opt.MiB), + Fdlimit: fdlimit*1000/3000 + 1, }, "evm-logs": { - Cache: scale(1024 * opt.MiB), - Fdlimit: fdlimit*1024/3072 + 1, + Cache: scale(1000 * opt.MiB), + Fdlimit: fdlimit*1000/3000 + 1, }, "events": { Cache: scale(1 * opt.MiB), - Fdlimit: fdlimit*1/3072 + 1, + Fdlimit: fdlimit*1/3000 + 1, }, "epoch-%d": { Cache: scale(1 * opt.MiB), - Fdlimit: fdlimit*1/3072 + 1, + Fdlimit: fdlimit*1/3000 + 1, }, "": { Cache: 16 * opt.MiB, @@ -182,12 +182,12 @@ func Ldb1RuntimeDBsCacheConfig(scale func(uint64) uint64, fdlimit uint64) DBsCac return DBsCacheConfig{ Table: map[string]DBCacheConfig{ "main": { - Cache: scale(625 * opt.MiB), - Fdlimit: fdlimit*625/700 + 1, + Cache: scale(900 * opt.MiB), + Fdlimit: fdlimit*900/1000 + 1, }, "epoch-%d": { - Cache: scale(75 * opt.MiB), - Fdlimit: fdlimit*75/700 + 1, + Cache: scale(100 * opt.MiB), + Fdlimit: fdlimit*100/1000 + 1, }, "": { Cache: 64 * opt.MiB, @@ -201,12 +201,12 @@ func Ldb1GenesisDBsCacheConfig(scale func(uint64) uint64, fdlimit uint64) DBsCac return DBsCacheConfig{ Table: map[string]DBCacheConfig{ "main": { - Cache: scale(3072 * opt.MiB), + Cache: scale(3000 * opt.MiB), Fdlimit: fdlimit, }, "epoch-%d": { Cache: scale(1 * opt.MiB), - Fdlimit: fdlimit*1/3072 + 1, + Fdlimit: fdlimit*1/3000 + 1, }, "": { Cache: 16 * opt.MiB, @@ -316,20 +316,20 @@ func LdbLegacyRuntimeDBsCacheConfig(scale func(uint64) uint64, fdlimit uint64) D return DBsCacheConfig{ Table: map[string]DBCacheConfig{ "main": { - Cache: scale(564 * opt.MiB), - Fdlimit: fdlimit*564/700 + 1, + Cache: scale(850 * opt.MiB), + Fdlimit: fdlimit*850/1000 + 1, }, "lachesis": { - Cache: scale(8 * opt.MiB), - Fdlimit: fdlimit*8/700 + 1, + Cache: scale(20 * opt.MiB), + Fdlimit: fdlimit*20/1000 + 1, }, "gossip-%d": { - Cache: scale(64 * opt.MiB), - Fdlimit: fdlimit*64/700 + 1, + Cache: scale(50 
* opt.MiB), + Fdlimit: fdlimit*50/1000 + 1, }, "lachesis-%d": { - Cache: scale(64 * opt.MiB), - Fdlimit: fdlimit*64/700 + 1, + Cache: scale(80 * opt.MiB), + Fdlimit: fdlimit*80/1000 + 1, }, "": { Cache: 64 * opt.MiB, @@ -343,20 +343,20 @@ func LdbLegacyGenesisDBsCacheConfig(scale func(uint64) uint64, fdlimit uint64) D return DBsCacheConfig{ Table: map[string]DBCacheConfig{ "main": { - Cache: scale(3072 * opt.MiB), - Fdlimit: fdlimit*3072 + 1, + Cache: scale(3000 * opt.MiB), + Fdlimit: fdlimit*3000 + 1, }, "lachesis": { Cache: scale(1 * opt.MiB), - Fdlimit: fdlimit*1/3072 + 1, + Fdlimit: fdlimit*1/3000 + 1, }, "gossip-%d": { Cache: scale(1 * opt.MiB), - Fdlimit: fdlimit*1/3072 + 1, + Fdlimit: fdlimit*1/3000 + 1, }, "lachesis-%d": { Cache: scale(1 * opt.MiB), - Fdlimit: fdlimit*1/3072 + 1, + Fdlimit: fdlimit*1/3000 + 1, }, "": { Cache: 16 * opt.MiB, @@ -466,20 +466,20 @@ func PblLegacyRuntimeDBsCacheConfig(scale func(uint64) uint64, fdlimit uint64) D return DBsCacheConfig{ Table: map[string]DBCacheConfig{ "main": { - Cache: scale(564 * opt.MiB), - Fdlimit: fdlimit*564/700 + 1, + Cache: scale(950 * opt.MiB), + Fdlimit: fdlimit*950/1400 + 1, }, "lachesis": { - Cache: scale(8 * opt.MiB), - Fdlimit: fdlimit*8/700 + 1, + Cache: scale(150 * opt.MiB), + Fdlimit: fdlimit*150/1400 + 1, }, "gossip-%d": { - Cache: scale(64 * opt.MiB), - Fdlimit: fdlimit*64/700 + 1, + Cache: scale(150 * opt.MiB), + Fdlimit: fdlimit*150/1400 + 1, }, "lachesis-%d": { - Cache: scale(64 * opt.MiB), - Fdlimit: fdlimit*64/700 + 1, + Cache: scale(150 * opt.MiB), + Fdlimit: fdlimit*150/1400 + 1, }, "": { Cache: 64 * opt.MiB, @@ -493,20 +493,20 @@ func PblLegacyGenesisDBsCacheConfig(scale func(uint64) uint64, fdlimit uint64) D return DBsCacheConfig{ Table: map[string]DBCacheConfig{ "main": { - Cache: scale(3072 * opt.MiB), - Fdlimit: fdlimit*3072 + 1, + Cache: scale(3000 * opt.MiB), + Fdlimit: fdlimit, }, "lachesis": { Cache: scale(1 * opt.MiB), - Fdlimit: fdlimit*1/3072 + 1, + Fdlimit: fdlimit*1/3000 + 1, }, "gossip-%d": { Cache: scale(1 * opt.MiB), - Fdlimit: fdlimit*1/3072 + 1, + Fdlimit: fdlimit*1/3000 + 1, }, "lachesis-%d": { Cache: scale(1 * opt.MiB), - Fdlimit: fdlimit*1/3072 + 1, + Fdlimit: fdlimit*1/3000 + 1, }, "": { Cache: 16 * opt.MiB, From 8530dc1374b93455ae758f8c3a7740af75bea60e Mon Sep 17 00:00:00 2001 From: Egor Lysenko Date: Sun, 4 Sep 2022 17:46:20 +0700 Subject: [PATCH 87/87] version up --- version/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version/version.go b/version/version.go index 5334017e6..d21bf2b28 100644 --- a/version/version.go +++ b/version/version.go @@ -11,7 +11,7 @@ func init() { params.VersionMajor = 1 // Major version component of the current release params.VersionMinor = 1 // Minor version component of the current release params.VersionPatch = 2 // Patch version component of the current release - params.VersionMeta = "rc.1" // Version metadata to append to the version string + params.VersionMeta = "rc.2" // Version metadata to append to the version string } func BigToString(b *big.Int) string {
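
For illustration of PATCH 85/87: below is a minimal, hypothetical sketch (not part of the series) of the receipts-derived header fields that the new calculateExtBlockApi helper fills in before a ChainHeadNotify broadcast. It assumes only the stock go-ethereum v1.10.x APIs that filter_system.go already imports (types.DeriveSha, types.CreateBloom, trie.NewStackTrie); the sample receipt is invented.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	// A single successful receipt with no logs, purely for illustration.
	receipts := types.Receipts{types.NewReceipt(nil, false, 21000)}

	header := &types.Header{}
	if receipts.Len() != 0 {
		// Same derivation as in the patched broadcast path:
		// receiptsRoot and logsBloom are recomputed from the block's receipts.
		header.ReceiptHash = types.DeriveSha(receipts, trie.NewStackTrie(nil))
		header.Bloom = types.CreateBloom(receipts)
	} else {
		header.ReceiptHash = types.EmptyRootHash
	}

	fmt.Println("receiptsRoot:", header.ReceiptHash.Hex())
	fmt.Printf("logsBloom:    %x\n", header.Bloom.Bytes())
}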
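
The per-table Fdlimit values reworked in PATCH 86/87 all follow one pattern: a table receives a share of the process file-descriptor limit proportional to its share of the preset's total cache budget, plus one descriptor as a floor (e.g. fdlimit*460/1400 + 1). A hedged sketch of that arithmetic follows; the helper name and the 1024 fd limit are invented for illustration, since the patch writes the expression inline.

package main

import "fmt"

// perTableFDLimit mirrors the arithmetic used throughout presets.go:
// a table's file-descriptor allowance is proportional to its slice of
// the preset's cache budget, with +1 as a minimum.
func perTableFDLimit(fdlimit, cacheMiB, totalCacheMiB uint64) uint64 {
	return fdlimit*cacheMiB/totalCacheMiB + 1
}

func main() {
	// e.g. the "evm-data" table of the runtime preset: 460 MiB out of a
	// ~1400 MiB budget, with an assumed process fd limit of 1024.
	fmt.Println(perTableFDLimit(1024, 460, 1400)) // 337
}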
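
And for PATCH 87/87: a small, hypothetical sketch of how the fields assigned in version/version.go are conventionally combined into the user-visible version string; the helper below is invented for illustration only.

package main

import "fmt"

// versionWithMeta follows the usual "major.minor.patch-meta" layout;
// after this patch the values are 1, 1, 2 and "rc.2".
func versionWithMeta(major, minor, patch uint, meta string) string {
	v := fmt.Sprintf("%d.%d.%d", major, minor, patch)
	if meta != "" {
		v += "-" + meta
	}
	return v
}

func main() {
	fmt.Println(versionWithMeta(1, 1, 2, "rc.2")) // 1.1.2-rc.2
}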