diff --git a/cmd/geth/main.go b/cmd/geth/main.go index d35016e2a8..a6cdee182d 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -224,6 +224,7 @@ var ( utils.StateExpiryStateEpochPeriodFlag, utils.StateExpiryEnableLocalReviveFlag, utils.StateExpiryEnableRemoteModeFlag, + utils.StateExpiryPruneLevelFlag, } ) diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go index 99b9bc5161..adddb18f5c 100644 --- a/cmd/geth/snapshot.go +++ b/cmd/geth/snapshot.go @@ -440,12 +440,12 @@ func pruneState(ctx *cli.Context) error { StateExpiryCfg: cfg.Eth.StateExpiryCfg, } prunerconfig := pruner.Config{ - Datadir: stack.ResolvePath(""), - BloomSize: ctx.Uint64(utils.BloomFilterSizeFlag.Name), - EnableStateExpiry: cfg.Eth.StateExpiryCfg.EnableExpiry(), - ChainConfig: chainConfig, - CacheConfig: cacheConfig, - MaxExpireThreads: ctx.Uint64(utils.StateExpiryMaxThreadFlag.Name), + Datadir: stack.ResolvePath(""), + BloomSize: ctx.Uint64(utils.BloomFilterSizeFlag.Name), + ExpiryCfg: cfg.Eth.StateExpiryCfg, + ChainConfig: chainConfig, + CacheConfig: cacheConfig, + MaxExpireThreads: ctx.Uint64(utils.StateExpiryMaxThreadFlag.Name), } pruner, err := pruner.NewPruner(chaindb, prunerconfig, ctx.Uint64(utils.TriesInMemoryFlag.Name)) if err != nil { diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index de013fbe5a..58c8484021 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1122,6 +1122,7 @@ var ( StateExpiryBaseFlags = []cli.Flag{ StateExpiryEnableFlag, StateExpiryEnableRemoteModeFlag, + StateExpiryPruneLevelFlag, } ) @@ -1169,6 +1170,12 @@ var ( Value: false, Category: flags.StateExpiryCategory, } + StateExpiryPruneLevelFlag = &cli.UintFlag{ + Name: "state-expiry.prunelevel", + Usage: "set prune level for state expiry", + Value: types.StateExpiryPruneLevel0, + Category: flags.StateExpiryCategory, + } ) func init() { @@ -2607,20 +2614,17 @@ func ParseStateExpiryConfig(ctx *cli.Context, disk ethdb.Database, scheme string } else if stored != nil { newCfg.StateEpochPeriod = 
stored.StateEpochPeriod } + if ctx.IsSet(StateExpiryPruneLevelFlag.Name) { + newCfg.PruneLevel = uint8(ctx.Uint(StateExpiryPruneLevelFlag.Name)) + } else if stored != nil { + newCfg.PruneLevel = stored.PruneLevel + } if ctx.IsSet(StateExpiryEnableLocalReviveFlag.Name) { newCfg.EnableLocalRevive = ctx.Bool(StateExpiryEnableLocalReviveFlag.Name) } - // override prune level - newCfg.PruneLevel = types.StateExpiryPruneLevel1 - switch newCfg.StateScheme { - case rawdb.HashScheme: - // TODO(0xbundler): will stop support HBSS later. - newCfg.PruneLevel = types.StateExpiryPruneLevel0 - case rawdb.PathScheme: - newCfg.PruneLevel = types.StateExpiryPruneLevel1 - default: - return nil, fmt.Errorf("not support the state scheme: %v", newCfg.StateScheme) + if rawdb.HashScheme == newCfg.StateScheme && types.StateExpiryPruneLevel1 != newCfg.PruneLevel { + return nil, errors.New("PruneLevel must be StateExpiryPruneLevel1 in HBSS") } if err := newCfg.Validation(); err != nil { diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go index 7550e4743b..2a6ed03728 100644 --- a/core/state/pruner/pruner.go +++ b/core/state/pruner/pruner.go @@ -65,7 +65,9 @@ const ( // to avoid triggering range compaction because of small deletion. rangeCompactionThreshold = 1000000 - FixedPrefixAndAddrSize = 33 + ContractTrieNodeAvgSize = 105 // This is an estimated value + + ContractEpochMetaAvgSize = 16 // This is an estimated value defaultReportDuration = 60 * time.Second @@ -74,12 +76,12 @@ const ( // Config includes all the configurations for pruning. 
type Config struct { - Datadir string // The directory of the state database - BloomSize uint64 // The Megabytes of memory allocated to bloom-filter - EnableStateExpiry bool - ChainConfig *params.ChainConfig - CacheConfig *core.CacheConfig - MaxExpireThreads uint64 + Datadir string // The directory of the state database + BloomSize uint64 // The Megabytes of memory allocated to bloom-filter + ExpiryCfg *types.StateExpiryConfig + ChainConfig *params.ChainConfig + CacheConfig *core.CacheConfig + MaxExpireThreads uint64 } // Pruner is an offline tool to prune the stale state with the @@ -702,7 +704,7 @@ func (p *Pruner) Prune(root common.Hash) error { // ExpiredPrune it must run later to prune, using bloom filter in HBSS to prevent pruning in use trie node, cannot prune concurrently. // but in PBSS, it need not bloom filter func (p *Pruner) ExpiredPrune(height *big.Int, root common.Hash) error { - if !p.config.EnableStateExpiry { + if !p.config.ExpiryCfg.EnableExpiry() { log.Info("stop prune expired state, disable state expiry", "height", height, "root", root, "scheme", p.config.CacheConfig.StateScheme) return nil } @@ -742,7 +744,7 @@ func (p *Pruner) ExpiredPrune(height *big.Int, root common.Hash) error { }() go func() { defer tasksWG.Done() - rets[1] = asyncPruneExpiredStorageInDisk(p.db, pruneExpiredInDiskCh, bloom, p.config.CacheConfig.StateScheme) + rets[1] = asyncPruneExpiredStorageInDisk(p.db, pruneExpiredInDiskCh, bloom, p.config.ExpiryCfg) }() rets[2] = snapshot.TraverseContractTrie(p.snaptree, root, scanExpiredTrieCh) @@ -874,6 +876,14 @@ func asyncScanExpiredInTrie(db *trie.Database, stateRoot common.Hash, epoch type return err } tr.SetEpoch(epoch) + if st.MoreThread() { + st.Schedule(func() { + if err := tr.ScanForPrune(st); err != nil { + log.Error("asyncScanExpiredInTrie, ScanForPrune err", "id", item, "err", err) + } + }) + continue + } if err = tr.ScanForPrune(st); err != nil { log.Error("asyncScanExpiredInTrie, ScanForPrune err", "id", item, "err", 
err) return err @@ -883,7 +893,7 @@ func asyncScanExpiredInTrie(db *trie.Database, stateRoot common.Hash, epoch type return nil } -func asyncPruneExpiredStorageInDisk(diskdb ethdb.Database, pruneExpiredInDisk chan *trie.NodeInfo, bloom *bloomfilter.Filter, scheme string) error { +func asyncPruneExpiredStorageInDisk(diskdb ethdb.Database, pruneExpiredInDisk chan *trie.NodeInfo, bloom *bloomfilter.Filter, cfg *types.StateExpiryConfig) error { var ( itemCount = 0 trieCount = 0 @@ -902,44 +912,26 @@ func asyncPruneExpiredStorageInDisk(diskdb ethdb.Database, pruneExpiredInDisk ch info.IsBranch, "isLeaf", info.IsLeaf) itemCount++ addr := info.Addr - switch scheme { + trieCount++ + trieSize += ContractTrieNodeAvgSize + switch cfg.StateScheme { case rawdb.PathScheme: - val := rawdb.ReadTrieNode(diskdb, addr, info.Path, info.Hash, rawdb.PathScheme) - if len(val) == 0 { - log.Debug("cannot find source trie?", "addr", addr, "path", info.Path, "hash", info.Hash, "epoch", info.Epoch) - } else { - trieCount++ - trieSize += common.StorageSize(len(val) + FixedPrefixAndAddrSize + len(info.Path)) - rawdb.DeleteTrieNode(batch, addr, info.Path, info.Hash, rawdb.PathScheme) - } + rawdb.DeleteTrieNode(batch, addr, info.Path, info.Hash, rawdb.PathScheme) case rawdb.HashScheme: // hbss has shared kv, so using bloom to filter them out. 
if bloom == nil || !bloom.Contains(stateBloomHasher(info.Hash.Bytes())) { - val := rawdb.ReadTrieNode(diskdb, addr, info.Path, info.Hash, rawdb.HashScheme) - if len(val) == 0 { - log.Debug("cannot find source trie?", "addr", addr, "path", info.Path, "hash", info.Hash, "epoch", info.Epoch) - } else { - trieCount++ - trieSize += common.StorageSize(len(val) + FixedPrefixAndAddrSize) - rawdb.DeleteTrieNode(batch, addr, info.Path, info.Hash, rawdb.HashScheme) - } + rawdb.DeleteTrieNode(batch, addr, info.Path, info.Hash, rawdb.HashScheme) } } // delete epoch meta in HBSS - if info.IsBranch && rawdb.HashScheme == scheme { - val := rawdb.ReadEpochMetaPlainState(diskdb, addr, string(info.Path)) - if len(val) == 0 && info.Epoch > types.StateEpoch0 { - log.Debug("cannot find source epochmeta?", "addr", addr, "path", info.Path, "hash", info.Hash, "epoch", info.Epoch) - } - if len(val) > 0 { - epochMetaCount++ - epochMetaSize += common.StorageSize(FixedPrefixAndAddrSize + len(info.Path) + len(val)) - rawdb.DeleteEpochMetaPlainState(batch, addr, string(info.Path)) - } + if info.IsBranch && rawdb.HashScheme == cfg.StateScheme { + epochMetaCount++ + epochMetaSize += ContractEpochMetaAvgSize + rawdb.DeleteEpochMetaPlainState(batch, addr, string(info.Path)) } // replace snapshot kv only epoch if info.IsLeaf { - size, err := snapshot.ShrinkExpiredLeaf(batch, diskdb, addr, info.Key, info.Epoch, scheme) + size, err := snapshot.ShrinkExpiredLeaf(batch, diskdb, addr, info.Key, cfg) if err != nil { log.Error("ShrinkExpiredLeaf err", "addr", addr, "key", info.Key, "err", err) } diff --git a/core/state/snapshot/snapshot_expire.go b/core/state/snapshot/snapshot_expire.go index 769e0e3c60..ba68c99342 100644 --- a/core/state/snapshot/snapshot_expire.go +++ b/core/state/snapshot/snapshot_expire.go @@ -5,31 +5,18 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/log" +) 
+ + const ( + ContractSnapshotAvgSize = 17 // This is an estimated value ) // ShrinkExpiredLeaf tool function for snapshot kv prune -func ShrinkExpiredLeaf(writer ethdb.KeyValueWriter, reader ethdb.KeyValueReader, accountHash common.Hash, storageHash common.Hash, epoch types.StateEpoch, scheme string) (int64, error) { - switch scheme { - case rawdb.HashScheme: - //cannot prune snapshot in hbss, because it will used for trie prune, but it's ok in pbss. - case rawdb.PathScheme: - val := rawdb.ReadStorageSnapshot(reader, accountHash, storageHash) - if len(val) == 0 { - log.Debug("cannot find source snapshot?", "addr", accountHash, "key", storageHash, "epoch", epoch) - return 0, nil - } - valWithEpoch := NewValueWithEpoch(epoch, nil) - enc, err := EncodeValueToRLPBytes(valWithEpoch) - if err != nil { - return 0, err - } - rawdb.WriteStorageSnapshot(writer, accountHash, storageHash, enc) - shrinkSize := len(val) - len(enc) - if shrinkSize < 0 { - shrinkSize = 0 - } - return int64(shrinkSize), nil +func ShrinkExpiredLeaf(writer ethdb.KeyValueWriter, reader ethdb.KeyValueReader, accountHash common.Hash, storageHash common.Hash, cfg *types.StateExpiryConfig) (int64, error) { + if types.StateExpiryPruneLevel1 == cfg.PruneLevel { + return 0, nil } - return 0, nil + + rawdb.DeleteStorageSnapshot(writer, accountHash, storageHash) + return ContractSnapshotAvgSize, nil } diff --git a/core/state/snapshot/snapshot_expire_test.go b/core/state/snapshot/snapshot_expire_test.go index 0bb2dd762c..57ad0d08a3 100644 --- a/core/state/snapshot/snapshot_expire_test.go +++ b/core/state/snapshot/snapshot_expire_test.go @@ -15,14 +15,35 @@ var ( storageHash1 = common.HexToHash("0x0bb2f3e66816c6fd12513f053d5ee034b1fa2d448a1dc8ee7f56e4c87d6c53fe") ) -func TestShrinkExpiredLeaf(t *testing.T) { +func TestShrinkExpiredLeaf_Level0(t *testing.T) { db := memorydb.New() rawdb.WriteStorageSnapshot(db, accountHash, storageHash1, encodeSnapVal(NewRawValue([]byte("val1")))) - _, err := ShrinkExpiredLeaf(db, 
db, accountHash, storageHash1, types.StateEpoch0, rawdb.PathScheme) + cfg := &types.StateExpiryConfig{ + StateScheme: rawdb.PathScheme, + PruneLevel: types.StateExpiryPruneLevel0, + } + + _, err := ShrinkExpiredLeaf(db, db, accountHash, storageHash1, cfg) + assert.NoError(t, err) + + assert.True(t, len(rawdb.ReadStorageSnapshot(db, accountHash, storageHash1)) == 0) +} + +func TestShrinkExpiredLeaf_Level1(t *testing.T) { + db := memorydb.New() + raw := encodeSnapVal(NewRawValue([]byte("val1"))) + rawdb.WriteStorageSnapshot(db, accountHash, storageHash1, raw) + + cfg := &types.StateExpiryConfig{ + StateScheme: rawdb.PathScheme, + PruneLevel: types.StateExpiryPruneLevel1, + } + + _, err := ShrinkExpiredLeaf(db, db, accountHash, storageHash1, cfg) assert.NoError(t, err) - assert.Equal(t, encodeSnapVal(NewValueWithEpoch(types.StateEpoch0, nil)), rawdb.ReadStorageSnapshot(db, accountHash, storageHash1)) + assert.Equal(t, raw, rawdb.ReadStorageSnapshot(db, accountHash, storageHash1)) } func encodeSnapVal(val SnapValue) []byte { diff --git a/core/state/state_expiry.go b/core/state/state_expiry.go index 49c867f4db..ebb7ba7974 100644 --- a/core/state/state_expiry.go +++ b/core/state/state_expiry.go @@ -30,6 +30,7 @@ type stateExpiryMeta struct { epoch types.StateEpoch originalRoot common.Hash originalHash common.Hash + pruneLevel uint8 } func defaultStateExpiryMeta() *stateExpiryMeta { diff --git a/core/state/state_object.go b/core/state/state_object.go index ac5a3aa32d..54de1c9bba 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -269,12 +269,7 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { start := time.Now() // handle state expiry situation if s.db.EnableExpire() { - var dbError error - enc, err, dbError = s.getExpirySnapStorage(key) - if dbError != nil { - s.db.setError(fmt.Errorf("state expiry getExpirySnapStorage, contract: %v, key: %v, err: %v", s.address, key, dbError)) - return common.Hash{} - } + enc, err = 
s.getExpirySnapStorage(key) if len(enc) > 0 { value.SetBytes(enc) } @@ -294,7 +289,7 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { } // If the snapshot is unavailable or reading from it fails, load from the database. - if s.db.snap == nil || err != nil { + if s.db.snap == nil || err != nil || s.needCheckExpiredInTrie(enc, err) { getCommittedStorageTrieMeter.Mark(1) start := time.Now() var tr Trie @@ -307,15 +302,15 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { s.db.setError(fmt.Errorf("state object getTrie err, contract: %v, err: %v", s.address, err)) return common.Hash{} } - val, err := tr.GetStorage(s.address, key.Bytes()) + var val []byte + val, err = tr.GetStorage(s.address, key.Bytes()) if metrics.EnabledExpensive { s.db.StorageReads += time.Since(start) } // handle state expiry situation if s.db.EnableExpire() { if path, ok := trie.ParseExpiredNodeErr(err); ok { - //log.Debug("GetCommittedState expired in trie", "addr", s.address, "key", key, "err", err) - val, err = s.tryReviveState(path, key, false) + val, err = s.tryReviveState(path, key) getCommittedStorageExpiredMeter.Mark(1) } else if err == nil { getCommittedStorageUnexpiredMeter.Mark(1) @@ -520,9 +515,11 @@ func (s *stateObject) updateTrie() (Trie, error) { if len(value) == 0 { err := tr.DeleteStorage(s.address, key[:]) if path, ok := trie.ParseExpiredNodeErr(err); ok { - touchExpiredStorage[key] = value - if _, err = tryReviveState(s.db.expiryMeta, s.address, s.data.Root, tr, path, key, true); err != nil { - s.db.setError(fmt.Errorf("updateTrie DeleteStorage tryReviveState err, contract: %v, key: %v, path: %v, err: %v", s.address, key, path, err)) + _, reviveErr := tryReviveState(s.db.expiryMeta, s.address, s.data.Root, tr, path, key, true) + if reviveErr != nil { + s.db.setError(fmt.Errorf("updateTrie DeleteStorage tryReviveState err, contract: %v, key: %v, path: %v, err: %v", s.address, key, path, reviveErr)) + } else { + 
touchExpiredStorage[key] = value } } else if err != nil { s.db.setError(fmt.Errorf("updateTrie DeleteStorage err, contract: %v, key: %v, err: %v", s.address, key, err)) @@ -532,9 +529,11 @@ } else { err := tr.UpdateStorage(s.address, key[:], value) if path, ok := trie.ParseExpiredNodeErr(err); ok { - touchExpiredStorage[key] = value - if _, err = tryReviveState(s.db.expiryMeta, s.address, s.data.Root, tr, path, key, true); err != nil { - s.db.setError(fmt.Errorf("updateTrie DeleteStorage tryReviveState err, contract: %v, key: %v, path: %v, err: %v", s.address, key, path, err)) + _, reviveErr := tryReviveState(s.db.expiryMeta, s.address, s.data.Root, tr, path, key, true) + if reviveErr != nil { + s.db.setError(fmt.Errorf("updateTrie UpdateStorage tryReviveState err, contract: %v, key: %v, path: %v, err: %v", s.address, key, path, reviveErr)) + } else { + touchExpiredStorage[key] = value } } else if err != nil { s.db.setError(fmt.Errorf("updateTrie UpdateStorage err, contract: %v, key: %v, err: %v", s.address, key, err)) @@ -546,18 +545,43 @@ usedStorage = append(usedStorage, common.CopyBytes(key[:])) } - // re-execute touched expired storage - for key, value := range touchExpiredStorage { - if len(value) == 0 { - if err := tr.DeleteStorage(s.address, key[:]); err != nil { - s.db.setError(fmt.Errorf("updateTrie DeleteStorage in touchExpiredStorage err, contract: %v, key: %v, err: %v", s.address, key, err)) - } - //log.Debug("updateTrie DeleteStorage in touchExpiredStorage", "contract", s.address, "key", key, "epoch", s.db.Epoch(), "value", value, "tr.epoch", tr.Epoch(), "err", err, "tr", fmt.Sprintf("%p", tr), "ins", fmt.Sprintf("%p", s)) - } else { - if err := tr.UpdateStorage(s.address, key[:], value); err != nil { - s.db.setError(fmt.Errorf("updateTrie UpdateStorage in touchExpiredStorage err, contract: %v, key: %v, err: %v", s.address, key, err)) + // re-execute 
touched expired storage, it may have recurse expired tree + if s.db.EnableExpire() { + tryTimes := 65 * len(touchExpiredStorage) // trie height max is 65, prevent too many revive fail + for len(touchExpiredStorage) > 0 && tryTimes > 0 { + nextRevive := make(map[common.Hash][]byte, len(touchExpiredStorage)) + for key, value := range touchExpiredStorage { + if len(value) == 0 { + err := tr.DeleteStorage(s.address, key[:]) + if path, ok := trie.ParseExpiredNodeErr(err); ok { + _, reviveErr := tryReviveState(s.db.expiryMeta, s.address, s.data.Root, tr, path, key, true) + if reviveErr != nil { + s.db.setError(fmt.Errorf("updateTrie DeleteStorage tryReviveState err, contract: %v, key: %v, path: %v, err: %v", s.address, key, path, reviveErr)) + } else { + nextRevive[key] = value + } + } else if err != nil { + s.db.setError(fmt.Errorf("updateTrie DeleteStorage err, contract: %v, key: %v, err: %v", s.address, key, err)) + } + } else { + err := tr.UpdateStorage(s.address, key[:], value) + if path, ok := trie.ParseExpiredNodeErr(err); ok { + _, reviveErr := tryReviveState(s.db.expiryMeta, s.address, s.data.Root, tr, path, key, true) + if reviveErr != nil { + s.db.setError(fmt.Errorf("updateTrie UpdateStorage tryReviveState err, contract: %v, key: %v, path: %v, err: %v", s.address, key, path, reviveErr)) + } else { + nextRevive[key] = value + } + } else if err != nil { + s.db.setError(fmt.Errorf("updateTrie UpdateStorage err, contract: %v, key: %v, err: %v", s.address, key, err)) + } + } + } - //log.Debug("updateTrie UpdateStorage in touchExpiredStorage", "contract", s.address, "key", key, "epoch", s.db.Epoch(), "value", value, "tr.epoch", tr.Epoch(), "err", err, "tr", fmt.Sprintf("%p", tr), "ins", fmt.Sprintf("%p", s)) + touchExpiredStorage = nextRevive + tryTimes-- + } + if len(touchExpiredStorage) > 0 { + s.db.setError(fmt.Errorf("updateTrie cannot revive expired storage, contract: %v, keys: %v", s.address, touchExpiredStorage)) } } }() @@ -898,27 +922,13 @@ func (s 
*stateObject) queryFromReviveState(reviveState map[string]common.Hash, k } // tryReviveState request expired state from remote full state node; -func (s *stateObject) tryReviveState(prefixKey []byte, key common.Hash, resolvePath bool) ([]byte, error) { +func (s *stateObject) tryReviveState(prefixKey []byte, key common.Hash) ([]byte, error) { tr, err := s.getPendingReviveTrie() if err != nil { return nil, err } - // if no prefix, query from revive trie, got the newest expired info - if resolvePath { - val, err := tr.GetStorage(s.address, key.Bytes()) - if err == nil { - // TODO(asyukii): temporary fix snap expired, but trie not expire, may investigate more later. - s.pendingReviveState[string(crypto.Keccak256(key[:]))] = common.BytesToHash(val) - return val, nil - } - path, ok := trie.ParseExpiredNodeErr(err) - if !ok { - return nil, fmt.Errorf("cannot find expired state from trie, err: %v", err) - } - prefixKey = path - } - + // if there need resolvePath from snap, it must not locally revive successful. 
kvs, err := tryReviveState(s.db.expiryMeta, s.address, s.data.Root, tr, prefixKey, key, false) if err != nil { return nil, err @@ -933,45 +943,52 @@ func (s *stateObject) tryReviveState(prefixKey []byte, key common.Hash, resolveP return val.Bytes(), nil } -func (s *stateObject) getExpirySnapStorage(key common.Hash) ([]byte, error, error) { +func (s *stateObject) getExpirySnapStorage(key common.Hash) ([]byte, error) { enc, err := s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes())) if err != nil { - return nil, err, nil + return nil, err } var val snapshot.SnapValue if len(enc) > 0 { val, err = snapshot.DecodeValueFromRLPBytes(enc) if err != nil { - return nil, nil, err + return nil, err } } if val == nil { // record access empty kv, try touch in updateTrie for duplication s.futureReviveState(key) - return nil, nil, nil + return nil, nil } s.originStorageEpoch[key] = val.GetEpoch() - if !types.EpochExpired(val.GetEpoch(), s.db.Epoch()) { + if types.EpochExpired(val.GetEpoch(), s.db.Epoch()) { + getCommittedStorageExpiredMeter.Mark(1) + s.futureReviveState(key) + } else { getCommittedStorageUnexpiredMeter.Mark(1) - return val.GetVal(), nil, nil } + return val.GetVal(), nil +} - getCommittedStorageExpiredMeter.Mark(1) - // if found value not been pruned, just return, local revive later - if s.db.EnableLocalRevive() && len(val.GetVal()) > 0 { - s.futureReviveState(key) - getCommittedStorageExpiredLocalReviveMeter.Mark(1) - return val.GetVal(), nil, nil +func (s *stateObject) needCheckExpiredInTrie(enc []byte, err error) bool { + if !s.db.EnableExpire() { + return false } - //log.Debug("GetCommittedState expired in snapshot", "addr", s.address, "key", key, "val", val, "enc", enc, "err", err) - // handle from remoteDB, if got err just setError, or return to revert in consensus version. 
- valRaw, err := s.tryReviveState(nil, key, true) - if err != nil { - return nil, nil, err + if types.StateExpiryPruneLevel1 == s.db.expiryMeta.pruneLevel { + return false + } + + // if in PruneLevel0, nil val need check expiry in trie + if s.db.expiryMeta.epoch <= types.StateEpoch1 { + return false + } + + if err != nil || len(enc) == 0 { + return true } - return valRaw, nil, nil + return false } diff --git a/core/state/statedb.go b/core/state/statedb.go index 85a4415bf9..aba925d51c 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -46,16 +46,15 @@ import ( const defaultNumOfSlots = 100 var ( - getCommittedStorageMeter = metrics.NewRegisteredMeter("state/contract/committed", nil) - getCommittedStorageSnapMeter = metrics.NewRegisteredMeter("state/contract/committed/snap", nil) - getCommittedStorageTrieMeter = metrics.NewRegisteredMeter("state/contract/committed/trie", nil) - getCommittedStorageExpiredMeter = metrics.NewRegisteredMeter("state/contract/committed/expired", nil) - getCommittedStorageExpiredLocalReviveMeter = metrics.NewRegisteredMeter("state/contract/committed/expired/localrevive", nil) - getCommittedStorageUnexpiredMeter = metrics.NewRegisteredMeter("state/contract/committed/unexpired", nil) - getCommittedStorageRemoteMeter = metrics.NewRegisteredMeter("state/contract/committed/remote", nil) - storageReadMeter = metrics.NewRegisteredMeter("state/contract/state/read", nil) - storageWriteMeter = metrics.NewRegisteredMeter("state/contract/state/write", nil) - storageAccessMeter = metrics.NewRegisteredMeter("state/contract/state/access", nil) + getCommittedStorageMeter = metrics.NewRegisteredMeter("state/contract/committed", nil) + getCommittedStorageSnapMeter = metrics.NewRegisteredMeter("state/contract/committed/snap", nil) + getCommittedStorageTrieMeter = metrics.NewRegisteredMeter("state/contract/committed/trie", nil) + getCommittedStorageExpiredMeter = metrics.NewRegisteredMeter("state/contract/committed/expired", nil) + 
getCommittedStorageUnexpiredMeter = metrics.NewRegisteredMeter("state/contract/committed/unexpired", nil) + getCommittedStorageRemoteMeter = metrics.NewRegisteredMeter("state/contract/committed/remote", nil) + storageReadMeter = metrics.NewRegisteredMeter("state/contract/state/read", nil) + storageWriteMeter = metrics.NewRegisteredMeter("state/contract/state/write", nil) + storageAccessMeter = metrics.NewRegisteredMeter("state/contract/state/access", nil) ) type revision struct { @@ -264,6 +263,7 @@ func (s *StateDB) InitStateExpiryFeature(config *types.StateExpiryConfig, remote epoch: epoch, originalRoot: s.originalRoot, originalHash: startAtBlockHash, + pruneLevel: config.PruneLevel, } //log.Debug("StateDB enable state expiry feature", "expectHeight", expectHeight, "startAtBlockHash", startAtBlockHash, "epoch", epoch) return s diff --git a/core/types/state_expiry.go b/core/types/state_expiry.go index bb83a31f2c..1b4c59589b 100644 --- a/core/types/state_expiry.go +++ b/core/types/state_expiry.go @@ -9,9 +9,8 @@ import ( ) const ( - StateExpiryPruneLevel0 = iota // StateExpiryPruneLevel0 is for HBSS, in HBSS we cannot prune any expired snapshot, it need rebuild trie for old tire node prune, it also cannot prune any shared trie node too. - StateExpiryPruneLevel1 // StateExpiryPruneLevel1 is the default level, it left some expired snapshot meta for performance friendly. - StateExpiryPruneLevel2 // StateExpiryPruneLevel2 will prune all expired snapshot kvs and trie nodes, but it will access more times in tire when execution. TODO(0xbundler): will support it later + StateExpiryPruneLevel0 = iota // StateExpiryPruneLevel0 is the default level; it prunes all expired snapshot kvs and trie nodes, but needs more trie accesses during execution. It is not supported in HBSS. + StateExpiryPruneLevel1 // StateExpiryPruneLevel1 keeps all snapshot & epoch meta for better performance. 
) type StateExpiryConfig struct { diff --git a/ethdb/fullstatedb.go b/ethdb/fullstatedb.go index 80b9c14ed5..ee1fc8dc97 100644 --- a/ethdb/fullstatedb.go +++ b/ethdb/fullstatedb.go @@ -86,7 +86,7 @@ func (f *FullStateRPCServer) GetStorageReviveProof(stateRoot common.Hash, accoun } // TODO(0xbundler): add timeout in flags? - ctx, cancelFunc := context.WithTimeout(context.Background(), 100*time.Millisecond) + ctx, cancelFunc := context.WithTimeout(context.Background(), 300*time.Millisecond) defer cancelFunc() err := f.client.CallContext(ctx, &result, "eth_getStorageReviveProof", stateRoot, account, root, uncachedKeys, uncachedPrefixKeys) if err != nil { diff --git a/trie/trie.go b/trie/trie.go index c2039d6de2..34501b4bba 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -1673,49 +1673,48 @@ func (t *Trie) findExpiredSubTree(n node, path []byte, epoch types.StateEpoch, p } func (t *Trie) recursePruneExpiredNode(n node, path []byte, epoch types.StateEpoch, st *ScanTask) error { + np := renewBytes(path) switch n := n.(type) { case *shortNode: st.Stat(true) - subPath := append(path, n.Key...) 
- key := common.Hash{} - _, isLeaf := n.Val.(valueNode) - if isLeaf { - key = common.BytesToHash(hexToKeybytes(subPath)) + err := t.recursePruneExpiredNode(n.Val, append(path, n.Key...), epoch, st) + if err != nil { + return err } if st.findExpired { + key := common.Hash{} + _, isLeaf := n.Val.(valueNode) + if isLeaf { + key = common.BytesToHash(hexToKeybytes(append(np, n.Key...))) + } st.itemCh <- &NodeInfo{ Addr: t.owner, Hash: common.BytesToHash(n.flags.hash), - Path: renewBytes(path), + Path: np, Key: key, Epoch: epoch, IsLeaf: isLeaf, } } - - err := t.recursePruneExpiredNode(n.Val, subPath, epoch, st) - if err != nil { - return err - } return nil case *fullNode: st.Stat(true) + // recurse child, and except valueNode + for i := 0; i < BranchNodeLength-1; i++ { + err := t.recursePruneExpiredNode(n.Children[i], append(path, byte(i)), n.EpochMap[i], st) + if err != nil { + return err + } + } if st.findExpired { st.itemCh <- &NodeInfo{ Addr: t.owner, Hash: common.BytesToHash(n.flags.hash), - Path: renewBytes(path), + Path: np, Epoch: epoch, IsBranch: true, } } - // recurse child, and except valueNode - for i := 0; i < BranchNodeLength-1; i++ { - err := t.recursePruneExpiredNode(n.Children[i], append(path, byte(i)), n.EpochMap[i], st) - if err != nil { - return err - } - } return nil case hashNode: // hashNode is a index of trie node storage, need not prune.